## @package optimizer_test_util
# Module caffe2.python.optimizer_test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace, cnn
class OptimizerTestBase(object):
"""
This is an abstract base class.
Don't inherit from unittest.TestCase, and don't name it 'Test*'.
Do, however, do these things in classes which inherit from this.
"""
def testDense(self):
perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
np.random.seed(123) # make test deterministic
data = np.random.randint(
2,
size=(20, perfect_model.size)).astype(np.float32)
label = np.dot(data, perfect_model)[:, np.newaxis]
model = cnn.CNNModelHelper("NCHW", name="test")
out = model.FC(
'data', 'fc', perfect_model.size, 1, ('ConstantFill', {}),
('ConstantFill', {}), axis=0
)
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['fc_w'], core.BlobReference)
optimizer = self.build_optimizer(model)
workspace.FeedBlob('data', data[0])
workspace.FeedBlob('label', label[0])
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
idx = np.random.randint(data.shape[0])
workspace.FeedBlob('data', data[idx])
workspace.FeedBlob('label', label[idx])
workspace.RunNet(model.net.Proto().name)
np.testing.assert_allclose(
perfect_model[np.newaxis, :],
workspace.FetchBlob('fc_w'),
atol=1e-2
)
self.check_optimizer(optimizer)
def testSparse(self):
# to test duplicated indices we assign two indices to each weight and
# thus each weight might be counted once or twice
DUPLICATION = 2
perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
np.random.seed(123) # make test deterministic
data = np.random.randint(
2,
size=(20, perfect_model.size * DUPLICATION)).astype(np.float32)
label = np.dot(data, np.repeat(perfect_model, DUPLICATION))
model = cnn.CNNModelHelper("NCHW", name="test")
# imitate what the model wrapper does
w = model.param_init_net.ConstantFill(
[], 'w', shape=[perfect_model.size], value=0.0)
model.params.append(w)
picked = model.net.Gather([w, 'indices'], 'gather')
out = model.ReduceFrontSum(picked, 'sum')
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
for indices_type in [np.int32, np.int64]:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
idx = np.random.randint(data.shape[0])
# transform into indices of binary features
indices = np.repeat(np.arange(perfect_model.size),
DUPLICATION)[data[idx] == 1]
if indices.size == 0:
continue
workspace.FeedBlob(
'indices',
indices.reshape((indices.size,)).astype(indices_type)
)
workspace.FeedBlob('label',
np.array(label[idx]).astype(np.float32))
workspace.RunNet(model.net.Proto().name)
np.testing.assert_allclose(
perfect_model,
workspace.FetchBlob('w'),
atol=1e-2
)
self.check_optimizer(optimizer)
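# Example (illustrative sketch, not part of this module): a concrete test
# case would inherit from both OptimizerTestBase and unittest.TestCase and
# implement build_optimizer/check_optimizer, e.g. assuming the build_sgd
# helper from caffe2.python.optimizer:
#
# class TestSgd(OptimizerTestBase, unittest.TestCase):
#     def build_optimizer(self, model):
#         from caffe2.python import optimizer
#         return optimizer.build_sgd(model, base_learning_rate=0.1)
#     def check_optimizer(self, optimizer):
#         # plain SGD keeps no auxiliary parameters
#         self.assertTrue(optimizer.get_auxiliary_parameters().shared == [])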
## @package muji
# Module caffe2.python.muji
"""muji.py does multi-gpu training for caffe2 with no need to change the c++
side code. Everything is defined on the computation graph level.
Currently, here are the assumptions: we only support the following use cases:
- 2 gpus, where peer access is enabled between them.
- 4 gpus, where peer access are enabled between all of them.
- 8 gpus, where peer access are enabled in two groups,
between {1, 2, 3, 4} and {5, 6, 7, 8}.
"""
from caffe2.python import core
from caffe2.proto import caffe2_pb2
def OnGPU(gpu_id):
"""A utility function that returns a device option protobuf of the
specified gpu id.
"""
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = gpu_id
return device_option
def OnCPU():
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
return device_option
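# Example (illustrative): a device option pins individual operators when
# building a net, e.g.
#   net.Relu("X", "Y", device_option=OnGPU(0))
# runs the Relu on gpu 0, while OnCPU() keeps an op on the host.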
def Allreduce(net, blobs, reduced_affix="_reduced", gpu_indices=None):
"""The general Allreduce interface that reroutes the function calls.
"""
if gpu_indices is None:
gpu_indices = range(len(blobs))
if len(gpu_indices) != len(blobs):
raise RuntimeError(
"gpu_indices length and blobs length mismatch: %d vs %d" %
(len(gpu_indices), len(blobs))
)
if len(blobs) == 2:
return Allreduce2(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 4:
return Allreduce4(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 8:
return Allreduce8(net, blobs, reduced_affix, gpu_indices)
else:
return AllreduceFallback(net, blobs, reduced_affix, gpu_indices)
def Allreduce2(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 2 gpus.
Algorithm: 0r <- 0 + 1, 1r <- 0r, where r means "reduced"
"""
a, b = blobs
gpu_a, gpu_b = gpu_indices
a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))
b_reduced = a_reduced.Copy(
[],
b + reduced_affix,
device_option=OnGPU(gpu_b)
)
return a_reduced, b_reduced
def Allreduce4(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 4 gpus.
Algorithm: 2 level reduction.
0r <- 0 + 1, 2r <- 2 + 3
0r <- 0r + 2r
2r <- 0r,
1r <- 0r, 3r <- 2r
"""
a, b, c, d = blobs
gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices
# a_reduced <- a+b, c_reduced <- c + d
a_reduced = net.Add(
[a, b],
str(a) + reduced_affix,
device_option=OnGPU(gpu_a)
)
c_reduced = net.Add(
[c, d],
str(c) + reduced_affix,
device_option=OnGPU(gpu_c)
)
# a_reduced <- a_reduced + c_reduced
a_reduced = a_reduced.Add(c_reduced, a_reduced, device_option=OnGPU(gpu_a))
# broadcast a_reduced to c_reduced
c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))
# broadcast to b and d
b_reduced = a_reduced.Copy(
[],
str(b) + reduced_affix,
device_option=OnGPU(gpu_b)
)
d_reduced = c_reduced.Copy(
[],
str(d) + reduced_affix,
device_option=OnGPU(gpu_d)
)
return a_reduced, b_reduced, c_reduced, d_reduced
def Allreduce8(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 8 gpus.
Algorithm: 3 level reduction.
0r <- 0 + 1, 2r <- 2 + 3, 4r <- 4 + 5, 6r <- 6 + 7
0r <- 0r + 2r, 4r <- 4r + 6r
0r <- 0r + 4r
4r <- 0r
2r <- 0r, 6r <- 4r
1r <- 0r, 3r <- 2r, 5r <- 4r, 7r <- 6r
"""
reduced = [None] * 8
# Reduction level 1
for i in [0, 2, 4, 6]:
reduced[i] = net.Add(
[blobs[i], blobs[i + 1]],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
# Reduction level 2
for i in [0, 4]:
reduced[i] = net.Add(
[reduced[i], reduced[i + 2]],
str(blobs[i]) + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
# Reduction level 3: this involves a copy.
reduced_4_copy = reduced[4].Copy(
[],
str(reduced[4]) + '_copy',
device_option=OnGPU(gpu_indices[0])
)
reduced[0] = reduced[0].Add(
reduced_4_copy,
reduced[0],
device_option=OnGPU(gpu_indices[0])
)
# Broadcast level 1
reduced[4] = reduced[0].Copy(
[],
reduced[4],
device_option=OnGPU(gpu_indices[4])
)
# Broadcast level 2
for i in [2, 6]:
reduced[i] = reduced[i - 2].Copy(
[],
reduced[i],
device_option=OnGPU(gpu_indices[i])
)
# Broadcast level 3
for i in [1, 3, 5, 7]:
reduced[i] = reduced[i - 1].Copy(
[],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
return reduced
def AllreduceFallback(net, blobs, reduced_affix, gpu_indices):
"""A fallback option for Allreduce with no assumption on p2p.
Algorithm: a flat operation on gpu 0
0r <- 0
0r <- 0r + i for i in gpu_indices[1:]
ir <- 0r for i in gpu_indices[1:]
"""
reduced = [None] * len(gpu_indices)
# copy first
reduced[0] = net.Copy(
blobs[0],
blobs[0] + reduced_affix,
device_option=OnGPU(gpu_indices[0])
)
# do temp copy and add
temp_name = reduced[0] + '_temp_copy'
for i in range(1, len(gpu_indices)):
temp = net.Copy(
blobs[i],
temp_name,
device_option=OnGPU(gpu_indices[0])
)
reduced[0] = reduced[0].Add(
temp,
reduced[0],
device_option=OnGPU(gpu_indices[0])
)
# Broadcast to everyone else
for i in range(1, len(gpu_indices)):
reduced[i] = net.Copy(
reduced[0],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
return reduced
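# Usage sketch (illustrative; blob and net names are assumptions): given
# gradient blobs that already live one per gpu,
#   net = core.Net("allreduce_example")
#   reduced = Allreduce(net, ["g_0", "g_1", "g_2", "g_3"],
#                       gpu_indices=[0, 1, 2, 3])
# running the net leaves "g_i_reduced" on gpu i, each holding
# g_0 + g_1 + g_2 + g_3.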
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, data_parallel_model, cnn, rnn_cell
from caffe2.python.test_util import TestCase
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class GPUDataParallelModelTest(TestCase):
def run_model(self, gpu_devices):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
fc = model.FC("data", "fc", 16, 1,
("ConstantFill", {}), ("ConstantFill", {}))
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
return [loss]
def param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, grad, LR], param)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="test{}".format(gpu_devices),
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
)
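# In outline, Parallelize_GPU builds the forward/backward ops once per
# device under "gpu_{i}/" name scopes and inserts cross-device gradient
# reduction, which is why blobs below are addressed as "gpu_{i}/<name>".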
np.random.seed(2603)
# Each run has the same input, independent of the number of gpus
batch_size = 64
for i in range(0, 10):
full_data = np.random.rand(batch_size, 16)
full_labels = np.round(full_data[:, 0])
batch_per_device = batch_size // len(gpu_devices)
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
data = full_data[st:en, :].astype(np.float32)
labels = full_labels[st:en].astype(np.float32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/data".format(g), data)
workspace.FeedBlob("gpu_{}/label".format(g), labels)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
print(i, workspace.FetchBlob("gpu_0/fc_w").flatten()[:5])
workspace.RunNet(model.net.Proto().name)
return workspace.FetchBlob("gpu_0/fc_w")
def test_equiv(self):
'''
Test that the model produces exactly the same results for a given
total batch size, independent of the number of GPUs.
'''
result_2gpus = self.run_model([0, 1])
result_1gpus = self.run_model([0])
self.assertTrue(np.allclose(result_1gpus, result_2gpus))
if workspace.NumCudaDevices() >= 4:
result_4gpus = self.run_model(range(4))
self.assertTrue(np.allclose(result_1gpus, result_4gpus))
if workspace.NumCudaDevices() >= 8:
result_8gpus = self.run_model(range(8))
self.assertTrue(np.allclose(result_1gpus, result_8gpus))
def test_checkpoint_params(self):
def add_input_ops(model):
pass
def add_model_ops(model, loss_scale):
model.NHWC2NCHW("data", "data_nchw")
model.Conv("data_nchw", 'conv1', 3, 64,
weight_init=("MSRAFill", {}), kernel=7,
stride=2, pad=3, no_bias=0)
model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3)
model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')
model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
model.Sigmoid('fc', 'fc_sigm')
model.Softmax('fc_sigm', 'softmax')
model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = model.AveragedLoss('xent', 'loss')
# Add a duplicate param init to ensure it does not cause issues
model.param_init_net.ConstantFill(
[], ["fc_w"], shape=((64 * 56 * 56), 1000)
)
return [loss]
def add_parameter_update_ops(model):
model.Iter("ITER")
LR = model.param_init_net.ConstantFill(
[], 'LR', shape=[1], value=0.1
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
)
model = cnn.CNNModelHelper(
order="NHWC",
name="test",
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=add_input_ops,
forward_pass_builder_fun=add_model_ops,
param_update_builder_fun=add_parameter_update_ops,
devices=[1, 2, 3],
)
# Only gpu_1 params should be returned (gpu_1 is the first gpu)
checkpoint_params = data_parallel_model.GetCheckpointParams(model)
for p in model.GetParams("gpu_1/"):
self.assertTrue(p in checkpoint_params)
self.assertTrue(p + "_momentum" in checkpoint_params)
for p in model.GetParams("gpu_2/"):
self.assertFalse(p in checkpoint_params)
for c in model.GetComputedParams("gpu_1/"):
self.assertTrue(c in checkpoint_params)
for c in model.GetComputedParams("gpu_2/"):
self.assertFalse(c in checkpoint_params)
self.assertFalse(core.BlobReference("gpu_1/data") in checkpoint_params)
self.assertTrue(core.BlobReference("gpu_1/ITER") in checkpoint_params)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class RecurrentNetworkParallelTest(TestCase):
def run_model(self, gpu_devices):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
workspace.FeedBlob(
core.ScopedBlobReference("seq_lengths"),
np.array([self.T] * self.batch_per_device, dtype=np.int32)
)
model.param_init_net.ConstantFill(
[],
"hidden_init",
value=0.0,
shape=[1, self.batch_per_device, self.hidden_dim]
)
model.param_init_net.ConstantFill(
[],
"cell_init",
value=0.0,
shape=[1, self.batch_per_device, self.hidden_dim]
)
output, _last_hidden, _, _last_state, = rnn_cell.LSTM(
model=model,
input_blob="data",
seq_lengths="seq_lengths",
initial_states=("hidden_init", "cell_init"),
dim_in=self.input_dim,
dim_out=self.hidden_dim,
scope="partest",
)
# A silly loss function
loss = model.AveragedLoss(
model.Sub([output, "target"], "dist"),
"loss",
)
loss = model.Scale(loss, "loss_scaled", scale=loss_scale)
return [loss]
def param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, param_grad, LR], param)
assert len(model.GetParams()) == len(model.params) // len(model._devices)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
name="recurrent_test{}".format(gpu_devices),
)
self.T = 8
self.batch_size = 64
self.input_dim = 8
self.hidden_dim = 31
self.batch_per_device = self.batch_size // len(gpu_devices)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
optimize_gradient_memory=True,
)
# Change all initializations to ConstantFill so that
# everything is deterministic
for op in model.param_init_net.Proto().op:
if op.type.endswith('Fill'):
op.type = 'ConstantFill'
# Each run has the same input, independent of the number of gpus
np.random.seed(20150210)
for i in range(0, 10):
full_data = np.random.rand(self.T, self.batch_size, self.input_dim)
full_target = np.random.rand(
self.T, self.batch_size, self.hidden_dim
)
for (j, g) in enumerate(gpu_devices):
st = j * self.batch_per_device
en = st + self.batch_per_device
data = full_data[:, st:en, :].astype(np.float32)
targets = full_target[:, st:en, :].astype(np.float32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/data".format(g), data)
workspace.FeedBlob("gpu_{}/target".format(g), targets)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)
return workspace.FetchBlob("gpu_0/partest/i2h_w")
def test_equiv_recurrent(self):
'''
Test that the model produces exactly the same results for a given
total batch size, independent of the number of GPUs.
'''
result_2gpus = self.run_model([0, 1])
result_1gpus = self.run_model([0])
print("result 1", result_1gpus.flatten()[:5])
print("result 2", result_2gpus.flatten()[:5])
self.assertTrue(np.allclose(result_1gpus, result_2gpus))
if workspace.NumCudaDevices() >= 4:
result_4gpus = self.run_model(range(4))
self.assertTrue(np.allclose(result_1gpus, result_4gpus))
if workspace.NumCudaDevices() >= 8:
result_8gpus = self.run_model(range(8))
self.assertTrue(np.allclose(result_1gpus, result_8gpus))
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class SparseDataParallelModelTest(TestCase):
'''
Create and run the model. We try storing the indices for Gather
both on CPU and on GPU.
'''
def run_model(self, V, gpu_devices, cpu_indices):
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
if cpu_indices:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
gathered_cpu = model.net.Gather(
[self.vecs, 'indices'], 'gathered_cpu')
gathered = model.CopyCPUToGPU(gathered_cpu, "gathered")
else:
gpu_vecs = model.param_init_net.CopyCPUToGPU(
self.vecs, "gpuvecs",
)
model.params.append(gpu_vecs)
gathered = model.net.Gather([gpu_vecs, 'indices'], 'gathered')
flattened = model.Flatten(gathered, "flattened")
fc = model.FC(flattened, "fc", 16 * 16, 1,
("ConstantFill", {}), ("ConstantFill", {}))
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
return [loss]
def param_update_fun(model):
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
LR = model.CopyCPUToGPU(self.LR, "LR")
for param in model.GetParams():
param_grad = model.param_to_grad[param]
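# Dense params yield a plain gradient blob; params reached through
# Gather yield a core.GradientSlice with .indices and .values.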
if not isinstance(param_grad, core.GradientSlice):
model.WeightedSum([param, ONE, param_grad, LR], param)
else:
param_momentum = model.param_init_net.ConstantFill(
[param],
param + '_momentum',
value=0.0,
)
model.net.SparseMomentumSGDUpdate(
[
param_grad.values,
param_momentum,
LR,
param,
param_grad.indices,
],
[
param_grad.values, param_momentum, param
],
momentum=0.1,
nesterov=0,
)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="sparse_test{}".format(gpu_devices),
)
with core.NameScope("cpu"):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.ITER = model.Iter("ITER")
self.LR = model.net.LearningRate(
[self.ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
self.vecs = model.param_init_net.UniformFill(
[], "vecs", shape=[V, 16])
if cpu_indices:
model.params.append(self.vecs)
self.ONE_CPU = model.param_init_net.ConstantFill(
[], "ONE_CPU", shape=[1], value=1.0,
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
)
# Update the vecs
if cpu_indices:
with core.NameScope("cpu"):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
for param in model.GetParams():
param_grad = model.param_to_grad[param]
model.ScatterWeightedSum([param, self.ONE_CPU,
param_grad.indices,
param_grad.values,
self.LR],
self.vecs)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
model.CopyGPUToCPU("gpu_0/gpuvecs", self.vecs)
np.random.seed(2603)
# Each run has the same input, independent of the number of gpus
batch_size = 64
for i in range(0, 10):
full_indices = np.random.permutation(V)[:batch_size * 16].reshape(
batch_size, 16
)
full_labels = full_indices[:, 0] % 2
batch_per_device = batch_size // len(gpu_devices)
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
indices = full_indices[st:en, :].astype(np.int32)
labels = full_labels[st:en].astype(np.float32)
device_for_indices = core.DeviceOption(caffe2_pb2.CPU)
if not cpu_indices:
device_for_indices = core.DeviceOption(caffe2_pb2.CUDA, g)
with core.DeviceScope(device_for_indices):
workspace.FeedBlob("gpu_{}/indices".format(g), indices)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/label".format(g), labels)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
# Force vecs to be the same on all runs
orig_vecs = np.random.rand(V, 16).astype(np.float32)
workspace.FeedBlob(
self.vecs,
orig_vecs
)
if not cpu_indices:
for g in gpu_devices:
workspace.FeedBlob(
"gpu_{}/gpuvecs".format(g),
orig_vecs,
device_option=core.DeviceOption(caffe2_pb2.CUDA, g),
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)
if len(gpu_devices) == 2:
open("dump.txt", "w").write(str(model.net.Proto()))
if not cpu_indices:
idx = workspace.FetchBlob("gpu_0/indices")
idx = list(idx.flatten())
n = len(idx)
nu = len(set(idx))
assert n == nu, "We cannot have duplicate indices"
# Sanity check to see the vecs were updated
self.assertFalse(
np.allclose(workspace.FetchBlob(self.vecs), orig_vecs))
return [workspace.FetchBlob(self.vecs if cpu_indices else "gpu_0/gpuvecs"),
workspace.FetchBlob("gpu_0/fc_w")]
def _test_equiv_sparse(self, cpu_indices):
'''
Test that the model produces exactly the same results for a given
total batch size, independent of the number of GPUs.
'''
V = 10000
result_2gpus = self.run_model(V, [0, 1], cpu_indices)
result_1gpus = self.run_model(V, [0], cpu_indices)
self.assertTrue(np.allclose(result_1gpus[0], result_2gpus[0]))
self.assertTrue(np.allclose(result_1gpus[1], result_2gpus[1]))
if workspace.NumCudaDevices() >= 4:
result_4gpus = self.run_model(V, range(4), cpu_indices)
self.assertTrue(np.allclose(result_1gpus[0], result_4gpus[0]))
self.assertTrue(np.allclose(result_1gpus[1], result_4gpus[1]))
if workspace.NumCudaDevices() >= 8:
result_8gpus = self.run_model(V, range(8), cpu_indices)
self.assertTrue(np.allclose(result_1gpus[0], result_8gpus[0]))
self.assertTrue(np.allclose(result_1gpus[1], result_8gpus[1]))
def test_equiv_sparse(self):
self._test_equiv_sparse(True)
self._test_equiv_sparse(False)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class ParallelizeGPUBMUFTest(TestCase):
def _run_model(self, gpu_devices):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def _model_build_fun(self, model, loss_scale):
fc = model.FC(
"data", "fc", 16, 1, ("ConstantFill", {}), ("ConstantFill", {})
)
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
return [loss]
def _param_update_fun(self, model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, grad, LR], param)
def _generate_data(self, gpu_devices):
np.random.seed(26)
# Each run has the same input, independent of the number of gpus
batch_size = 64
for _ in range(0, 10):
full_data = np.random.rand(batch_size, 16)
full_labels = np.round(full_data[:, 0])
batch_per_device = batch_size // len(gpu_devices)
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
data = full_data[st:en, :].astype(np.float32)
labels = full_labels[st:en].astype(np.float32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/data".format(g), data)
workspace.FeedBlob("gpu_{}/label".format(g), labels)
def test_parallelize_gpu_bmuf(self):
model = cnn.CNNModelHelper(
order="NHWC",
name="test"
)
gpu_ids = [0, 1]
def input_builder_fun(model):
return None
self._generate_data(gpu_ids)
data_parallel_model.Parallelize_GPU_BMUF(
model,
input_builder_fun,
self._model_build_fun,
self._param_update_fun,
devices=gpu_ids,
)
data_parallel_model.RunInitNet(model)
# Check initial momentum params are zeros
self.assertEqual(list(model._device_grouped_blobs.keys()), ['fc_w', 'fc_b'])
self.assertEqual(workspace.FetchBlob('gpu_0/fc_b_v'), 0)
np.testing.assert_equal(
workspace.FetchBlob('gpu_0/fc_w_v'),
np.zeros(16).astype(np.float32).reshape(1, 16)
)
# Run the algorithm for one iteration to have non-zero params.
data_parallel_model.RunNet(model, 1)
# Save iteration momentum and post local update params
v_b_ = workspace.FetchBlob('gpu_0/fc_b_v')
v_w_ = workspace.FetchBlob('gpu_0/fc_w_v')
workspace.RunNetOnce(model.net)
b_0_ = workspace.FetchBlob('gpu_0/fc_b')
w_0_ = workspace.FetchBlob('gpu_0/fc_w')
b_1_ = workspace.FetchBlob('gpu_1/fc_b')
w_1_ = workspace.FetchBlob('gpu_1/fc_w')
def getBlockAvg(param_name):
param_0 = workspace.FetchBlob("gpu_0/{}".format(param_name))
param_1 = workspace.FetchBlob("gpu_1/{}".format(param_name))
return (param_0 + param_1) / 2
# Compute block gradients.
b_g_ = workspace.FetchBlob('gpu_0/fc_b_g')
w_g_ = workspace.FetchBlob('gpu_0/fc_w_g')
workspace.RunNetOnce(model._global_model_param_updates_net)
g_b = (b_0_ + b_1_) / 2 - b_g_
g_w = (w_0_ + w_1_) / 2 - w_g_
v_b = workspace.FetchBlob('gpu_0/fc_b_v')
v_w = workspace.FetchBlob('gpu_0/fc_w_v')
w_g = workspace.FetchBlob('gpu_0/fc_w_g')
b_g = workspace.FetchBlob('gpu_0/fc_b_g')
w_0 = workspace.FetchBlob('gpu_0/fc_w')
b_0 = workspace.FetchBlob('gpu_0/fc_b')
w_1 = workspace.FetchBlob('gpu_1/fc_w')
b_1 = workspace.FetchBlob('gpu_1/fc_b')
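# BMUF block update checked below, with the block momentum 0.5 and block
# learning rate 1.0 that this test relies on:
#   v_t = 0.5 * v_{t-1} + (w_avg - w_prev)   (momentum step)
#   w_t = w_prev + v_t                       (param step)
# where w_avg is the per-gpu average of the locally updated params.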
# Check momentum update step
np.testing.assert_equal(v_b, 0.5 * v_b_ + g_b)
np.testing.assert_equal(v_w, 0.5 * v_w_ + g_w)
np.testing.assert_equal(w_g, w_0)
np.testing.assert_equal(w_g, w_1)
np.testing.assert_equal(b_g, b_0)
np.testing.assert_equal(b_g, b_1)
# Check params update step
np.testing.assert_equal(w_0, w_g_ + v_w)
np.testing.assert_equal(b_0, b_g_ + v_b)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class SparseDataParallelModelTestWithSharedIndices(TestCase):
'''
Create and run the model. All Gather ops share a single indices blob,
so each gathered gradient slice must have one row per index.
'''
def run_model(self, V, gpu_devices):
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
gpu_vecs_gathered = []
gpu_vecs = []
for num, vec in enumerate(self.vecs):
gpu_vec = model.param_init_net.CopyCPUToGPU(
vec, 'gpuvec_{}'.format(num),
)
if num != 2:
model.params.append(gpu_vec)
gpu_vecs.append(gpu_vec)
for num, gpu_vec in enumerate(gpu_vecs):
gpu_vec_gathered = model.net.Gather(
[gpu_vec, 'indices'],
['gpu_vec_gathered_{}'.format(num)]
)
gpu_vecs_gathered.append(gpu_vec_gathered)
assert len(gpu_vecs_gathered) == 3
fc = model.net.FC(
[
gpu_vecs_gathered[2],
gpu_vecs_gathered[0],
gpu_vecs_gathered[1],
],
['fc'],
)
_, loss = model.net.SoftmaxWithLoss(
[fc, 'label'],
['ce_loss', 'avg_loss'],
only_loss=True,
)
loss = model.Scale(loss, scale=loss_scale)
model.net.Print(loss, [], limit=10)
return [loss]
def param_update_fun(model):
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
LR = model.CopyCPUToGPU(self.LR, "LR")
for param in model.GetParams():
param_grad = model.param_to_grad[param]
if not isinstance(param_grad, core.GradientSlice):
model.WeightedSum([param, ONE, param_grad, LR], param)
else:
model.net.ScatterWeightedSum(
[
param,
ONE,
param_grad.indices,
param_grad.values,
ONE,
],
param,
)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="sparse_test{}".format(gpu_devices),
)
batch_size = 32
batch_per_device = batch_size // len(gpu_devices)
with core.NameScope("cpu"):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.ITER = model.Iter("ITER")
self.LR = model.net.LearningRate(
[self.ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
'''
self.vecs consists of 3 big blobs on which we call Gather:
1) FC weights, shape=(V, 16)
2) FC bias, shape=(V,)
3) FC input, shape=(batch_per_device, 16)
'''
self.vecs = [
model.param_init_net.UniformFill(
[], "vec_{}".format(num), shape=[V, 16])
for num in range(2)
]
self.vecs.append(
model.param_init_net.UniformFill(
[],
"vec_2", shape=[batch_per_device, 16]
)
)
self.ONE_CPU = model.param_init_net.ConstantFill(
[], "ONE_CPU", shape=[1], value=1.0,
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
)
# Update the vecs
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
for num, vec in enumerate(self.vecs[:-1]):
model.CopyGPUToCPU("gpu_0/gpuvec_{}".format(num), vec)
# Each run has the same input, independent of the number of gpus
for i in range(0, 10):
np.random.seed(2603)
full_indices = np.random.permutation(V)[:batch_size].reshape(
batch_size
)
full_labels = full_indices[:] % batch_per_device
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
indices = full_indices[st:en].astype(np.int32)
labels = full_labels[st:en].astype(np.int32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/indices".format(g), indices)
workspace.FeedBlob("gpu_{}/label".format(g), labels)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
# Force vecs to be the same on all runs
orig_vecs = [
np.random.rand(V, 16).astype(np.float32),
np.random.rand(V).astype(np.float32),
np.random.rand(V, 16).astype(np.float32),
]
for vec, orig_vec in zip(self.vecs, orig_vecs):
workspace.FeedBlob(
vec,
orig_vec
)
for g in gpu_devices:
for num, orig_vec in enumerate(orig_vecs):
workspace.FeedBlob(
"gpu_{}/gpuvec_{}".format(g, num),
orig_vec,
device_option=core.DeviceOption(
caffe2_pb2.CUDA, g),
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)
idx = workspace.FetchBlob('gpu_0/indices')
grad_slices = [
workspace.FetchBlob(
'gpu_{}/gpu_vec_gathered_{}_grad'.format(g, num))
for g in gpu_devices for num in range(2)
]
for grad_slice in grad_slices:
assert len(idx) == len(grad_slice), (
'Number of indices {} is not the same as the number of gradient '
'slices {}. This might lead to illegal memory access'.format(
len(idx), len(grad_slice)
)
)
def test_sparse_shared_indices_gpu(self):
'''
Test that the model has the same number of indices and gradient rows
for a given total batch size, independent of the number of GPUs.
'''
V = 10000
self.run_model(V, [0, 1])
self.run_model(V, [0])
if workspace.NumCudaDevices() >= 4:
self.run_model(V, range(4))
if workspace.NumCudaDevices() >= 8:
self.run_model(V, range(8))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import (
layer_model_instantiator,
schema,
workspace,
)
from caffe2.python.layers.layers import (
InstantiationContext,
)
from caffe2.python.layers.tags import Tags
from caffe2.python.layer_test_util import (
LayersTestCase,
OpSpec,
)
from caffe2.python.layers.layers import (
set_request_only,
is_request_only_scalar,
)
class TestLayers(LayersTestCase):
def testFCWithoutBias(self):
output_dims = 2
fc_without_bias = self.model.FCWithoutBias(
self.model.input_feature_schema.float_features, output_dims)
self.model.output_schema = fc_without_bias
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
fc_without_bias
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
]
)
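# In OpSpec, a None input or output field acts as a wildcard:
# assertNetContainOps then matches that op on its type alone.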
mat_mul_spec = OpSpec(
"MatMul",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
],
fc_without_bias.field_blobs()
)
self.assertNetContainOps(train_net, [mat_mul_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [mat_mul_spec])
def testSamplingTrain(self):
output_dims = 1000
indices = self.new_record(schema.Scalar((np.int32, (10,))))
sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))
sampled_fc = self.model.SamplingTrain(
schema.Struct(
('input', self.model.input_feature_schema.float_features),
('indices', indices),
('sampling_prob', sampling_prob),
),
"FC",
output_dims,
)
self.model.output_schema = sampled_fc
# Check that we don't add a prediction layer to the model
self.assertEqual(1, len(self.model.layers))
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
sampled_fc
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("UniformFill", None, None),
]
)
sampled_fc_layer = self.model.layers[0]
gather_w_spec = OpSpec(
"Gather",
[
init_ops[0].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[0]
]
)
gather_b_spec = OpSpec(
"Gather",
[
init_ops[1].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[1]
]
)
train_fc_spec = OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
] + sampled_fc_layer._prediction_layer.train_param_blobs,
sampled_fc.field_blobs()
)
log_spec = OpSpec("Log", [sampling_prob()], [None])
sub_spec = OpSpec(
"Sub",
[sampled_fc.field_blobs()[0], None],
sampled_fc.field_blobs()
)
train_ops = self.assertNetContainOps(
train_net,
[gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])
self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[
OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
init_ops[1].output[0],
],
sampled_fc.field_blobs()
)
]
)
def testBatchLRLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchMSELoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
))
loss = self.model.BatchMSELoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSigmoidCrossEntropyLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, (32,)))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSoftmaxLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
@given(
X=hu.arrays(dims=[5, 2]),
num_to_collect=st.integers(min_value=1, max_value=10),
)
def testLastNWindowCollector(self, X, num_to_collect):
input_record = self.new_record(schema.Scalar(np.float32))
schema.FeedRecord(input_record, [X])
last_n = self.model.LastNWindowCollector(input_record, num_to_collect)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(last_n)
start = max(0, 5 - num_to_collect)
npt.assert_array_equal(X[start:], output_record())
def testUniformSampling(self):
input_record = self.new_record(schema.Scalar(np.int32))
input_array = np.array([3, 10, 11, 15, 20, 99], dtype=np.int32)
schema.FeedRecord(input_record, [input_array])
num_samples = 20
num_elements = 100
uniform_sampling_output = self.model.UniformSampling(
input_record, num_samples, num_elements)
self.model.loss = uniform_sampling_output
self.run_train_net()
samples = workspace.FetchBlob(uniform_sampling_output.samples())
sampling_prob = workspace.FetchBlob(
uniform_sampling_output.sampling_prob())
self.assertEqual(num_samples, len(samples))
np.testing.assert_array_equal(input_array, samples[:len(input_array)])
np.testing.assert_almost_equal(
np.array([float(num_samples) / num_elements] * num_samples,
dtype=np.float32),
sampling_prob
)
def testGatherRecord(self):
indices = np.array([1, 3, 4], dtype=np.int32)
dense = np.array(range(20), dtype=np.float32).reshape(10, 2)
lengths = np.array(range(10), dtype=np.int32)
items = np.array(range(lengths.sum()), dtype=np.int64)
items_lengths = np.array(range(lengths.sum()), dtype=np.int32)
items_items = np.array(range(items_lengths.sum()), dtype=np.int64)
record = self.new_record(schema.Struct(
('dense', schema.Scalar(np.float32)),
('sparse', schema.Struct(
('list', schema.List(np.int64)),
('list_of_list', schema.List(schema.List(np.int64))),
)),
('empty_struct', schema.Struct())
))
indices_record = self.new_record(schema.Scalar(np.int32))
input_record = schema.Struct(
('indices', indices_record),
('record', record),
)
schema.FeedRecord(
input_record,
[indices, dense, lengths, items, lengths, items_lengths,
items_items])
gathered_record = self.model.GatherRecord(input_record)
self.assertTrue(schema.equal_schemas(gathered_record, record))
self.run_train_net_forward_only()
gathered_dense = workspace.FetchBlob(gathered_record.dense())
np.testing.assert_array_equal(
np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
gathered_lengths = workspace.FetchBlob(
gathered_record.sparse.list.lengths())
np.testing.assert_array_equal(
np.concatenate([lengths[i:i + 1] for i in indices]),
gathered_lengths)
gathered_items = workspace.FetchBlob(
gathered_record.sparse.list.items())
offsets = lengths.cumsum() - lengths
np.testing.assert_array_equal(
np.concatenate([
items[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]), gathered_items)
gathered_items_lengths = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.lengths())
np.testing.assert_array_equal(
np.concatenate([
items_lengths[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]),
gathered_items_lengths
)
nested_offsets = []
nested_lengths = []
nested_offset = 0
j = 0
for l in lengths:
nested_offsets.append(nested_offset)
nested_length = 0
for _i in range(l):
nested_offset += items_lengths[j]
nested_length += items_lengths[j]
j += 1
nested_lengths.append(nested_length)
gathered_items_items = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.items())
np.testing.assert_array_equal(
np.concatenate([
items_items[nested_offsets[i]:
nested_offsets[i] + nested_lengths[i]]
for i in indices
]),
gathered_items_items
)
def testMapToRange(self):
input_record = self.new_record(schema.Scalar(np.int32))
map_to_range_output = self.model.MapToRange(input_record,
max_index=100)
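# MapToRange builds an id map on the fly: during training each unseen raw
# id gets the next dense index (starting from 1, as asserted below), while
# in eval/predict contexts unseen ids map to 0.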
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
schema.FeedRecord(
input_record,
[np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)]
)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32),
indices
)
eval_net = self.get_eval_net()
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 7, 5, 0, 0], dtype=np.int32),
indices
)
predict_net = self.get_predict_net()
schema.FeedRecord(
input_record,
[np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(predict_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32),
indices
)
def testSelectRecordByContext(self):
float_features = self.model.input_feature_schema.float_features
float_array = np.array([1.0, 2.0], dtype=np.float32)
schema.FeedRecord(float_features, [float_array])
with Tags(Tags.EXCLUDE_FROM_PREDICTION):
log_float_features, = self.model.Log(float_features, 1)
joined = self.model.SelectRecordByContext(
schema.Struct(
(InstantiationContext.PREDICTION, float_features),
(InstantiationContext.TRAINING, log_float_features),
# TODO: TRAIN_ONLY layers are also generated in eval
(InstantiationContext.EVAL, log_float_features),
)
)
# model.output_schema has to be a struct
self.model.output_schema = schema.Struct((
'joined', joined
))
predict_net = layer_model_instantiator.generate_predict_net(self.model)
workspace.RunNetOnce(predict_net)
predict_output = schema.FetchRecord(predict_net.output_record())
npt.assert_array_equal(float_array,
predict_output['joined']())
eval_net = layer_model_instantiator.generate_eval_net(self.model)
workspace.RunNetOnce(eval_net)
eval_output = schema.FetchRecord(eval_net.output_record())
npt.assert_array_equal(np.log(float_array),
eval_output['joined']())
_, train_net = (
layer_model_instantiator.generate_training_nets_forward_only(
self.model
)
)
workspace.RunNetOnce(train_net)
train_output = schema.FetchRecord(train_net.output_record())
npt.assert_array_equal(np.log(float_array),
train_output['joined']())
def testFunctionalLayer(self):
def normalize(net, in_record, out_record):
mean = net.ReduceFrontMean(in_record(), 1)
net.Sub(
[in_record(), mean],
out_record[0](),
broadcast=1)
normalized = self.model.Functional(
self.model.input_feature_schema.float_features, 1,
normalize, name="normalizer")
# Attach metadata to one of the outputs and use it in FC
normalized[0].set_type((np.float32, 32))
self.model.output_schema = self.model.FC(normalized[0], 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelper(self):
mean = self.model.ReduceFrontMean(
self.model.input_feature_schema.float_features, 1)
normalized = self.model.Sub(
schema.Tuple(
self.model.input_feature_schema.float_features, mean[0]),
1, broadcast=1)
# Attach metadata to one of the outputs and use it in FC
normalized[0].set_type((np.float32, (32,)))
self.model.output_schema = self.model.FC(normalized[0], 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelperAutoInference(self):
softsign = self.model.Softsign(
schema.Tuple(self.model.input_feature_schema.float_features),
1)
assert len(softsign.field_types()) == 1
assert softsign.field_types()[0].base == np.float32
assert softsign.field_types()[0].shape == (32,)
self.model.output_schema = self.model.FC(softsign[0], 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 2
assert ops[0].type == "Softsign"
assert ops[1].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[0].output) == 1
assert ops[0].output[0] in ops[1].input
def testFunctionalLayerHelperAutoInferenceScalar(self):
loss = self.model.AveragedLoss(self.model.input_feature_schema, 1)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual(tuple(), loss.field_types()[0].shape)
def testFunctionalLayerInputCoercion(self):
one = self.model.global_constants['ONE']
two = self.model.Add([one, one], 1)
self.model.loss = two
self.run_train_net()
data = workspace.FetchBlob(two.field_blobs()[0])
np.testing.assert_array_equal([2.0], data)
def testFunctionalLayerWithOutputNames(self):
k = 3
topk = self.model.TopK(
self.model.input_feature_schema,
output_names_or_num=['values', 'indices'],
k=k,
)
self.assertEqual(2, len(topk.field_types()))
self.assertEqual(np.float32, topk.field_types()[0].base)
self.assertEqual((k,), topk.field_types()[0].shape)
self.assertEqual(np.int32, topk.field_types()[1].base)
self.assertEqual((k,), topk.field_types()[1].shape)
self.assertEqual(['TopK/values', 'TopK/indices'], topk.field_blobs())
def testFunctionalLayerWithOutputDtypes(self):
loss = self.model.AveragedLoss(
self.model.input_feature_schema,
1,
output_dtypes=(np.float32, (1,)),
)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual((1,), loss.field_types()[0].shape)
def testPropagateRequestOnly(self):
# test case when output is request only
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (32, )))),
('input2', schema.Scalar((np.float32, (64, )))),
('input3', schema.Scalar((np.float32, (16, )))),
))
set_request_only(input_record)
concat_output = self.model.Concat(input_record)
self.assertEqual(is_request_only_scalar(concat_output), True)
# test case when output is not request only
input_record2 = self.new_record(schema.Struct(
('input4', schema.Scalar((np.float32, (100, ))))
)) + input_record
concat_output2 = self.model.Concat(input_record2)
self.assertEqual(is_request_only_scalar(concat_output2), False)
def testSetRequestOnly(self):
input_record = schema.Scalar(np.int64)
schema.attach_metadata_to_scalars(
input_record,
schema.Metadata(
categorical_limit=100000000,
expected_value=99,
feature_specs=schema.FeatureSpec(
feature_ids=[1, 100, 1001]
)
)
)
set_request_only(input_record)
self.assertEqual(input_record.metadata.categorical_limit, 100000000)
self.assertEqual(input_record.metadata.expected_value, 99)
self.assertEqual(
input_record.metadata.feature_specs.feature_ids,
[1, 100, 1001]
)
# TODO(jiayq): as more and more tests move to hypothesis tests, we
# can gradually remove this test script. DO NOT ADD MORE TESTS TO THIS
# FILE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import \
core, device_checker, gradient_checker, test_util, workspace, cnn
import caffe2.python.hypothesis_test_util as hu
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
import collections
import unittest
if workspace.has_gpu_support and workspace.NumCudaDevices() > 0:
gpu_device_option = caffe2_pb2.DeviceOption()
gpu_device_option.device_type = caffe2_pb2.CUDA
cpu_device_option = caffe2_pb2.DeviceOption()
gpu_device_checker = device_checker.DeviceChecker(
0.01, [gpu_device_option]
)
device_checker = device_checker.DeviceChecker(
0.01, [gpu_device_option, cpu_device_option]
)
gpu_gradient_checkers = [
gradient_checker.GradientChecker(
0.005, 0.05, gpu_device_option, "gpu_checker_ws"
),
]
gradient_checkers = [
gradient_checker.GradientChecker(
0.005, 0.05, gpu_device_option, "gpu_checker_ws"
),
gradient_checker.GradientChecker(
0.01, 0.05, cpu_device_option, "cpu_checker_ws"
),
]
else:
cpu_device_option = caffe2_pb2.DeviceOption()
gpu_device_option = None
gpu_device_checker = device_checker.DeviceChecker(
0.01, []
)
device_checker = device_checker.DeviceChecker(0.01, [cpu_device_option])
gradient_checkers = [
gradient_checker.GradientChecker(
0.01, 0.05, cpu_device_option, "cpu_checker_ws"
)
]
gpu_gradient_checkers = []
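# As used here, DeviceChecker takes (threshold, device_options) and
# GradientChecker takes (stepsize, threshold, device_option, workspace_name):
# gradients are estimated numerically with the given stepsize and must match
# the analytic gradients within the threshold.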
class TestLRN(test_util.TestCase):
def setUp(self):
self.test_configs = [(6, 10), (3, 13), ]
def testLRN(self):
for input_size, depth in self.test_configs:
op = core.CreateOperator("LRN",
["X"],
["Y", "Y_scale"],
size=11,
alpha=0.001,
beta=0.5,
bias=2.0,
order="NHWC"
)
X = np.random.rand(2, input_size, input_size,
depth).astype(np.float32)
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestFlatten(test_util.TestCase):
def testFlatten(self):
op = core.CreateOperator("Flatten", ["X"], ["Y"])
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestConcat(test_util.TestCase):
def setUp(self):
self.test_configs = [
# input_size, depth1, depth2, depth3, depth4
(3, 2, 3, 4, 5),
(4, 5, 4, 3, 2),
]
def testConcatNHWC(self):
for input_size, d1, d2, d3, d4 in self.test_configs:
op = core.CreateOperator("Concat",
["X1", "X2", "X3", "X4"],
["Y", "Y_dims"],
order="NHWC"
)
Xs = [
np.random.rand(2, input_size, input_size,
d1).astype(np.float32),
np.random.rand(2, input_size, input_size,
d2).astype(np.float32),
np.random.rand(2, input_size, input_size,
d3).astype(np.float32),
np.random.rand(2, input_size, input_size, d4).astype(np.float32)
]
for i in range(4):
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
def testConcatNCHW(self):
for input_size, d1, d2, d3, d4 in self.test_configs:
op = core.CreateOperator("Concat",
["X1", "X2", "X3", "X4"],
["Y", "Y_dims"],
order="NCHW"
)
Xs = [
np.random.rand(2, d1, input_size,
input_size).astype(np.float32),
np.random.rand(2, d2, input_size,
input_size).astype(np.float32),
np.random.rand(2, d3, input_size,
input_size).astype(np.float32),
np.random.rand(2, d4, input_size, input_size).astype(np.float32)
]
for i in range(4):
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
class TestRelu(test_util.TestCase):
def setUp(self):
self.test_configs = [
# input size
# (0, 1),
(1, 1),
(2, 1),
(1, 3, 3, 1),
(2, 3, 3, 1),
(1, 5, 5, 3),
(2, 5, 5, 3),
]
def testRelu(self):
for input_size in self.test_configs:
op = core.CreateOperator("Relu", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32)
# move inputs away from zero to avoid the Relu kink (the
# non-differentiable point), which breaks numeric gradient checks
X += 0.01 * np.sign(X)
X[X == 0] = 0.01
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestTanh(test_util.TestCase):
def setUp(self):
self.test_configs = [
# (0, 1),
(1, 1),
(2, 1),
(1, 2, 3, 4),
]
def testTanh(self):
for input_size in self.test_configs:
op = core.CreateOperator("Tanh", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestExp(test_util.TestCase):
def setUp(self):
self.test_configs = [
# (0, 1),
(1, 1),
(2, 1),
(1, 2, 3, 4),
]
def testExp(self):
for input_size in self.test_configs:
op = core.CreateOperator("Exp", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestSigmoid(test_util.TestCase):
def setUp(self):
self.test_configs = [
# (0, 1),
(1, 1),
(2, 1),
(1, 2, 3, 4),
]
def testSigmoid(self):
for input_size in self.test_configs:
op = core.CreateOperator("Sigmoid", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestSum(test_util.TestCase):
def setUp(self):
self.test_configs = [
# ((0, 1), False),
((1, 2, 3, 4), True),
((1, 2, 3, 4), False)]
def testSum(self):
for (input_size, in_place) in self.test_configs:
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1 = np.random.rand(*input_size).astype(np.float32) - 0.5
X2 = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X1, X2], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(
op, [X1, X2], 0, [0])
self.assertTrue(res)
class TestMakeTwoClass(test_util.TestCase):
def setUp(self):
self.test_configs = [
# input size
# (0, 1),
(1,),
(7,),
(1, 3),
(2, 5),
]
def testMakeTwoClass(self):
for input_size in self.test_configs:
op = core.CreateOperator("MakeTwoClass", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32)
# push values away from the 0 and 1 boundaries to avoid gradient problems
X[X < 0.01] += 0.01
X[X > 0.99] -= 0.01
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestNetGradientChecker(test_util.TestCase):
def test_net_gradient_checker(self):
model = cnn.CNNModelHelper(name="test")
const = model.net.AddExternalInputs("const1", "const2")
fc = model.FC(dim_in=3, dim_out=4, blob_in="X", blob_out="Y", axis=0)
dist = [model.net.SquaredL2Distance([fc, c]) for c in const]
losses = [model.net.AveragedLoss(d) for d in dist] # using two losses here
workspace.RunNetOnce(model.param_init_net)
gradient_checker.NetGradientChecker.Check(
model.net,
outputs_with_grad=losses,
input_values={"X": np.array([1, 2, 3], dtype="float32"),
const[0]: np.array([1, 1, 1, 1], dtype="float32"),
const[1]: np.array([2, 2, 2, 2], dtype="float32")},
input_to_check="X",
)
if __name__ == '__main__':
workspace.GlobalInit(["python"])
unittest.main()
## @package attention
# Module caffe2.python.attention
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew
class AttentionType:
Regular, Recurrent = range(2)
def s(scope, name):
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
# c_i = \sum_j w_{ij}\textbf{s}_j
def _calc_weighted_context(
model,
encoder_outputs_transposed,
encoder_output_dim,
attention_weights_3d,
scope,
):
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = model.net.BatchMatMul(
[encoder_outputs_transposed, attention_weights_3d],
s(scope, 'attention_weighted_encoder_context'),
)
# [1, batch_size, encoder_output_dim]
attention_weighted_encoder_context, _ = model.net.Reshape(
attention_weighted_encoder_context,
[
attention_weighted_encoder_context,
s(scope, 'attention_weighted_encoder_context_old_shape'),
],
shape=[1, -1, encoder_output_dim],
)
return attention_weighted_encoder_context
# Calculate a softmax over the passed in attention energy logits
def _calc_attention_weights(
model,
attention_logits_transposed,
scope,
):
# TODO: we could try to force some attention weights to be zeros,
# based on encoder_lengths.
# [batch_size, encoder_length, 1]
attention_weights_3d = brew.softmax(
model,
attention_logits_transposed,
s(scope, 'attention_weights_3d'),
engine='CUDNN',
axis=1,
)
return attention_weights_3d
# e_{ij} = \textbf{v}^T tanh \alpha(\textbf{h}_{i-1}, \textbf{s}_j)
def _calc_attention_logits_from_sum_match(
model,
decoder_hidden_encoder_outputs_sum,
encoder_output_dim,
scope,
):
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Tanh(
decoder_hidden_encoder_outputs_sum,
decoder_hidden_encoder_outputs_sum,
)
attention_v = model.param_init_net.XavierFill(
[],
s(scope, 'attention_v'),
shape=[1, encoder_output_dim],
)
model.add_param(attention_v)
attention_zeros = model.param_init_net.ConstantFill(
[],
s(scope, 'attention_zeros'),
value=0.0,
shape=[1],
)
# [encoder_length, batch_size, 1]
attention_logits = model.net.FC(
[decoder_hidden_encoder_outputs_sum, attention_v, attention_zeros],
[s(scope, 'attention_logits')],
axis=2,
)
# [batch_size, encoder_length, 1]
attention_logits_transposed = model.Transpose(
attention_logits,
s(scope, 'attention_logits_transposed'),
axes=[1, 0, 2],
)
return attention_logits_transposed
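# A minimal numpy sketch (illustrative only; the function name and numpy usage
# are assumptions) of the logits above: the FC with axis=2 applies the row
# vector attention_v to tanh of the sum at each (time, batch) position.
def _example_attention_logits_numpy(decoder_hidden_encoder_outputs_sum, v):
    """Hypothetical numpy equivalent of _calc_attention_logits_from_sum_match."""
    import numpy as np
    # decoder_hidden_encoder_outputs_sum: [encoder_length, batch, dim]
    # v: [1, dim], as filled by XavierFill above
    logits = np.tensordot(
        np.tanh(decoder_hidden_encoder_outputs_sum), v[0], axes=([2], [0]))
    # [encoder_length, batch, 1], prior to the transpose to [batch, enc_len, 1]
    return logits[:, :, None]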
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
model,
input,
dim_in,
dim_out,
scope,
name,
):
output = brew.fc(
model,
input,
s(scope, name),
dim_in=dim_in,
dim_out=dim_out,
axis=2,
)
output = model.net.Squeeze(
output,
output,
dims=[0],
)
return output
# Implements recurrent attention (RecAtt) as described in section 4.1 of http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
attention_weighted_encoder_context_t_prev,
scope,
):
weighted_prev_attention_context = _apply_fc_weight_for_sum_match(
model=model,
input=attention_weighted_encoder_context_t_prev,
dim_in=encoder_output_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_prev_attention_context',
)
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state',
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum_tmp = model.net.Add(
[
weighted_prev_attention_context,
weighted_decoder_hidden_state,
],
s(scope, 'decoder_hidden_encoder_outputs_sum_tmp'),
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[
weighted_encoder_outputs,
decoder_hidden_encoder_outputs_sum_tmp,
],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope,
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope,
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope,
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum,
]
def apply_regular_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
scope,
):
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state',
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[weighted_encoder_outputs, weighted_decoder_hidden_state],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
use_grad_hack=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope,
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope,
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope,
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum,
]
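# Illustrative usage sketch (hypothetical blob names and dimensions, not part
# of the original module): one decoder step with regular attention.
#
#   context_t, weights_t, _ = apply_regular_attention(
#       model,
#       encoder_output_dim=256,
#       encoder_outputs_transposed=encoder_outputs_transposed,
#       weighted_encoder_outputs=weighted_encoder_outputs,
#       decoder_hidden_state_t=decoder_hidden_state_t,
#       decoder_hidden_state_dim=512,
#       scope='decoder',
#   )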
|
## @package task
# Module caffe2.python.task
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, context
from caffe2.python.schema import Field, from_blob_list
from collections import defaultdict
from copy import copy
def _merge_node_kwargs(a, b):
# TODO(azzolini): consistency checks
if a is None:
return b
if b is None:
return a
c = copy(a)
c.update(b)
return c
@context.define_context(allow_default=True)
class Cluster(object):
"""
Context that keeps track of all the node names used.
    Users shouldn't have to use it directly, since a Cluster is automatically
generated at the first usage of 'Node'.
"""
def __init__(self):
# list instead of set to keep order
self._nodes = []
self._node_kwargs = {}
def add_node(self, node):
if str(node) not in self._nodes:
self._nodes.append(str(node))
self._node_kwargs[str(node)] = _merge_node_kwargs(
node.kwargs(),
self._node_kwargs.get(str(node)))
def nodes(self):
"""
Returns the list of unique node names used within this context.
"""
return self._nodes
def node_kwargs(self):
return self._node_kwargs
@context.define_context(allow_default=True)
class Node(object):
"""
A Node context is used to indicate that all Tasks instantiated within will
run on the given node name. (Only the name of the node actually counts.)
Example:
with TaskGroup() as tg:
with Node('node1'):
s1 = execution_step(...)
Task(step=s1)
with Node('node2'):
s2 = execution_step(...)
with Node('node1'):
s3 = execution_step(...)
In this example, all three execution steps will run in parallel.
Moreover, s1 and s3 will run on the same node, and can see each
    other's blobs.
Additionally, a Node can be passed implementation-specific kwargs,
in order to specify properties of the node.
"""
def __init__(self, node='local', **kwargs):
self._name = str(node)
self._kwargs = kwargs
Cluster.current().add_node(self)
def __str__(self):
return self._name
def kwargs(self):
return self._kwargs
class WorkspaceType(object):
"""
Determines whether tasks of a TaskGroup will run directly at the global
workspace, which is kept alive across runs, or whether a new child
workspace will be created for the run and destroyed afterwards.
"""
PRIVATE = 'private'
GLOBAL = 'global'
def get_setup_nets(key, steps_or_nets, target):
init_net = core.Net(key + '/init')
exit_net = core.Net(key + '/exit')
init_nets = []
exit_nets = []
objs = []
for step_or_net in steps_or_nets:
if hasattr(step_or_net, 'get_all_attributes'):
objs += step_or_net.get_all_attributes(key)
elif hasattr(step_or_net, 'get_attributes'):
objs += step_or_net.get_attributes(key)
for obj in objs:
        # These checks are needed to allow nesting of TaskGroup,
        # a feature that is not yet implemented.
if hasattr(obj, '_setup_used') and obj._setup_used:
continue
if hasattr(obj, '_setup_target') and obj._setup_target != target:
continue
if hasattr(obj, 'setup'):
nets = obj.setup(init_net)
if isinstance(nets, (list, tuple)):
init_nets += nets
elif isinstance(nets, (core.Net, core.ExecutionStep)):
init_nets.append(nets)
elif nets is not None:
raise TypeError('Unsupported type for setup: %s' % type(nets))
obj._setup_used = True
if hasattr(obj, 'exit'):
nets = obj.exit(exit_net)
if isinstance(nets, (list, tuple)):
exit_nets += nets
elif isinstance(nets, (core.Net, core.ExecutionStep)):
exit_nets.append(nets)
elif nets is not None:
raise TypeError('Unsupported type for setup: %s' % type(nets))
obj._setup_used = True
if len(init_net.Proto().op) > 0:
init_nets.insert(0, init_net)
if len(exit_net.Proto().op) > 0:
exit_nets.insert(0, exit_net)
return init_nets, exit_nets
@context.define_context(allow_default=False)
class TaskGroup(object):
"""
Context that gathers tasks which will run concurrently, potentially on
multiple nodes. All tasks in the same node will share the same workspace
and thus can share blobs, while tasks running in different nodes won't
be able to directly share data.
All tasks of the task group will start concurrently, and the task group
will finish execution when the last task of the group finishes.
Example:
        # suppose that s1 ... s5 are execution steps or nets.
with TaskGroup() as tg:
# these tasks go to default node 'local'
Task(step=s1)
Task(step=s2)
with Node('n2'):
Task(step=s3)
with Node('n1'):
Task(step=s4)
with Node('n2'):
Task(step=s5)
# this will run all steps in parallel.
# s1 and s2 will run at default node 'local'
# s3 and s5 will run at node 'n2'
# s4 will run at node 'n1'
session.run(tg)
"""
LOCAL_SETUP = 'local_setup'
def __init__(self, workspace_type=None):
self._plan_cache = None
self._tasks = []
self._already_used = False
self._prev_active = None
self._tasks_to_add = []
self._report_nets = {}
self._report_steps = []
self._workspace_type = workspace_type
self._tasks_by_node = None
def add(self, task):
assert not self._already_used, (
'Cannot add Task to an already used TaskGroup.')
assert (
self._workspace_type is None or
task._workspace_type is None or
self._workspace_type == task._workspace_type)
if task._workspace_type is None:
task._workspace_type = (
self._workspace_type or WorkspaceType.PRIVATE)
if self._workspace_type is None:
self._workspace_type = task._workspace_type
task._notify_used()
self._tasks.append(task)
def tasks(self):
for task in self._tasks_to_add:
self.add(task)
self._tasks_to_add = []
self._already_used = True
return self._tasks
def num_registered_tasks(self):
return len(self._tasks_to_add) + len(self._tasks)
def used_nodes(self):
# use list to keep order
used = []
for task in self._tasks + self._tasks_to_add:
if task.node not in used:
used.append(task.node)
return used
def report_step(self, step=None, node=None, interval_ms=1000):
"""
Add a "report step" to this TaskGroup. This step will run repeatedly
every `interval_ms` milliseconds for the duration of the TaskGroup
execution on each of the nodes. It is guaranteed that this step
will be run at least once after every Task in the node has finished.
"""
step = core.to_execution_step(step)
step.RunEveryMillis(interval_ms)
self._report_steps.append((str(node or Node.current(node)), step))
def report_net(self, net=None, node=None, report_interval=5):
"""
DEPRECATED. Use report_step instead.
"""
node = str(node or Node.current(node))
assert net is None or node not in self._report_nets
if node not in self._report_nets:
self._report_nets[node] = (
net if net else core.Net('%s/reporter' % node),
report_interval)
return self._report_nets[node][0]
def tasks_by_node(self, node_remap=None):
# tasks_by_node can't be called twice because the setup won't
# work properly a second time.
node_map = {}
for task in self.tasks():
node_map[task.node] =\
node_remap(task.node) if node_remap else task.node
if self._tasks_by_node is not None:
tasks_by_node, prev_node_map = self._tasks_by_node
assert prev_node_map == node_map, (
'Cannot call tasks_by_node multiple times.')
return tasks_by_node
# now we have report_steps. report_net is deprecated
for node, (net, interval) in self._report_nets.items():
self.report_step(net, node=node, interval_ms=interval * 1000)
self._report_nets = {}
tasks_by_node = defaultdict(list)
for task in self.tasks():
mapped_node = node_map[task.node]
tasks_by_node[mapped_node].append(task)
report_steps_by_node = defaultdict(list)
for original_node, step in self._report_steps:
report_steps_by_node[node_map[original_node]].append(step)
grouped_by_node = TaskGroup()
for node, tasks in tasks_by_node.items():
report_steps = report_steps_by_node[node]
node_inits, node_exits = get_setup_nets(
TaskGroup.LOCAL_SETUP,
[t.get_step() for t in tasks] + report_steps,
self)
# shortcut for single task with no queue
steps = report_steps
outputs = []
workspace_type = tasks[0].workspace_type()
for task in tasks:
step = task.get_step()
if step is not None:
steps.append(step)
outputs += task.outputs()
assert workspace_type == task.workspace_type(), (
'All tasks for a given node need same workspace type.')
if len(steps) == 0:
steps.append(core.execution_step('empty', []))
if len(steps) == 1:
step = steps[0]
else:
step = core.execution_step(
'%s:body' % node, steps, concurrent_substeps=True)
if len(node_inits) > 0 or len(node_exits) > 0:
steps = []
if len(node_inits) > 0:
steps.append(
core.execution_step('%s:init' % node, node_inits))
steps.append(step)
if len(node_exits) > 0:
steps.append(
core.execution_step('%s:exit' % node, node_exits))
step = core.execution_step(node, steps)
Task(
node=node, step=step, outputs=outputs,
name='grouped_by_node',
group=grouped_by_node, workspace_type=workspace_type)
self._tasks_by_node = (grouped_by_node, node_map)
return grouped_by_node
def to_task(self, node=None):
node = str(Node.current(node))
tasks = self.tasks_by_node(lambda x: node).tasks()
if len(tasks) == 0:
return Task()
return tasks[0]
class TaskOutput(object):
"""
Represents the output of a task. An output can be a blob,
    a list of blobs, or a record.
"""
def __init__(self, names):
self._schema = None
self._is_scalar = False
if isinstance(names, Field):
self._schema = names
names = self._schema.field_blobs()
self._is_scalar = type(names) not in (tuple, list)
if self._is_scalar:
names = [names]
self.names = names
self._values = None
def set(self, values, _fetch_func=None):
assert len(values) == len(self.names)
self._values = values
self._fetch_func = _fetch_func
def get(self):
assert self._values is not None, 'Output value not set yet.'
if self._is_scalar:
return self._values[0]
elif self._schema:
return from_blob_list(self._schema, self._values)
else:
return self._values
def fetch(self):
assert self._fetch_func is not None, (
'Cannot fetch value for this output.')
fetched_vals = [self._fetch_func(v) for v in self._values]
if self._is_scalar:
return fetched_vals[0]
elif self._schema:
return from_blob_list(self._schema, fetched_vals)
else:
return fetched_vals
def final_output(blob_or_record):
"""
Adds an output to the current Task, or if no task is active,
    creates a dummy task that returns the given blob or record
to the client. This will return the value of the blob or record when
the last task of the TaskGroup for a given node finishes.
"""
cur_task = Task.current(required=False) or Task()
return cur_task.add_output(blob_or_record)
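# Illustrative usage sketch (hypothetical blob name and session, not part of
# the original module): expose a blob produced inside a Task to the client.
#
#   with TaskGroup() as tg:
#       with Node('trainer'), Task() as task:
#           ...
#           loss_output = final_output('loss')
#   session.run(tg)
#   print(loss_output.fetch())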
class TaskOutputList(object):
""" Keeps a list of outputs for a task """
def __init__(self, outputs=None):
self.outputs = outputs or []
def names(self):
"""
        Retrieve the output names.
TODO(azzolini): make this schema-based.
"""
names = []
for o in self.outputs:
names += o.names
return names
def set_values(self, values, _fetch_func=None):
offset = 0
for o in self.outputs:
num = len(o.names)
o.set(values[offset:offset + num], _fetch_func)
offset += num
assert offset == len(values), 'Wrong number of output values.'
@context.define_context()
class Task(object):
"""
A Task is composed of an execution step and zero or more outputs.
Tasks are executed in the context of a TaskGroup, which, in turn, can
be run by a Session.
Task outputs are fetched by the session at the end of the run.
"""
TASK_SETUP = 'task_setup'
REPORT_STEP = 'report_step'
_global_names_used = set()
@staticmethod
def _get_next_name(node, group, name):
basename = str(node) + '/' + str(name)
names_used = (
Task._global_names_used
if group is None else
set(t.name for t in group._tasks_to_add))
cur_name = basename
i = 0
while cur_name in names_used:
i += 1
cur_name = '%s:%d' % (basename, i)
return cur_name
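    # Illustrative (not part of the original class): repeated names on the
    # same node receive numeric suffixes, e.g. 'local/task', 'local/task:1',
    # 'local/task:2', ...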
def __init__(
self, step=None, outputs=None,
workspace_type=None, group=None, node=None, name=None):
"""
Instantiate a Task and add it to the current TaskGroup and Node.
"""
if not name and isinstance(step, core.ExecutionStep):
name = step.Proto().name
if not name:
name = 'task'
# register this node name with active context
self.node = str(Node.current(None if node is None else Node(node)))
self.group = TaskGroup.current(group, required=False)
self.name = Task._get_next_name(self.node, self.group, name)
        # may need to be temporarily removed later if the Task is used as a context
if self.group is not None:
self.group._tasks_to_add.append(self)
self._already_used = False
self._step = None
self._step_with_setup = None
self._outputs = []
if step is not None:
self.set_step(step)
if outputs is not None:
self.add_outputs(outputs)
self._pipeline = None
self._is_pipeline_context = False
self._workspace_type = workspace_type
self._report_net = None
def __enter__(self):
# temporarily remove from _tasks_to_add to ensure correct order
if self.group is not None:
self.group._tasks_to_add.remove(self)
self._assert_not_used()
assert self._step is None, 'This Task already has an execution step.'
from caffe2.python import net_builder
self._net_builder = net_builder.NetBuilder(_fullname=self.name)
self._net_builder.__enter__()
return self
def __exit__(self, type, value, traceback):
self._net_builder.__exit__(type, value, traceback)
if type is None:
self.set_step(self._net_builder)
if self.group is not None:
self.group._tasks_to_add.append(self)
self._net_builder = None
def workspace_type(self):
return self._workspace_type
def _assert_not_used(self):
assert not self._already_used, (
            'Cannot modify task since it has already been used.')
def add_output(self, output):
self._assert_not_used()
output = (
output if isinstance(output, TaskOutput) else TaskOutput(output))
self._outputs.append(output)
return output
def add_outputs(self, outputs):
self._assert_not_used()
if type(outputs) not in (list, tuple):
return self.add_output(outputs)
else:
return [self.add_output(output) for output in outputs]
def set_step(self, step):
self._assert_not_used()
self._step = core.to_execution_step(step)
def get_step(self):
if self._step is not None and self._step_with_setup is None:
report_steps = filter(
lambda s: not hasattr(s, '_report_step_used'),
self._step.get_all_attributes(Task.REPORT_STEP))
for step in report_steps:
step._report_step_used = True
if not step.Proto().run_every_ms:
step.RunEveryMillis(1000)
init_nets, exit_nets = get_setup_nets(
Task.TASK_SETUP, [self._step] + report_steps, self)
if len(self._outputs) == 0:
output_net = core.Net('%s:output' % self.name)
self.add_output(output_net.ConstantFill(
[], 1, dtype=core.DataType.INT32, value=0))
exit_nets.append(output_net)
body = self._step if not report_steps else core.execution_step(
                '%s:body' % self.name, report_steps + [self._step])
self._step_with_setup = core.execution_step(
self.name,
[
core.execution_step('%s:init' % self.name, init_nets),
body,
core.execution_step('%s:exit' % self.name, exit_nets),
]
)
elif self._step_with_setup is None:
self._step_with_setup = core.execution_step(self.name, [])
return self._step_with_setup
def output_list(self):
return TaskOutputList(self._outputs)
def outputs(self):
return self._outputs
def _notify_used(self):
self.get_step()
self._already_used = True
class SetupNets(object):
"""
    Allows you to register a list of nets to be run at initialization
    and finalization of Tasks or TaskGroups.
For example, let's say you have the following:
init_net = core.Net('init')
my_val = init_net.ConstantFill([], 'my_val', value=0)
net = core.Net('counter')
net.Add([my_val, net.Const(1),], [my_val])
with TaskGroup() as task_group:
with Node('trainer'):
my_task = Task(step=[net])
In order to have `init_net` run once before `net` runs for the
first time, you can do one of the following:
net.add_object(Task.TASK_SETUP, SetupNets([init_net]))
or
net.add_object(TaskGroup.LOCAL_SETUP, SetupNets([init_net]))
- With Task.TASK_SETUP, init_net will run once at my_task startup.
- With TaskGroup.LOCAL_SETUP, init_net will run once on node 'trainer',
before any task of the task group is run on that node.
The same SetupNets object can be added to multiple nets. It will only
run once per Task/TaskGroup run.
"""
def __init__(self, init_nets=None, exit_nets=None):
self.init_nets = init_nets
self.exit_nets = exit_nets
def setup(self, init_net):
return self.init_nets
def exit(self, exit_net):
return self.exit_nets
|
import unittest
from caffe2.python import convnet_benchmarks as cb
from caffe2.python import test_util, workspace
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestConvnetBenchmarks(test_util.TestCase):
def testConvnetBenchmarks(self):
all_args = [
'--batch_size 16 --order NCHW --iterations 1 '
'--warmup_iterations 1',
'--batch_size 16 --order NCHW --iterations 1 '
'--warmup_iterations 1 --forward_only',
]
for model in [cb.AlexNet, cb.OverFeat, cb.VGGA, cb.Inception]:
for arg_str in all_args:
args = cb.GetArgumentParser().parse_args(arg_str.split(' '))
cb.Benchmark(model, args)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python.optimizer import build_sgd, build_ftrl, build_adagrad, build_adam
from caffe2.python.optimizer_test_util import OptimizerTestBase
from caffe2.python.test_util import TestCase
from caffe2.python import workspace
import numpy as np
class TestSgd(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_sgd(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertFalse(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().shared:
tensor = workspace.FetchBlob(param)
np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)
class TestFtrl(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_ftrl(
model, engine=None, alpha=1.0, beta=0.1, lambda1=0.0, lambda2=0.0)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestAdagrad(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_adagrad(model, base_learning_rate=1.0)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestAdam(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_adam(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
self.assertTrue(workspace.HasBlob("optimizer_iteration"))
iteration_tensor = workspace.FetchBlob("optimizer_iteration")
np.testing.assert_allclose(np.array([2000]),
iteration_tensor,
atol=1e-5)
for param in optimizer.get_auxiliary_parameters().shared:
workspace.FetchBlob(param)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import scope, core
from caffe2.proto import caffe2_pb2
import unittest
import threading
import time
SUCCESS_COUNT = 0
def thread_runner(idx, testobj):
global SUCCESS_COUNT
testobj.assertEquals(scope.CurrentNameScope(), "")
testobj.assertEquals(scope.CurrentDeviceScope(), None)
namescope = "namescope_{}".format(idx)
dsc = core.DeviceOption(caffe2_pb2.CUDA, idx)
with scope.DeviceScope(dsc):
with scope.NameScope(namescope):
testobj.assertEquals(scope.CurrentNameScope(), namescope + "/")
testobj.assertEquals(scope.CurrentDeviceScope(), dsc)
time.sleep(0.01 + idx * 0.01)
testobj.assertEquals(scope.CurrentNameScope(), namescope + "/")
testobj.assertEquals(scope.CurrentDeviceScope(), dsc)
testobj.assertEquals(scope.CurrentNameScope(), "")
testobj.assertEquals(scope.CurrentDeviceScope(), None)
SUCCESS_COUNT += 1
class TestScope(unittest.TestCase):
def testNamescopeBasic(self):
self.assertEquals(scope.CurrentNameScope(), "")
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
self.assertEquals(scope.CurrentNameScope(), "")
def testNamescopeAssertion(self):
self.assertEquals(scope.CurrentNameScope(), "")
try:
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
raise Exception()
except Exception:
pass
self.assertEquals(scope.CurrentNameScope(), "")
def testDevicescopeBasic(self):
self.assertEquals(scope.CurrentDeviceScope(), None)
dsc = core.DeviceOption(caffe2_pb2.CUDA, 9)
with scope.DeviceScope(dsc):
self.assertEquals(scope.CurrentDeviceScope(), dsc)
self.assertEquals(scope.CurrentDeviceScope(), None)
def testDevicescopeAssertion(self):
self.assertEquals(scope.CurrentDeviceScope(), None)
dsc = core.DeviceOption(caffe2_pb2.CUDA, 9)
try:
with scope.DeviceScope(dsc):
self.assertEquals(scope.CurrentDeviceScope(), dsc)
raise Exception()
except Exception:
pass
self.assertEquals(scope.CurrentDeviceScope(), None)
def testMultiThreaded(self):
"""
        Test that name/device scopes are properly thread-local
        and don't interfere
"""
global SUCCESS_COUNT
self.assertEquals(scope.CurrentNameScope(), "")
self.assertEquals(scope.CurrentDeviceScope(), None)
threads = []
for i in range(4):
threads.append(threading.Thread(
target=thread_runner,
args=(i, self),
))
for t in threads:
t.start()
with scope.NameScope("master"):
self.assertEquals(scope.CurrentDeviceScope(), None)
self.assertEquals(scope.CurrentNameScope(), "master/")
for t in threads:
t.join()
self.assertEquals(scope.CurrentNameScope(), "master/")
self.assertEquals(scope.CurrentDeviceScope(), None)
# Ensure all threads succeeded
self.assertEquals(SUCCESS_COUNT, 4)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.core import CreatePythonOperator
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
def SubFunctionThatThrowsRuntimeError():
raise RuntimeError("This is an intentional exception.")
def MainOpFunctionThatThrowsRuntimeError(inputs, _):
return SubFunctionThatThrowsRuntimeError()
class PythonOpTest(hu.HypothesisTestCase):
@given(x=hu.tensor())
def test_feed(self, x):
def f(inputs, _):
self.assertEqual(x.shape, inputs[0].shape)
self.assertEqual(type(inputs[0].shape), tuple)
self.assertEqual(type(inputs[0].data), np.ndarray)
np.testing.assert_almost_equal(x, inputs[0].data)
op = CreatePythonOperator(f, ["x"], [])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
def test_exception(self):
op = CreatePythonOperator(MainOpFunctionThatThrowsRuntimeError, [], [])
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
@given(x=hu.tensor())
def test_feed_with_helper_function(self, x):
def f(inputs, _):
self.assertEqual(x.shape, inputs[0].shape)
self.assertEqual(type(inputs[0].shape), tuple)
self.assertEqual(type(inputs[0].data), np.ndarray)
np.testing.assert_almost_equal(x, inputs[0].data)
net = core.Net("test")
net.Python(f)(["x"], [])
workspace.FeedBlob("x", x)
workspace.RunNetOnce(net)
@given(x=hu.tensor())
def test_feed_with_gc(self, x):
def f(inputs, _):
self.assertEqual(x.shape, inputs[0].shape)
np.testing.assert_almost_equal(x, inputs[0].data)
op = CreatePythonOperator(f, ["x"], [])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
del f
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
@given(x=hu.tensor())
def test_reshape(self, x):
def f(inputs, outputs):
outputs[0].reshape(inputs[0].shape)
self.assertEqual(x.shape, inputs[0].shape)
self.assertEqual(x.shape, outputs[0].shape)
outputs[0].data[...] = inputs[0].data
op = CreatePythonOperator(f, ["x"], ["y"])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
y = workspace.FetchBlob("y")
np.testing.assert_almost_equal(x, y)
@given(x=hu.tensor())
def test_workspace_manipulation(self, x):
"""
        Verify that a Python op can manipulate the workspace directly
"""
def f(inputs, outputs, ws):
fetched = ws.blobs['internal'].fetch()
np.testing.assert_almost_equal(fetched, x)
ws = workspace.C.Workspace()
net = core.Net("test")
net.GivenTensorFill([], ['internal'], values=x, shape=x.shape)
net.Python(f, pass_workspace=True)([], [])
ws.run(net)
@given(x=hu.tensor())
def test_caught_exception_doesnt_terminate(self, x):
def f(inputs, outputs):
try:
raise Exception("Exception in handler")
except Exception:
pass
op = CreatePythonOperator(f, ["x"], ["y"])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
@given(x=hu.tensor(),
n=st.integers(min_value=1, max_value=20),
w=st.integers(min_value=1, max_value=20))
def test_multithreaded_evaluation(self, x, n, w):
def f(inputs, outputs):
outputs[0].reshape(inputs[0].shape)
outputs[0].data[...] = inputs[0].data
ops = [CreatePythonOperator(f, ["x"], [str(i)]) for i in range(n)]
net = core.Net("net")
net.Proto().op.extend(ops)
net.Proto().type = "dag"
net.Proto().num_workers = w
iters = 100
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
workspace.FeedBlob("x", x)
workspace.RunPlan(plan.Proto().SerializeToString())
for i in range(n):
y = workspace.FetchBlob(str(i))
np.testing.assert_almost_equal(x, y)
@given(x=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_gradient(self, x, in_place, gc, dc):
def f(inputs, outputs):
outputs[0].reshape(inputs[0].shape)
outputs[0].data[...] = inputs[0].data * 2
def grad_f(inputs, outputs):
# Ordering is [inputs, outputs, grad_outputs]
grad_output = inputs[2]
grad_input = outputs[0]
grad_input.reshape(grad_output.shape)
grad_input.data[...] = grad_output.data * 2
op = CreatePythonOperator(
f, ["x"], ["x" if in_place else "y"], grad_f=grad_f)
self.assertGradientChecks(gc, op, [x], 0, [0])
self.assertDeviceChecks(dc, op, [x], [0])
@given(inputs=hu.tensors(n=2), **hu.gcs)
def test_gradient_multiple(self, inputs, gc, dc):
(x1, x2) = inputs
def f(inputs, outputs):
for idx in [0, 1]:
self.assertEqual(type(inputs[idx].shape), tuple)
outputs[idx].reshape(inputs[idx].shape)
outputs[idx].data[...] = inputs[idx].data * 2
def grad_f(inputs, outputs):
# Ordering is [inputs, outputs, grad_outputs]
self.assertEqual(len(inputs), 6)
self.assertEqual(len(outputs), 2)
for (grad_output_idx, grad_input_idx) in [(4, 0), (5, 1)]:
grad_output = inputs[grad_output_idx]
grad_input = outputs[grad_input_idx]
grad_input.reshape(grad_output.shape)
grad_input.data[...] = grad_output.data * 2
op = CreatePythonOperator(f, ["x1", "x2"], ["y1", "y2"], grad_f=grad_f)
for idx in [0, 1]:
self.assertGradientChecks(gc, op, [x1, x2], idx, [0, 1])
self.assertDeviceChecks(dc, op, [x1, x2], [0, 1])
|
## @package hsm_util
# Module caffe2.python.hsm_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import hsm_pb2
'''
Hierarchical softmax utility methods that can be used to:
    1) create a TreeProto structure given a list of word_ids or NodeProtos
    2) create a HierarchyProto structure from the user-provided TreeProto
'''
def create_node_with_words(words, name='node'):
node = hsm_pb2.NodeProto()
node.name = name
for word in words:
node.word_ids.append(word)
return node
def create_node_with_nodes(nodes, name='node'):
node = hsm_pb2.NodeProto()
node.name = name
for child_node in nodes:
new_child_node = node.children.add()
new_child_node.MergeFrom(child_node)
return node
def create_hierarchy(tree_proto):
max_index = 0
def create_path(path, word):
path_proto = hsm_pb2.PathProto()
path_proto.word_id = word
for entry in path:
new_path_node = path_proto.path_nodes.add()
new_path_node.index = entry[0]
new_path_node.length = entry[1]
new_path_node.target = entry[2]
return path_proto
def recursive_path_builder(node_proto, path, hierarchy_proto, max_index):
node_proto.offset = max_index
path.append([max_index,
len(node_proto.word_ids) + len(node_proto.children), 0])
max_index += len(node_proto.word_ids) + len(node_proto.children)
if hierarchy_proto.size < max_index:
hierarchy_proto.size = max_index
for target, node in enumerate(node_proto.children):
path[-1][2] = target
max_index = recursive_path_builder(node, path, hierarchy_proto,
max_index)
for target, word in enumerate(node_proto.word_ids):
path[-1][2] = target + len(node_proto.children)
path_entry = create_path(path, word)
new_path_entry = hierarchy_proto.paths.add()
new_path_entry.MergeFrom(path_entry)
del path[-1]
return max_index
node = tree_proto.root_node
hierarchy_proto = hsm_pb2.HierarchyProto()
path = []
max_index = recursive_path_builder(node, path, hierarchy_proto, max_index)
return hierarchy_proto
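# Illustrative usage sketch (hypothetical names, not part of the original
# module): build a tiny two-level hierarchy over four words.
#
#   left = create_node_with_words([0, 1], name='left')
#   right = create_node_with_words([2, 3], name='right')
#   tree = hsm_pb2.TreeProto()
#   tree.root_node.MergeFrom(create_node_with_nodes([left, right], name='root'))
#   hierarchy = create_hierarchy(tree)  # one PathProto per word id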
|
## @package memonger
# Module caffe2.python.memonger
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import networkx as nx
import collections
import time
import heapq
import copy
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
import enum
import logging
import numpy as np
log = logging.getLogger("memonger")
log.setLevel(logging.INFO)
LiveRange = collections.namedtuple('LiveRange', ["defined", "used", "size"])
def share_grad_blobs(
net,
losses,
param_grads,
namescope,
dont_share_blobs=None,
share_activations=True,
blob_shapes=None,
):
'''
    Implements an optimization similar to Torch's shareGradInput():
for the gradients that are passed between layers, share blobs between
operators when possible. This yields significant memory savings with
deep networks.
Returns an optimized protobuf (assign to net._net)
'''
def is_grad_blob(b):
name = str(b)
        # Note: we also need to match the _{namescope} prefix in order
        # to handle the auto-split gradients
return "_grad" in name and (name.startswith(namescope) or
name.startswith("_" + namescope)) and name not in param_grads
def is_grad_op(op):
# TODO: something smarter
for b in list(op.input) + list(op.output):
if is_grad_blob(b):
return True
return False
log.warn("NOTE: Executing memonger to optimize gradient memory")
# Collect ops that have something to do with gradients
if not namescope.endswith("/"):
namescope += "/"
netproto = copy.deepcopy(net.Proto())
activations = []
external_output = set(net.Proto().external_output)
# Hacky way to get activations, think of a better way
for op in net.Proto().op:
for b in op.output:
if b + "_w" in op.input and b not in external_output:
activations.append(b)
    # Remove the last two activations, as they are usually accessed externally
activations = set(activations[:-2])
# Gradient ops
grad_ops = [op for op in netproto.op if is_grad_op(op)]
return _compute_blob_recycling_for_dag(
netproto,
losses,
grad_ops,
lambda b: is_grad_blob(b) or (share_activations and b in activations),
namescope,
{} if dont_share_blobs is None else dont_share_blobs,
blob_shapes
)
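# Illustrative usage sketch (hypothetical names and namescope, not part of the
# original module): optimize gradient memory after adding gradient operators.
#
#   grad_map = model.AddGradientOperators([loss])
#   optimized_proto = share_grad_blobs(
#       model.net,
#       losses=[loss],
#       param_grads=set(str(g) for g in grad_map.values()),
#       namescope="gpu_0",
#   )
#   model.net.Proto().CopyFrom(optimized_proto)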
def optimize_inference_for_dag(net, input_blobs, namescope=""):
netproto = copy.deepcopy(net.Proto())
external_input = set(net.Proto().external_input)
external_output = set(net.Proto().external_output)
def is_activation_blob(b):
return b not in external_input and b not in external_output
seen_as_output = set()
ops = list(net.Proto().op)
    # Sanity check: check that all external inputs are properly accounted for
    # and that no gradient ops are included in 'net'
for op in ops:
for b in op.input:
if is_activation_blob(b) and b not in seen_as_output:
assert False, "{} not in external input".format(b)
seen_as_output = seen_as_output.union(set(op.output))
assert not op.is_gradient_op, \
"You can only pass inference-only nets to optimize_inference_for_dag"
return _compute_blob_recycling_for_dag(
netproto, input_blobs, ops, is_activation_blob,
namescope, set(), None,
)
def _compute_blob_recycling_for_dag(
netproto, heads, ops, is_shareable,
namescope, dont_share_blobs, blob_shapes=None,
):
'''
Computes a blob recycling by traversing the computation DAG. The resulting
model can be executed safely on a DAGNet.
'''
start_time = time.time()
# Create mapping from blobs to ops
blobs_to_ops = collections.defaultdict(lambda: [])
blob_input_count = collections.defaultdict(lambda: 0)
op_inputs = collections.defaultdict(lambda: 0)
op_visit_count = collections.defaultdict(lambda: 0)
share_counts = collections.defaultdict(lambda: 0)
blob_sizes = {} if blob_shapes is not None else None
# First figure out which of the shareable blobs
# are 'internal' to the optimization. For example, if optimizing
# only gradient ops, then activation blobs will be 'external' as they
# are not output by these ops.
optim_op_outputs = set()
for op in ops:
optim_op_outputs.update(set(op.output))
for i, op in enumerate(ops):
for inp in op.input:
if is_shareable(inp) or inp in heads:
if inp in optim_op_outputs:
blobs_to_ops[inp].append(i)
op_inputs[i] += 1
else:
# For external blobs, we don't increase the op_inputs
# count.
blobs_to_ops[inp].append(i)
share_counts[inp] = 1
# Traverse operators starting from the heads' blobs.
# Keep tabs on when blobs are seen first and last, and also
# when operators have their input satisfied. Share blobs only
    # under the same branch, avoiding problems with parallel workers.
output_blobs = set()
mapping = {}
unknown_shapes = set()
def infer_blob_size(b):
if b in blob_shapes:
return np.prod(blob_shapes[b])
else:
unknown_shapes.add(b)
return 0
saved_count = 0
def descend(op_idx, free_blobs):
cur_op = ops[op_idx]
new_free_blobs = set()
unused_free_blobs = set(free_blobs)
saved = 0
for inp in cur_op.input:
if is_shareable(inp):
blob_input_count[inp] += 1
if blob_input_count[inp] == len(blobs_to_ops[inp]):
actual_blob = inp if inp not in mapping else mapping[inp]
if actual_blob not in dont_share_blobs:
new_free_blobs.add(
(-share_counts[actual_blob], actual_blob),
)
for outp in cur_op.output:
if is_shareable(outp):
if outp not in output_blobs:
                    # First time this blob is seen as an output; assign it a free blob
if len(free_blobs) > 0:
if blob_sizes is None:
(negcnt, freeb) = heapq.heappop(free_blobs)
else:
bsize = infer_blob_size(outp)
best_blob = None
best_size = -1
# Heuristic to choose the most suitably sized blob
for b in free_blobs:
sz = blob_sizes[b]
if sz >= best_size:
if best_size < bsize or best_size >= sz:
best_size = sz
best_blob = b
assert best_blob is not None
freeb = best_blob
# blob_sizes[freeb] = max(best_size, bsize)
free_blobs.remove(freeb)
saved += bsize
mapping[outp] = freeb
if freeb in unused_free_blobs:
unused_free_blobs.remove(freeb)
share_counts[freeb] += 1
output_blobs.add(outp)
for (cnt, nf) in new_free_blobs:
if blob_sizes is None:
heapq.heappush(free_blobs, (cnt, nf))
else:
if nf not in blob_sizes:
blob_sizes[nf] = infer_blob_size(outp)
assert nf not in free_blobs, \
"Blob {} double-inserted to free_blobs".format(nf)
free_blobs.append(nf)
free_blobs_fwd = free_blobs
for outp in cur_op.output:
for inp_op_idx in blobs_to_ops[outp]:
op_visit_count[inp_op_idx] += 1
# Descend only if we have satisfied all inputs
if op_visit_count[inp_op_idx] == op_inputs[inp_op_idx]:
(unused, saved_desc) = descend(inp_op_idx, free_blobs_fwd)
saved += saved_desc
unused_free_blobs = unused.intersection(unused_free_blobs)
# We can pass unused free blobs to other branch
free_blobs_fwd = list(
unused.intersection(set(free_blobs_fwd))
)
return (unused_free_blobs, saved)
    # Start DFS from the heads (losses or inputs)
for head_blob in heads:
for op_idx in blobs_to_ops[head_blob]:
(_, saved) = descend(op_idx, [])
saved_count += saved
# Rename the shared blobs
shared_blobs = set(mapping.values())
renamed = {}
for j, b in enumerate(shared_blobs):
if b in optim_op_outputs:
renamed[b] = namescope + "__m{}_shared".format(j)
else:
renamed[b] = b
# Add the originators
mapping.update(renamed)
if saved_count > 0:
log.info("Remapping {} blobs, using {} shared; saved apprx {} MB".format(
len(mapping), len(renamed), int(saved_count * 4 / 1024 / 1024),
))
log.info("Could not infer sizes for: {}".format(unknown_shapes))
else:
log.info("Remapping {} blobs, using {} shared".format(
len(mapping), len(renamed),
))
apply_assignments(netproto, mapping)
log.info("Memonger memory optimization took {} secs".format(
time.time() - start_time),
)
return netproto
def _find_source_nodes(g):
''' Return nodes without predecessors '''
ret = []
for cn in g:
cur_pred = g.predecessors(cn)
if not cur_pred:
ret.append(cn)
return ret
def _find_target_nodes(g):
''' Return nodes without successors '''
ret = []
for cn in g:
cur_succ = g.successors(cn)
if not cur_succ:
ret.append(cn)
return ret
def _add_single_target_ifneeded(g):
targets = _find_target_nodes(g)
assert len(targets) >= 1
if len(targets) == 1:
return g
ret = copy.deepcopy(g)
def _next_available_idx(g):
ret = -1
for cn in g:
if cn > ret:
ret = cn
ret += 1
return ret
target_node_idx = _next_available_idx(g)
ret.add_node(target_node_idx)
for cn in targets:
ret.add_edge(cn, target_node_idx)
return ret
def _get_path(pred_list, dist_list):
''' Get the path from nx.bellman_ford()'s output '''
# distances are negative
assert all(dist_list[x] <= 0 for x in dist_list)
# node with longest distance to source is the target
target = min(dist_list, key=lambda x: dist_list[x])
ret = []
cur = target
while cur is not None:
ret.append(cur)
cur = pred_list[cur]
return list(reversed(ret))
def _get_longest_paths(g, source_nodes):
    ''' Get the longest path for each node in 'source_nodes'.
        Found with bellman_ford() by setting each edge weight to -1.
'''
ng = copy.deepcopy(g)
for u, v in ng.edges():
ng[u][v]["weight"] = -1
ret = {}
for cn in source_nodes:
pred, dist = nx.bellman_ford(ng, cn, weight="weight")
path = _get_path(pred, dist)
assert path[0] == cn
assert len(path) - 1 == -dist[path[-1]]
ret[cn] = path
return ret
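# Illustrative check (not part of the original module), assuming the
# networkx 1.x bellman_ford API used above:
#
#   g = nx.DiGraph([(0, 1), (1, 2), (0, 2)])
#   _get_longest_paths(g, [0])  # -> {0: [0, 1, 2]}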
def _build_tree(paths):
    ''' Build a tree for the given paths based on common elements.
        The last element of every path is the same; it is the root of the tree.
'''
assert all(cp[-1] == paths[0][-1] for cp in paths)
g = nx.DiGraph()
node_set = {y for x in paths for y in x}
g.add_nodes_from(node_set)
for cp in paths:
for ce in zip(cp[0:-1], cp[1:]):
g.add_edge(ce[1], ce[0])
root = paths[0][-1]
_compute_tree_height(g, root)
return (g, root)
def _compute_tree_height(g, root):
    ''' Compute the height of each node in the tree.
        Leaves have height 0.
'''
def _get_height(root):
children = g.successors(root)
height = 0
if children:
child_heights = [_get_height(x) for x in children]
height = max(child_heights) + 1
g.node[root]["height"] = height
return height
_get_height(root)
def _sort_tree_leaves(g, root):
    ''' For each node, sort its child nodes by their heights.
        Return the leaf nodes of the tree after sorting.
'''
def _get_height(root):
return g.node[root]["height"]
def _get_sorted_leaves(root):
children = g.successors(root)
if not children:
return [root]
child_heights = [_get_height(x) for x in children]
order = sorted(range(len(children)), key=lambda x: child_heights[x])
ret = []
for co in order:
cr = children[co]
ret += _get_sorted_leaves(cr)
return ret
return _get_sorted_leaves(root)
def topological_sort_traversal_longest_path(g):
''' The graph 'g' may contain several source nodes (nodes without incoming
edge), which could be in any order and still be a valid
topological sorting result. We would like to arrange these source nodes
so that the average live spans of the computed blobs are shorter.
The idea is to sort the source nodes based on the length of their path to
the target node so that the one with longer path is used first.
This is done by:
- Add a single target node if there are multiple target nodes in 'g'.
- Find the longest path between each source and the target node.
- Convert the longest paths to a tree with the target node being the root
and source nodes being the leaves.
- Sort the nodes of the tree based on the height of the tree.
'''
gt = _add_single_target_ifneeded(g)
source_nodes = _find_source_nodes(gt)
lpaths = _get_longest_paths(gt, source_nodes)
tree, root = _build_tree(lpaths.values())
sorted_sources = _sort_tree_leaves(tree, root)
assert(sorted(sorted_sources) == sorted(source_nodes))
ret = nx.topological_sort(g, sorted_sources)
assert(len(ret) == len(g.node))
return ret
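# Illustrative example (not part of the original module): in the graph below,
# 0 and 3 are both sources; 0 has the longer path to the target, so it is
# scheduled ahead of 3 in the returned topological order.
#
#   g = nx.DiGraph([(0, 1), (1, 2), (3, 2)])
#   order = topological_sort_traversal_longest_path(g)  # e.g. [0, 1, 3, 2]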
def topological_sort_traversal(g):
return nx.topological_sort(g)
def compute_ranges(linearized_ops, blob_sizes=None):
if not blob_sizes:
log.warning('Provide blob sizes to get more accurate assignments.')
blobs = collections.defaultdict(
lambda: LiveRange(defined=None, used=None, size=None))
for i, op in enumerate(linearized_ops):
for blob in op.input:
used = blobs[blob].used
if used is None:
used = i
else:
used = max(used, i)
blobs[blob] = blobs[blob]._replace(used=used)
blob_size = blob_sizes[blob] if blob_sizes else None
assert not blob_sizes or blob_size is not None
blobs[blob] = blobs[blob]._replace(size=blob_size)
for blob in op.output:
defined = blobs[blob].defined
if defined is None:
defined = i
else:
defined = min(defined, i)
blobs[blob] = blobs[blob]._replace(defined=defined)
blob_size = blob_sizes[blob] if blob_sizes else None
assert not blob_sizes or blob_size is not None
blobs[blob] = blobs[blob]._replace(size=blob_size)
return blobs
def is_compatible(candidate_range, assignment, static_blobs):
(name, range_) = assignment[-1]
if name in static_blobs:
return False
if candidate_range.defined is None or range_.defined is None \
or range_.used is None:
return False
return candidate_range.defined > range_.used
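# Illustrative example (not part of the original module): a blob whose live
# range ends at op 3 can share storage with one first defined at op 5.
#
#   a = LiveRange(defined=1, used=3, size=None)
#   b = LiveRange(defined=5, used=9, size=None)
#   is_compatible(b, [('a', a)], static_blobs=[])  # -> True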
def compute_blob_assignments(assignments):
blob_assignments = {}
for assignment in assignments:
if len(assignment) == 1:
continue
last_blob, _ = assignment[-1]
for (blob, _) in assignment:
blob_assignments[blob] = last_blob
return blob_assignments
def _get_max_size(assignment):
if not assignment:
return 0
ret = max([x[1].size for x in assignment])
ret = 0 if ret is None else ret
return ret
def get_memory_usage(assignments):
ret = 0
for cur in assignments:
ret += _get_max_size(cur)
return ret
def compute_assignments_greedy(ranges_sorted, init_assignments=None):
assignments = init_assignments or []
visited = {y[0] for x in assignments for y in x}
for (name, range_) in ranges_sorted:
if name in visited:
continue
assigned = False
best_assignment = 0
min_dist = float("inf")
candidate_size = range_.size or 0
for idx, assignment in enumerate(assignments):
if is_compatible(range_, assignment, []):
assigned = True
dist = abs(_get_max_size(assignment) - candidate_size)
if dist < min_dist:
min_dist = dist
best_assignment = idx
if assigned:
assignment = assignments[best_assignment]
assignment.append((name, range_))
else:
assignments.append([(name, range_)])
return assignments
def _get_count(assignments):
''' Return number of blobs in assignments '''
if assignments:
return sum([len(x) for x in assignments])
return 0
def compute_assignments_dp(ranges_sorted, init_assignment, counter=None):
''' Compute assignment for blobs in 'ranges_sorted' on top of 'init_assignment'
using dynamic programming + recursion.
ranges_sorted: blobs sorted by 'used'
init_assignment: assignment to start with, blobs in 'ranges_sorted' should
not be used in 'init_assignment'
Using f(b, k, init) to represent the best assignment for blobs b[0:k]
given initial assignment 'init', we have
f(b, k, init) = f(b, j, init) +
find_best(b[j:k], f(b, j, init))
where j is the index of the last best assignment that is independent of
blob b[k - 1] (b[k - 1] is compatible with all assignments in
f(b, j, init)), and find_best(b1, init1) gives the best assignment
for blobs in 'b1' based on the initial assignment 'init1', and blobs
    b1[0:-1] should be incompatible with b1[-1]. f(b, len(b), []) gives
the best assignment for blobs 'b'.
For find_best(b, init), since b[0:-1] are not compatible with b[-1], we
could reduce it to a smaller problem to find best assignment for b[0:-1]
as
find_best(b, init) = min {
f(b[0:-1], len(b) - 1, init - x) + [x, b[-1]] for x in init, or
f(b[0:-1], len(b) - 1, init) + [b[-1]]
}
where min{} gives the assignment with minimum memory usage.
'''
def _get_compatible_prev(candidate_range, best_assignments, cur_idx):
        ''' Find the closest position k in best_assignments that is independent of
            candidate_range, i.e. candidate_range is compatible with all assignments
            in best_assignments[k].
Return -1 if not found.
'''
def is_compatible_all(candidate_range, assignments):
            ''' Return True if candidate_range is compatible with all assignments in 'assignments' '''
return all([is_compatible(candidate_range[1], x, []) for x in assignments])
ii = cur_idx - 1
while ii >= 0:
cba = best_assignments[ii]
if is_compatible_all(candidate_range, cba):
return ii
ii -= 1
return -1
def _find_best(ranges, init_assignment, prev_best_assignment, counter):
''' Find the best assignment for blobs 'ranges' given an initialized
assignment 'init_assignment'.
        Blobs in ranges[0:-1] should be incompatible with blob ranges[-1].
'prev_best_assignment': best assignment for blobs in ranges[:-1]
By assigning ranges[-1] to each assignment k in 'init_assignment' or
in a new assignment, the problem becomes a smaller problem to find
the best assignment for ranges[0:-1] given the initial assignment
        init_assignment[0:k, (k+1):-1].
'''
# Blob to check
find_range = ranges[-1]
# Blobs in ranges[0:-1] are incompatible with ranges[-1] so that we can
# reduce it to a smaller problem.
assert all(not is_compatible(x[1], [find_range], []) for x in ranges[0:-1])
sz = len(init_assignment)
best_candidates = []
# Try to assign 'find_range' to each assignment in init_assignment
for ii in range(sz):
if not is_compatible(find_range[1], init_assignment[ii], []):
continue
cur_best = copy.deepcopy(init_assignment)
cur_best[ii].append(find_range)
if len(ranges) > 1:
cur_best_tmp = [x for i, x in enumerate(cur_best) if i != ii]
# reduce to a smaller dp problem
cur_best_tmp = compute_assignments_dp(
ranges[:-1], cur_best_tmp, counter)
cur_best = cur_best_tmp + [cur_best[ii]]
best_candidates.append(cur_best)
# Try to put 'find_range' in a new assignment
best_candidates.append(prev_best_assignment + [[find_range]])
ret = min(best_candidates, key=lambda x: get_memory_usage(x))
return ret
if not counter:
counter = [0]
counter[0] += 1
if counter and counter[0] % 5000 == 0:
rs = [ranges_sorted[0][1].defined, ranges_sorted[-1][1].used]
log.info('Finding assignments {} ({} -> {})...'.format(
counter[0], rs[0], rs[1]))
init_assignment = init_assignment or []
    # best_assignments[k]: best assignments for the first k+1 blobs ranges_sorted[0:(k+1)]
best_assignments = []
# Find best assignment for blobs ranges_sorted[0:ii]
for ii, cur_range in enumerate(ranges_sorted):
# closest best_assignment that is independent of ranges_sorted[ii]
prev_idx = _get_compatible_prev(cur_range, best_assignments, ii)
prev_best = copy.deepcopy(init_assignment) if prev_idx < 0 else \
copy.deepcopy(best_assignments[prev_idx])
# Need to find best assignment for blobs in 'ranges_part'
ranges_part = ranges_sorted[(prev_idx + 1):(ii + 1)]
cur_best = _find_best(
ranges_part, prev_best,
best_assignments[-1] if best_assignments else init_assignment,
counter)
assert _get_count(cur_best) == _get_count(prev_best) + len(ranges_part)
best_assignments.append(copy.deepcopy(cur_best))
assert len(best_assignments) == len(ranges_sorted)
best = best_assignments[-1]
return best
def get_updated_ranges(ranges, max_live=None):
''' Set LiveRange.defined = -1 if it is None
Set LiveRange.used = max_live if it is None
        Set LiveRange.size = 1 if it is None
'''
def _get_max_live(ranges):
max_live = max(x[1].used for x in ranges if x[1].used) + 1
return max_live
def _update_range(x, max_live, size):
cx = x
if x[1].defined is None:
cx = (cx[0], cx[1]._replace(defined=-1))
if x[1].used is None:
cx = (cx[0], cx[1]._replace(used=max_live))
if x[1].size is None:
cx = (cx[0], cx[1]._replace(size=size))
return cx
if max_live is None:
max_live = _get_max_live(ranges)
ranges = [_update_range(x, max_live, 1) for x in ranges]
return ranges
def compute_assignments(ranges, static_blobs, algo):
'''
algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
cost of more computation.
    AssignmentAlgorithm.GREEDY may be better when 'blob_sizes' is not
    provided.
'''
# Sort the ranges based on when they are last used.
# If LiveRange.used is None, then the blob is never used and could
# be consumed externally. Sort these to the end of the list as opposed
# to the beginning so that they can be shared as well.
ranges = sorted(
list(ranges.items()),
key=lambda p: (p[1].used is None, p[1].used),
)
# Update None values
ranges = get_updated_ranges(ranges)
# Sharable blobs
ranges_sharable = [x for x in ranges if x[0] not in static_blobs]
# Static blobs, not sharable
ranges_static = [x for x in ranges if x[0] in static_blobs]
log.info("Total sharable blobs {}".format(len(ranges_sharable)))
best_assignment = []
if algo == AssignmentAlgorithm.DYNAMIC_PROGRAMMING:
best_assignment = compute_assignments_dp(ranges_sharable, [])
elif algo == AssignmentAlgorithm.GREEDY:
best_assignment = compute_assignments_greedy(ranges_sharable, [])
else:
assert "Invalid algo name {}".format(algo)
best_assignment += [[x] for x in ranges_static]
# verify_assignments(best_assignment)
return best_assignment
def verify_assignments(assignments):
for cur in assignments:
for x, y in zip(cur[0:-1], cur[1:]):
assert x[1].used < y[1].defined
def compute_interference_graph(ops):
g = nx.DiGraph()
for i, op in enumerate(ops):
g.add_node(i, op=op)
for i, parent_op in enumerate(ops):
for j, child_op in enumerate(ops):
if i == j:
continue
if any(output in child_op.input for output in parent_op.output):
deps = set(child_op.input).intersection(parent_op.output)
g.add_edge(i, j, deps=deps)
assert nx.is_directed_acyclic_graph(g), child_op
return g
Optimization = collections.namedtuple(
'Optimization', ['net', 'assignments', 'blob_assignments'])
def apply_assignments(net, blob_assignments):
def canonical_name(blob):
if blob not in blob_assignments:
return blob
return blob_assignments[blob]
for op in net.op:
# Descend into subnets of the recurrent network
if op.type.startswith('RecurrentNetwork'):
apply_recurrent_blob_assignments(op, blob_assignments, canonical_name)
for i, input_ in enumerate(op.input):
op.input[i] = canonical_name(input_)
for i, output in enumerate(op.output):
op.output[i] = canonical_name(output)
def apply_recurrent_blob_assignments(op, blob_assignments, canonical_name):
log.debug("Applying assignments to recurrent op: {}".format(op.type))
import google.protobuf.text_format as protobuftx
step_args = [a for a in op.arg if a.name.endswith("step_net")]
for step_arg in step_args:
step_proto = caffe2_pb2.NetDef()
protobuftx.Merge(step_arg.s, step_proto)
apply_assignments(step_proto, blob_assignments)
for i, einp in enumerate(step_proto.external_input):
if einp in blob_assignments:
step_proto.external_input[i] = canonical_name(einp)
step_arg.s = str(step_proto)
# Store renamings
for blob, renamed in blob_assignments.items():
if blob in list(op.input) + list(op.output):
a = caffe2_pb2.Argument()
a.name = blob + ".rename"
a.s = str(renamed)
op.arg.extend([a])
class AssignmentAlgorithm(enum.Enum):
GREEDY = 0
DYNAMIC_PROGRAMMING = 1
def optimize_interference(net, static_blobs,
ordering_function=topological_sort_traversal,
blob_sizes=None,
algo=AssignmentAlgorithm.GREEDY):
"""
ordering_function: topological_sort_traversal or
topological_sort_traversal_longest_path.
topological_sort_traversal_longest_path gives better
results but needs a bit more computation.
algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
cost of more computation.
    AssignmentAlgorithm.GREEDY may be better when 'blob_sizes' is not
    provided.
"""
"""
1) Use a BFS traversal of the execution graph to generate an
ordering of the node executions.
2) Generate use-def ranges for each `blob` in the BFS traversal
order.
3) Assign blobs to `canonical blobs`
4) Rename blobs to canonical blobs
"""
net = copy.deepcopy(net)
g = compute_interference_graph(net.op)
ordering = ordering_function(g)
linearized_ops = [net.op[i] for i in ordering]
    # Reorder ops in net based on the computed linearized order.
# If the graph has multiple topological orderings and if the NetDef's
# ordering differs from the order used to compute ranges, then the
# runtime might end up overwriting blobs before they are used.
del net.op[:]
net.op.extend(linearized_ops)
ranges = compute_ranges(linearized_ops, blob_sizes)
assignments = compute_assignments(ranges, static_blobs, algo)
blob_assignments = compute_blob_assignments(assignments)
apply_assignments(net, blob_assignments)
return Optimization(
net=net,
blob_assignments=blob_assignments,
assignments=assignments)
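# Illustrative usage sketch (hypothetical net, not part of the original
# module): run the net once first so blob sizes can be read from the
# workspace, then optimize and inspect the savings.
#
#   blob_sizes = collect_blob_sizes(net.Proto())
#   opt = optimize_interference(
#       net.Proto(), static_blobs=set(net.Proto().external_input),
#       blob_sizes=blob_sizes, algo=AssignmentAlgorithm.GREEDY)
#   stats = compute_statistics(opt.assignments)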
Statistics = collections.namedtuple(
'Statistics', ['baseline_nbytes', 'optimized_nbytes'])
def compute_statistics(assignments):
def blob_nbytes(blob):
return workspace.FetchBlob(blob).nbytes
blob_bytes = {
blob: blob_nbytes(blob) for assignment in assignments
for (blob, _) in assignment}
baseline_nbytes = sum(v for _, v in blob_bytes.items())
optimized_nbytes = sum(
max(blob_bytes[blob] for (blob, _) in assignment)
for assignment in assignments)
return Statistics(
baseline_nbytes=baseline_nbytes,
optimized_nbytes=optimized_nbytes)
def collect_blob_sizes(net):
''' Collect blob sizes from workspace '''
def blob_nbytes(blob):
return workspace.FetchBlob(blob).nbytes
blobs = {}
for op in net.op:
for blob in op.input:
blobs[blob] = blob_nbytes(blob)
for blob in op.output:
blobs[blob] = blob_nbytes(blob)
return blobs
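# Illustrative sketch combining the helpers above (assumes the net has
# already been run once, so every blob can be fetched from the workspace):
def _example_optimize_with_sizes(net):
    blob_sizes = collect_blob_sizes(net)
    optimization = optimize_interference(
        net,
        static_blobs=[str(b) for b in net.external_output],
        blob_sizes=blob_sizes,
        algo=AssignmentAlgorithm.DYNAMIC_PROGRAMMING)
    stats = compute_statistics(optimization.assignments)
    log.info("nbytes: %d -> %d", stats.baseline_nbytes,
             stats.optimized_nbytes)
    return optimization.net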
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.cnn import CNNModelHelper
import unittest
import numpy as np
class BrewTest(unittest.TestCase):
def setUp(self):
def myhelper(model, val=-1):
return val
if not brew.has_helper(myhelper):
brew.Register(myhelper)
self.myhelper = myhelper
def myhelper2(model, val=-1):
return val
if not brew.has_helper(myhelper2):
brew.Register(myhelper2)
self.myhelper2 = myhelper2
self.model = ModelHelper(name="test_model")
def test_dropout(self):
p = 0.2
X = np.ones((100, 100)).astype(np.float32) - p
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.dropout(model, "x", "out")
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertLess(abs(out.mean() - (1 - p)), 0.05)
def test_fc(self):
m, n, k = (15, 15, 15)
X = np.random.rand(m, k).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.fc(model, "x", "out_1", k, n)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
def test_arg_scope(self):
myhelper = self.myhelper
myhelper2 = self.myhelper2
n = 15
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
with brew.arg_scope([myhelper, myhelper2], val=n):
res1 = brew.myhelper(self.model)
res2 = brew.myhelper2(self.model)
self.assertEqual([n, n], [res1, res2])
def test_arg_scope_single(self):
X = np.random.rand(64, 3, 32, 32).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
with brew.arg_scope(
brew.conv,
stride=2,
pad=2,
weight_init=('XavierFill', {}),
bias_init=('ConstantFill', {})
):
brew.conv(
model=model,
blob_in="x",
blob_out="out",
dim_in=3,
dim_out=64,
kernel=3,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertEqual(out.shape, (64, 64, 17, 17))
def test_arg_scope_nested(self):
myhelper = self.myhelper
n = 16
with brew.arg_scope([myhelper], val=-3), \
brew.arg_scope([myhelper], val=-2):
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
res = brew.myhelper(self.model)
self.assertEqual(res, -2)
res = brew.myhelper(self.model, val=15)
self.assertEqual(res, 15)
def test_double_register(self):
myhelper = self.myhelper
with self.assertRaises(AttributeError):
brew.Register(myhelper)
def test_has_helper(self):
self.assertTrue(brew.has_helper(brew.conv))
self.assertTrue(brew.has_helper("conv"))
def myhelper3():
pass
self.assertFalse(brew.has_helper(myhelper3))
def test_model_helper(self):
X = np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
my_arg_scope = {'order': 'NHWC'}
model = ModelHelper(name="test_model", arg_scope=my_arg_scope)
with brew.arg_scope(
brew.conv,
stride=2,
pad=2,
weight_init=('XavierFill', {}),
bias_init=('ConstantFill', {})
):
brew.conv(
model=model,
blob_in="x",
blob_out="out",
dim_in=3,
dim_out=64,
kernel=3,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertEqual(out.shape, (64, 17, 17, 64))
def test_cnn_model_helper_deprecated(self):
X = np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
# CNNModelHelper is going to be deprecated soon. This test is only
# covering some CNNModelHelper logic
model = CNNModelHelper(name="test_model", order='NHWC')
self.assertEqual(model.arg_scope['order'], 'NHWC')
|
## @package layer_test_util
# Module caffe2.python.layer_test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from caffe2.python import (
core,
layer_model_instantiator,
layer_model_helper,
schema,
test_util,
workspace,
)
import numpy as np
OpSpec = namedtuple("OpSpec", "type input output")
class LayersTestCase(test_util.TestCase):
def setUp(self):
super(LayersTestCase, self).setUp()
self.setup_example()
def setup_example(self):
"""
        This is an undocumented feature of hypothesis; see
        https://github.com/HypothesisWorks/hypothesis-python/issues/59
"""
workspace.ResetWorkspace()
self.reset_model()
def reset_model(self, input_feature_schema=None, trainer_extra_schema=None):
input_feature_schema = input_feature_schema or schema.Struct(
('float_features', schema.Scalar((np.float32, (32,)))),
)
trainer_extra_schema = trainer_extra_schema or schema.Struct()
self.model = layer_model_helper.LayerModelHelper(
'test_model',
input_feature_schema=input_feature_schema,
trainer_extra_schema=trainer_extra_schema)
def new_record(self, schema_obj):
return schema.NewRecord(self.model.net, schema_obj)
def get_training_nets(self):
"""
        We don't use
        layer_model_instantiator.generate_training_nets_forward_only()
        here because it includes initialization of global constants, which
        makes testing tricky.
"""
train_net = core.Net('train_net')
train_init_net = core.Net('train_init_net')
for layer in self.model.layers:
layer.add_operators(train_net, train_init_net)
return train_init_net, train_net
def get_eval_net(self):
return layer_model_instantiator.generate_eval_net(self.model)
def get_predict_net(self):
return layer_model_instantiator.generate_predict_net(self.model)
def run_train_net(self):
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
layer_model_instantiator.generate_training_nets(self.model)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
def run_train_net_forward_only(self):
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
layer_model_instantiator.generate_training_nets_forward_only(
self.model)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
def assertBlobsEqual(self, spec_blobs, op_blobs):
"""
        spec_blobs can either be None or a list of blob names. If it is None,
        no assertion is performed. Elements of the list may also be None, in
        which case that position is not checked.
"""
if spec_blobs is None:
return
self.assertEqual(len(spec_blobs), len(op_blobs))
for spec_blob, op_blob in zip(spec_blobs, op_blobs):
if spec_blob is None:
continue
self.assertEqual(spec_blob, op_blob)
def assertNetContainOps(self, net, op_specs):
"""
        Given a net and a list of OpSpec's, check that the net matches the spec.
"""
ops = net.Proto().op
self.assertEqual(len(op_specs), len(ops))
for op, op_spec in zip(ops, op_specs):
self.assertEqual(op_spec.type, op.type)
self.assertBlobsEqual(op_spec.input, op.input)
self.assertBlobsEqual(op_spec.output, op.output)
return ops
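    # Illustrative sketch (hypothetical op and blob names): a subclass test
    # could verify the net produced by a layer like this:
    #
    #     init_net, train_net = self.get_training_nets()
    #     self.assertNetContainOps(
    #         train_net,
    #         [OpSpec("FC", ["input", "fc_w", "fc_b"], None)],
    #     )
    #
    # Passing None as the output spec skips checking the op's outputs.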
|
import numpy as np
import unittest
import sys
from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2
from caffe2.python import core, cnn, workspace, device_checker, test_util
class TestMiniAlexNet(test_util.TestCase):
def _MiniAlexNetNoDropout(self, order):
# First, AlexNet using the cnn wrapper.
model = cnn.CNNModelHelper(order, name="alexnet")
conv1 = model.Conv(
"data",
"conv1",
3,
16,
11,
("XavierFill", {}),
("ConstantFill", {}),
stride=4,
pad=0
)
relu1 = model.Relu(conv1, "relu1")
norm1 = model.LRN(relu1, "norm1", size=5, alpha=0.0001, beta=0.75)
pool1 = model.MaxPool(norm1, "pool1", kernel=3, stride=2)
conv2 = model.GroupConv(
pool1,
"conv2",
16,
32,
5,
("XavierFill", {}),
("ConstantFill", {"value": 0.1}),
group=2,
stride=1,
pad=2
)
relu2 = model.Relu(conv2, "relu2")
norm2 = model.LRN(relu2, "norm2", size=5, alpha=0.0001, beta=0.75)
pool2 = model.MaxPool(norm2, "pool2", kernel=3, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
32,
64,
3,
("XavierFill", {'std': 0.01}),
("ConstantFill", {}),
pad=1
)
relu3 = model.Relu(conv3, "relu3")
conv4 = model.GroupConv(
relu3,
"conv4",
64,
64,
3,
("XavierFill", {}),
("ConstantFill", {"value": 0.1}),
group=2,
pad=1
)
relu4 = model.Relu(conv4, "relu4")
conv5 = model.GroupConv(
relu4,
"conv5",
64,
32,
3,
("XavierFill", {}),
("ConstantFill", {"value": 0.1}),
group=2,
pad=1
)
relu5 = model.Relu(conv5, "relu5")
pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2)
fc6 = model.FC(
pool5, "fc6", 1152, 1024, ("XavierFill", {}),
("ConstantFill", {"value": 0.1})
)
relu6 = model.Relu(fc6, "relu6")
fc7 = model.FC(
relu6, "fc7", 1024, 1024, ("XavierFill", {}),
("ConstantFill", {"value": 0.1})
)
relu7 = model.Relu(fc7, "relu7")
fc8 = model.FC(
relu7, "fc8", 1024, 5, ("XavierFill", {}),
("ConstantFill", {"value": 0.0})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
loss = model.AveragedLoss([xent], ["loss"])
model.AddGradientOperators([loss])
return model
def _testMiniAlexNet(self, order):
# First, we get all the random initialization of parameters.
model = self._MiniAlexNetNoDropout(order)
workspace.ResetWorkspace()
workspace.RunNetOnce(model.param_init_net)
inputs = dict(
[(str(name), workspace.FetchBlob(str(name))) for name in
model.params]
)
if order == "NCHW":
inputs["data"] = np.random.rand(4, 3, 227, 227).astype(np.float32)
else:
inputs["data"] = np.random.rand(4, 227, 227, 3).astype(np.float32)
inputs["label"] = np.array([1, 2, 3, 4]).astype(np.int32)
cpu_device = caffe2_pb2.DeviceOption()
cpu_device.device_type = caffe2_pb2.CPU
gpu_device = caffe2_pb2.DeviceOption()
gpu_device.device_type = caffe2_pb2.CUDA
checker = device_checker.DeviceChecker(0.05, [cpu_device, gpu_device])
ret = checker.CheckNet(
model.net.Proto(),
inputs,
# The indices sometimes may be sensitive to small numerical
# differences in the input, so we ignore checking them.
ignore=['_pool1_idx', '_pool2_idx', '_pool5_idx']
)
self.assertEqual(ret, True)
@unittest.skipIf(not workspace.has_gpu_support,
"No GPU support. Skipping test.")
def testMiniAlexNetNCHW(self):
self._testMiniAlexNet("NCHW")
# No Group convolution support for NHWC right now
#@unittest.skipIf(not workspace.has_gpu_support,
# "No GPU support. Skipping test.")
#def testMiniAlexNetNHWC(self):
# self._testMiniAlexNet("NHWC")
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import net_printer
from caffe2.python.checkpoint import Job
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, final_output
import unittest
def example_loop():
with Task():
total = ops.Const(0)
total_large = ops.Const(0)
total_small = ops.Const(0)
total_tiny = ops.Const(0)
with ops.loop(10) as loop:
outer = ops.Mul([loop.iter(), ops.Const(10)])
with ops.loop(loop.iter()) as inner:
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
ops.Add([total, val], total)
def example_task():
with Task():
with ops.task_init():
one = ops.Const(1)
two = ops.Add([one, one])
with ops.task_init():
three = ops.Const(3)
accum = ops.Add([two, three])
# here, accum should be 5
with ops.task_exit():
# here, accum should be 6, since this executes after lines below
seven_1 = ops.Add([accum, one])
six = ops.Add([accum, one])
ops.Add([accum, one], [accum])
seven_2 = ops.Add([accum, one])
o6 = final_output(six)
o7_1 = final_output(seven_1)
o7_2 = final_output(seven_2)
return o6, o7_1, o7_2
def example_job():
with Job() as job:
with job.init_group:
example_loop()
example_task()
return job
class TestNetPrinter(unittest.TestCase):
def test_print(self):
self.assertTrue(len(net_printer.to_string(example_job())) > 0)
def test_valid_job(self):
job = example_job()
with job:
with Task():
# distributed_ctx_init_* ignored by analyzer
ops.Add(['distributed_ctx_init_a', 'distributed_ctx_init_b'])
        net_printer.analyze(job)
def test_undefined_blob(self):
job = example_job()
with job:
with Task():
ops.Add(['a', 'b'])
with self.assertRaises(AssertionError):
net_printer.analyze(job)
def test_multiple_definition(self):
job = example_job()
with job:
with Task():
ops.Add([ops.Const(0), ops.Const(1)], 'out1')
with Task():
ops.Add([ops.Const(2), ops.Const(3)], 'out1')
with self.assertRaises(AssertionError):
net_printer.analyze(job)
|
## @package dataio
# Module caffe2.python.dataio
"""
Defines the base interface for reading and writing operations.
Readers/Writers are objects that produce operations that read/write sequences
of data. Each operation reads or writes a list of BlobReferences.
Readers and Writers must be implemented such that read and write operations
are atomic and thread safe.
Examples of possible Readers and Writers:
QueueReader, QueueWriter,
    DatasetReader, DatasetWriter.

See `dataset.py` for an example implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.schema import Field, Struct, from_blob_list
import numpy as np
class Reader(object):
    """
    Reader is an abstract class to be implemented in order to provide
    operations capable of iterating through a dataset or stream of data.

    A Reader must implement at least one operation, `read`, which
    adds operations to a net that read the next batch of data. Readers can
    optionally support the `reset` operation, which is useful when multiple
    passes over the data are required.
    """
def __init__(self, schema=None):
if schema is not None:
assert isinstance(schema, Field)
self._schema = schema
def schema(self):
"""
Return the schema associated with the Reader
"""
assert self._schema is not None, 'Schema not provided for this reader.'
return self._schema
def _set_schema(self, schema):
self._schema = schema
def setup_ex(self, init_net, finish_net):
"""Nets to be executed once at startup and finish.
Experimental extension. Don't use yet"""
pass
def read_ex(self, local_init_net, local_finish_net):
"""Experimental extension to the interface. Don't use yet"""
read_net = core.Net('reader_body')
return ([read_net], ) + self.read(read_net)
def read_record_ex(self, local_init_net, local_finish_net):
"""Experimental extension to the interface. Don't use yet"""
nets, should_stop, fields = self.read_ex(
local_init_net, local_finish_net)
if self._schema:
fields = from_blob_list(self._schema, fields)
        return nets, should_stop, fields
def read(self, read_net):
"""
        Add operations to read_net that will read the next batch of data
and return a list of BlobReference representing the blobs that will
contain the batches produced.
Operations added to `read_net` must be thread safe and atomic, that is,
it should be possible to clone `read_net` and run multiple instances of
it in parallel.
Args:
read_net: the net that will be appended with read operations
Returns:
A tuple (should_stop, fields), with:
should_stop: BlobReference pointing to a boolean scalar
blob that indicates whether the read operation
                         was successful or whether the end of data has
been reached.
fields: A tuple of BlobReference containing the latest batch
of data that was read.
"""
raise NotImplementedError('Readers must implement `read`.')
def reset(self, net):
"""Append operations to `net` that will reset the reader.
This can be used to read the data multiple times.
Not all readers support this operation.
"""
        raise NotImplementedError('This reader cannot be reset.')
def read_record(self, read_net):
should_stop, fields = self.read(read_net)
if self._schema:
fields = from_blob_list(self._schema, fields)
return should_stop, fields
def execution_step(self, reader_net_name=None, external_should_stop=None):
"""Create an execution step with a net containing read operators.
The execution step will contain a `stop_blob` that knows how to stop
the execution loop when end of data was reached.
E.g.:
read_step, fields = reader.execution_step()
consume_net = core.Net('consume')
consume_net.Print(fields[0], [])
p = core.Plan('reader')
p.AddStep(read_step.AddNet(consume_net))
core.RunPlan(p)
Args:
reader_net_name: (optional) the name of the reader_net to be
created. The execution step will
be named accordingly.
Returns:
A tuple (read_step, fields), with:
read_step: A newly created execution step containing a net with
read operations. The step will have `stop_blob` set,
in order to stop the loop on end of data.
fields: A tuple of BlobReference containing the latest batch
of data that was read.
"""
reader_net = core.Net(reader_net_name or 'reader')
should_stop, fields = self.read_record(reader_net)
if external_should_stop is not None:
should_stop = reader_net.Or([external_should_stop, should_stop])
read_step = core.execution_step(
'{}_step'.format(reader_net_name),
reader_net,
should_stop_blob=should_stop)
return (read_step, fields)
class Writer(object):
"""
    Writer is an abstract class to be implemented in order to provide
    operations capable of feeding a data stream or a dataset.

    A Writer must implement 2 operations:
    `write`, which adds operations to a net that write the next batch of
    data, and `commit`, which adds operations to a net in order to indicate
    that no more data will be written.
"""
_schema = None
def schema(self):
return self._schema
def write(self, writer_net, fields):
"""Add operations to `writer_net` that write the next batch of data.
        Operations added to the net must be thread safe, so that multiple
        writers are able to write to the dataset in parallel.
Args:
fields: a tuple of BlobReference containing the batch of data to
write.
"""
raise NotImplementedError('Writers must implement write.')
def write_record(self, writer_net, fields):
if isinstance(fields, Field):
self._schema = fields
fields = fields.field_blobs()
self.write(writer_net, fields)
def setup_ex(self, init_net, finish_net):
"""Experimental, don't use yet"""
self.commit(finish_net)
def write_ex(self, fields, local_init_net, local_finish_net, stop_blob):
"""Experimental extension to the interface. Don't use yet"""
write_net = core.Net('write_net')
self.write(write_net, fields)
return [write_net]
def write_record_ex(
self, fields, local_init_net, local_finish_net, stop_blob=None):
"""Experimental extension to the interface. Don't use yet."""
if isinstance(fields, Field):
self._schema = fields
fields = fields.field_blobs()
if stop_blob is None:
stop_blob = local_init_net.NextName("dequeue_status")
write_nets = self.write_ex(
fields, local_init_net, local_finish_net, stop_blob)
return (write_nets, stop_blob)
def commit(self, finish_net):
"""Add operations to `finish_net` that signal end of data.
This must be implemented by all Writers, but may be no-op for some
of them.
"""
pass
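# Illustrative sketch (not part of the original module): a minimal Writer
# that prints every batch it receives; `commit` is a deliberate no-op since
# printing needs no end-of-data signal.
class _PrintWriter(Writer):
    def write(self, writer_net, fields):
        for field in fields:
            writer_net.Print(field, [])

    def commit(self, finish_net):
        pass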
class ReaderBuilder(object):
""" Allow usage of a reader in distributed fashion. """
def schema(self):
raise NotImplementedError()
def enqueue_splits(self, net, split_queue):
raise NotImplementedError()
def splits(self, net):
raise NotImplementedError()
def new_reader(self, split_queue):
raise NotImplementedError()
class PipedReaderBuilder(ReaderBuilder):
"""
    ReaderBuilder that modifies an underlying builder by calling `piper`
    on each new reader produced, and returns the result of
    the call. This way, it is possible to append data processing
    pipelines that will be replicated for each reader that gets created.
E.g.:
PipedReaderBuilder(
ReaderBuilder(...),
lambda reader: pipe(reader, processor=my_proc))
"""
def __init__(self, builder, piper):
self._builder = builder
self._piper = piper
def schema(self):
return self._builder.schema()
def enqueue_splits(self, net, split_queue):
return self._builder.enqueue_splits(net, split_queue)
def splits(self, net):
return self._builder.splits(net)
def new_reader(self, split_queue):
output = self._piper(self._builder.new_reader(split_queue))
return output if isinstance(output, Reader) else output.reader()
class Pipe(object):
def __init__(self, schema=None, obj_key=None):
self._num_writers = 0
self._num_readers = 0
self._schema = schema
self._obj_key = obj_key
def schema(self):
return self._schema
def setup(self, global_init_net):
pass
def reader(self):
raise NotImplementedError()
def writer(self):
raise NotImplementedError()
def num_readers(self):
return self._num_readers
def num_writers(self):
return self._num_writers
def _new_writer(self, writer_schema, writer_init_net):
if writer_schema is not None and self._schema is None:
self._schema = writer_schema
self._num_writers += 1
if self._obj_key is not None:
writer_init_net.add_attribute(self._obj_key, self)
def _new_reader(self, reader_init_net):
self._num_readers += 1
if self._obj_key is not None:
reader_init_net.add_attribute(self._obj_key, self)
class CounterReader(Reader):
""" Reader that produces increasing integers. """
def __init__(self):
Reader.__init__(self, schema=Struct(('iter', np.int64)))
self.counter = None
self.should_stop = None
def setup_ex(self, global_init_net, global_finish_net):
if self.counter is None:
self.counter = global_init_net.CreateCounter([], init_count=0)
self.should_stop = global_init_net.ConstantFill(
[], shape=[], dtype=core.DataType.BOOL, value=False)
def read_ex(self, local_init_net, local_finish_net):
count_net = core.Net('limited_reader_counter')
value = count_net.CountUp([self.counter], 1)
return [count_net], self.should_stop, [value]
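# Illustrative sketch (hypothetical net names): driving CounterReader by
# hand through the experimental *_ex interface.
def _example_counter_reader():
    from caffe2.python import workspace  # assumed available at runtime
    init_net = core.Net('example_init')
    reader = CounterReader()
    reader.setup_ex(init_net, core.Net('example_finish'))
    read_nets, should_stop, fields = reader.read_ex(
        core.Net('example_local_init'), core.Net('example_local_finish'))
    workspace.RunNetOnce(init_net)
    for _ in range(3):
        workspace.RunNetOnce(read_nets[0])
    # `fields[0]` now holds the latest iteration index; `should_stop` is a
    # constant False blob, so a wrapper such as ReaderWithLimit (below) is
    # needed to bound the loop.
    return workspace.FetchBlob(str(fields[0]))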
class ReaderWithLimit(Reader):
"""
Reader that stops after `num_iter` calls.
If num_iter is None it becomes just a simple reader that exports a global
flag for "out of data".
"""
def __init__(self, reader, num_iter=1):
Reader.__init__(self, schema=reader._schema)
self.reader = reader
self.counter = None
self.num_iter = num_iter
net = core.Net('reader_with_limit')
self._data_finished = net.AddExternalInput(
net.NextName('data_finished'))
if self.num_iter is not None:
self.counter = net.AddExternalInput(net.NextName('counter'))
def setup_ex(self, global_init_net, global_finish_net):
if self.counter:
global_init_net.CreateCounter(
[], [self.counter], init_count=int(self.num_iter))
self.reader.setup_ex(global_init_net, global_finish_net)
global_init_net.ConstantFill(
[], [self._data_finished],
shape=[], value=False, dtype=core.DataType.BOOL)
    def read_ex(self, local_init_net, local_finish_net):
        # 1. Check if we have reached the number of iterations and populate
        # the should_stop blob.
count_net = core.Net('limited_reader_counter')
if self.counter:
should_stop = count_net.CountDown([self.counter], 1)
else:
should_stop = count_net.ConstantFill(
[], 1,
                shape=[], value=False, dtype=core.DataType.BOOL)
        # 2. Call the original reader.
nets, local_data_finished, fields = self.reader.read_ex(
local_init_net, local_finish_net)
        self._set_schema(self.reader._schema)
        # 3. Check if the original reader is done.
check_done_net = core.Net('limited_reader_post')
# copy to the same blob as the counter output to trigger reader
# stopping
check_done_net.Copy(local_data_finished, should_stop)
# update global flag that underlying reader is done
check_done_net.Or([self._data_finished, local_data_finished],
[self._data_finished])
# this relies on `should_stop` being called after each net.
return [count_net] + nets + [check_done_net], should_stop, fields
def data_finished(self):
"""
        Return a blob that can be checked after the end of the reading task,
        which will contain a scalar boolean indicating whether the underlying
        reader has been exhausted (True) or whether we stopped because the
        limit of iterations was reached (False).
"""
return self._data_finished
def CountUntil(num_iter):
return ReaderWithLimit(CounterReader(), num_iter)
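# Illustrative usage sketch: CountUntil(5) yields a reader whose read_ex
# nets flip should_stop to True after 5 iterations. Because the wrapped
# CounterReader never runs out of data on its own, data_finished() remains
# False once the loop stops.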
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
import numpy as np
import unittest
import pickle
class TestDB(unittest.TestCase):
def testPicklable(self):
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.List(schema.Scalar(dtype=str)))
)
s2 = pickle.loads(pickle.dumps(s))
for r in (s, s2):
self.assertTrue(isinstance(r.field1, schema.Scalar))
self.assertTrue(isinstance(r.field2, schema.List))
self.assertTrue(getattr(r, 'non_existent', None) is None)
def testNormalizeField(self):
s = schema.Struct(('field1', np.int32), ('field2', str))
self.assertEquals(
s,
schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.Scalar(dtype=str))
)
)
def testTuple(self):
s = schema.Tuple(np.int32, str, np.float32)
s2 = schema.Struct(
('field_0', schema.Scalar(dtype=np.int32)),
('field_1', schema.Scalar(dtype=np.str)),
('field_2', schema.Scalar(dtype=np.float32))
)
self.assertEquals(s, s2)
self.assertEquals(s[0], schema.Scalar(dtype=np.int32))
self.assertEquals(s[1], schema.Scalar(dtype=np.str))
self.assertEquals(s[2], schema.Scalar(dtype=np.float32))
self.assertEquals(
s[2, 0],
schema.Struct(
('field_2', schema.Scalar(dtype=np.float32)),
('field_0', schema.Scalar(dtype=np.int32)),
)
)
# test iterator behavior
for i, (v1, v2) in enumerate(zip(s, s2)):
self.assertEquals(v1, v2)
self.assertEquals(s[i], v1)
self.assertEquals(s2[i], v1)
def testRawTuple(self):
s = schema.RawTuple(2)
self.assertEquals(
s, schema.Struct(
('field_0', schema.Scalar()), ('field_1', schema.Scalar())
)
)
self.assertEquals(s[0], schema.Scalar())
self.assertEquals(s[1], schema.Scalar())
def testStructIndexing(self):
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.List(schema.Scalar(dtype=str))),
('field3', schema.Struct()),
)
self.assertEquals(s['field2'], s.field2)
self.assertEquals(s['field2'], schema.List(schema.Scalar(dtype=str)))
self.assertEquals(s['field3'], schema.Struct())
self.assertEquals(
s['field2', 'field1'],
schema.Struct(
('field2', schema.List(schema.Scalar(dtype=str))),
('field1', schema.Scalar(dtype=np.int32)),
)
)
def testListInStructIndexing(self):
a = schema.List(schema.Scalar(dtype=str))
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', a)
)
self.assertEquals(s['field2:lengths'], a.lengths)
self.assertEquals(s['field2:items'], a.items)
with self.assertRaises(KeyError):
s['fields2:items:non_existent']
with self.assertRaises(KeyError):
s['fields2:non_existent']
def testMapInStructIndexing(self):
a = schema.Map(
schema.Scalar(dtype=np.int32),
schema.Scalar(dtype=np.float32),
)
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', a)
)
self.assertEquals(s['field2:keys'], a.keys)
self.assertEquals(s['field2:values'], a.values)
with self.assertRaises(KeyError):
s['fields2:keys:non_existent']
def testPreservesMetadata(self):
s = schema.Struct(
('a', schema.Scalar(np.float32)), (
'b', schema.Scalar(
np.int32,
metadata=schema.Metadata(categorical_limit=5)
)
), (
'c', schema.List(
schema.Scalar(
np.int32,
metadata=schema.Metadata(categorical_limit=6)
)
)
)
)
# attach metadata to lengths field
s.c.lengths.set_metadata(schema.Metadata(categorical_limit=7))
self.assertEqual(None, s.a.metadata)
self.assertEqual(5, s.b.metadata.categorical_limit)
self.assertEqual(6, s.c.value.metadata.categorical_limit)
self.assertEqual(7, s.c.lengths.metadata.categorical_limit)
sc = s.clone()
self.assertEqual(None, sc.a.metadata)
self.assertEqual(5, sc.b.metadata.categorical_limit)
self.assertEqual(6, sc.c.value.metadata.categorical_limit)
self.assertEqual(7, sc.c.lengths.metadata.categorical_limit)
sv = schema.from_blob_list(
s, [
np.array([3.4]), np.array([2]), np.array([3]),
np.array([1, 2, 3])
]
)
self.assertEqual(None, sv.a.metadata)
self.assertEqual(5, sv.b.metadata.categorical_limit)
self.assertEqual(6, sv.c.value.metadata.categorical_limit)
self.assertEqual(7, sv.c.lengths.metadata.categorical_limit)
def testDupField(self):
with self.assertRaises(ValueError):
schema.Struct(
('a', schema.Scalar()),
('a', schema.Scalar()))
def testAssignToField(self):
with self.assertRaises(TypeError):
s = schema.Struct(('a', schema.Scalar()))
s.a = schema.Scalar()
def testPreservesEmptyFields(self):
s = schema.Struct(
('a', schema.Scalar(np.float32)),
('b', schema.Struct()),
)
sc = s.clone()
self.assertIn("a", sc.fields)
self.assertIn("b", sc.fields)
sv = schema.from_blob_list(s, [np.array([3.4])])
self.assertIn("a", sv.fields)
self.assertIn("b", sv.fields)
self.assertEqual(0, len(sv.b.fields))
def testStructAddition(self):
s1 = schema.Struct(
('a', schema.Scalar())
)
s2 = schema.Struct(
('b', schema.Scalar())
)
s = s1 + s2
self.assertIn("a", s.fields)
self.assertIn("b", s.fields)
with self.assertRaises(TypeError):
s1 + s1
with self.assertRaises(TypeError):
s1 + schema.Scalar()
def testStructNestedAddition(self):
s1 = schema.Struct(
('a', schema.Scalar()),
('b', schema.Struct(
('c', schema.Scalar())
)),
)
s2 = schema.Struct(
('b', schema.Struct(
('d', schema.Scalar())
))
)
s = s1 + s2
self.assertEqual(['a', 'b:c', 'b:d'], s.field_names())
s3 = schema.Struct(
('b', schema.Scalar()),
)
with self.assertRaises(TypeError):
s = s1 + s3
def testGetFieldByNestedName(self):
st = schema.Struct(
('a', schema.Scalar()),
('b', schema.Struct(
('c', schema.Struct(
('d', schema.Scalar()),
)),
)),
)
self.assertRaises(KeyError, st.__getitem__, '')
self.assertRaises(KeyError, st.__getitem__, 'x')
self.assertRaises(KeyError, st.__getitem__, 'x:y')
self.assertRaises(KeyError, st.__getitem__, 'b:c:x')
a = st['a']
self.assertTrue(isinstance(a, schema.Scalar))
bc = st['b:c']
self.assertIn('d', bc.fields)
bcd = st['b:c:d']
self.assertTrue(isinstance(bcd, schema.Scalar))
def testAddFieldByNestedName(self):
f_a = schema.Scalar(blob=core.BlobReference('blob1'))
f_b = schema.Struct(
('c', schema.Struct(
('d', schema.Scalar(blob=core.BlobReference('blob2'))),
)),
)
f_x = schema.Struct(
('x', schema.Scalar(blob=core.BlobReference('blob3'))),
)
with self.assertRaises(TypeError):
st = schema.Struct(
('a', f_a),
('b', f_b),
('b:c:d', f_x),
)
with self.assertRaises(TypeError):
st = schema.Struct(
('a', f_a),
('b', f_b),
('b:c:d:e', f_x),
)
st = schema.Struct(
('a', f_a),
('b', f_b),
('e:f', f_x),
)
self.assertEqual(['a', 'b:c:d', 'e:f:x'], st.field_names())
self.assertEqual(['blob1', 'blob2', 'blob3'], st.field_blobs())
st = schema.Struct(
('a', f_a),
('b:c:e', f_x),
('b', f_b),
)
self.assertEqual(['a', 'b:c:e:x', 'b:c:d'], st.field_names())
self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())
st = schema.Struct(
('a:a1', f_a),
('b:b1', f_b),
('a', f_x),
)
self.assertEqual(['a:a1', 'a:x', 'b:b1:c:d'], st.field_names())
self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())
def testContains(self):
st = schema.Struct(
('a', schema.Scalar()),
('b', schema.Struct(
('c', schema.Struct(
('d', schema.Scalar()),
)),
)),
)
self.assertTrue('a' in st)
self.assertTrue('b:c' in st)
self.assertTrue('b:c:d' in st)
self.assertFalse('' in st)
self.assertFalse('x' in st)
self.assertFalse('b:c:x' in st)
self.assertFalse('b:c:d:x' in st)
|