# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
cvpr19_models.models.evaluator.py
=================================
Active acquisition model as described in `Zhang, Zizhao, et al. "Reducing uncertainty in
undersampled mri reconstruction with active acquisition." Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition. 2019.`
"""
import functools
from typing import Optional
import torch
import torch.nn as nn
from . import fft_utils, reconstruction
class SimpleSequential(nn.Module):
def __init__(self, net1, net2):
super(SimpleSequential, self).__init__()
self.net1 = net1
self.net2 = net2
def forward(self, x, mask):
output = self.net1(x, mask)
return self.net2(output, mask)
class SpectralMapDecomposition(nn.Module):
def __init__(self):
super(SpectralMapDecomposition, self).__init__()
def forward(self, reconstructed_image, mask_embedding, mask):
batch_size = reconstructed_image.shape[0]
height = reconstructed_image.shape[2]
width = reconstructed_image.shape[3]
# create spectral maps in kspace
kspace = fft_utils.fft(reconstructed_image)
kspace = kspace.unsqueeze(1).repeat(1, width, 1, 1, 1)
# separate image into spectral maps
separate_mask = torch.zeros([1, width, 1, 1, width], dtype=torch.float32)
for i in range(width):
separate_mask[0, i, 0, 0, i] = 1
separate_mask = separate_mask.to(reconstructed_image.device)
masked_kspace = torch.where(
separate_mask.byte(), kspace, torch.tensor(0.0).to(kspace.device)
)
masked_kspace = masked_kspace.view(batch_size * width, 2, height, width)
# convert spectral maps to image space
separate_images = fft_utils.ifft(masked_kspace)
# result is (batch, [real_M0, img_M0, real_M1, img_M1, ...], height, width)
separate_images = separate_images.contiguous().view(
batch_size, 2, width, height, width
)
# add mask information as a summation -- might not be optimal
if mask is not None:
separate_images = (
separate_images + mask.permute(0, 3, 1, 2).unsqueeze(1).detach()
)
separate_images = separate_images.contiguous().view(
batch_size, 2 * width, height, width
)
# concatenate mask embedding
if mask_embedding is not None:
spectral_map = torch.cat([separate_images, mask_embedding], dim=1)
else:
spectral_map = separate_images
return spectral_map
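# A standalone sketch of the decomposition above, written against the modern
# ``torch.fft`` API as an assumption (the module itself goes through the
# repo's ``fft_utils``): each k-space column is isolated and inverted back to
# image space, yielding one "spectral map" per column.
def _demo_spectral_decomposition():
    batch, height, width = 2, 8, 8
    image = torch.randn(batch, height, width, dtype=torch.complex64)
    kspace = torch.fft.fft2(image)
    maps = []
    for col in range(width):
        masked = torch.zeros_like(kspace)
        masked[..., col] = kspace[..., col]  # keep a single k-space column
        maps.append(torch.fft.ifft2(masked))
    spectral_maps = torch.stack(maps, dim=1)  # (batch, width, height, width)
    # By linearity of the FFT, the per-column maps sum back to the image.
    assert torch.allclose(spectral_maps.sum(dim=1), image, atol=1e-4)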
class EvaluatorNetwork(nn.Module):
"""Evaluator network used in Zhang et al., CVPR'19.
Args:
number_of_filters(int): Number of filters used in convolutions. Defaults to 256.
number_of_conv_layers(int): Depth of the model defined as a number of
convolutional layers. Defaults to 4.
use_sigmoid(bool): Whether the sigmoid non-linearity is applied to the
output of the network. Defaults to False.
width(int): The width of the image. Defaults to 128 (corresponds to DICOM).
height(Optional[int]): The height of the image. If ``None``, the value of ``width``
is used. Defaults to ``None``.
mask_embed_dim(int): Dimensionality of the mask embedding. Defaults to 6.
num_output_channels(Optional[int]): The dimensionality of the output. If ``None``,
the value of ``width`` is used. Defaults to ``None``.
"""
def __init__(
self,
number_of_filters: int = 256,
number_of_conv_layers: int = 4,
use_sigmoid: bool = False,
width: int = 128,
height: Optional[int] = None,
mask_embed_dim: int = 6,
num_output_channels: Optional[int] = None,
):
print(f"[EvaluatorNetwork] -> n_layers = {number_of_conv_layers}")
super(EvaluatorNetwork, self).__init__()
self.spectral_map = SpectralMapDecomposition()
self.mask_embed_dim = mask_embed_dim
if height is None:
height = width
number_of_input_channels = 2 * width + mask_embed_dim
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
sequence = [
nn.Conv2d(
number_of_input_channels,
number_of_filters,
kernel_size=4,
stride=2,
padding=1,
),
nn.LeakyReLU(0.2, True),
]
in_channels = number_of_filters
for n in range(1, number_of_conv_layers):
if n < number_of_conv_layers - 1:
if n <= 4:
out_channels = in_channels * 2
else:
out_channels = in_channels // 2
else:
out_channels = in_channels
sequence += [
nn.Conv2d(
in_channels,
out_channels,
kernel_size=4,
stride=2,
padding=1,
bias=use_bias,
),
norm_layer(out_channels),
nn.LeakyReLU(0.2, True),
]
in_channels = out_channels
kernel_size_width = width // 2 ** number_of_conv_layers
kernel_size_height = height // 2 ** number_of_conv_layers
sequence += [nn.AvgPool2d(kernel_size=(kernel_size_height, kernel_size_width))]
if num_output_channels is None:
num_output_channels = width
sequence += [
nn.Conv2d(
in_channels, num_output_channels, kernel_size=1, stride=1, padding=0
)
]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
self.apply(reconstruction.init_func)
def forward(
self,
input_tensor: torch.Tensor,
mask_embedding: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
):
"""Computes scores for each k-space column.
Args:
input_tensor(torch.Tensor): Batch of reconstructed images,
as produced by :class:`models.reconstruction.ReconstructorNetwork`.
mask_embedding(Optional[torch.Tensor]): Corresponding batch of mask embeddings
produced by :class:`models.reconstruction.ReconstructorNetwork`, if needed.
mask(Optional[torch.Tensor]): Corresponding masks arrays, if needed.
Returns:
torch.Tensor: Evaluator score for each k-space column in each image in the batch.
"""
spectral_map_and_mask_embedding = self.spectral_map(
input_tensor, mask_embedding, mask
)
return self.model(spectral_map_and_mask_embedding).squeeze(3).squeeze(2)
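# A minimal usage sketch with illustrative sizes (an assumption, not the
# published training setup): with ``width=32`` and the default four conv
# layers, the network maps a two-channel reconstruction plus a mask embedding
# to one score per k-space column.
def _demo_evaluator_network():
    net = EvaluatorNetwork(number_of_filters=64, width=32, mask_embed_dim=6)
    image = torch.randn(4, 2, 32, 32)      # (batch, real/imag, height, width)
    embedding = torch.randn(4, 6, 32, 32)  # spatially broadcast mask embedding
    scores = net(image, mask_embedding=embedding)
    assert scores.shape == (4, 32)         # one score per k-space column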
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
import torch.utils.data
import activemri.experimental.cvpr19_models.models.fft_utils as fft_utils
class Slice(torch.utils.data.Dataset):
def __init__(
self,
transform,
dicom_root,
which="train",
resolution=320,
scan_type=None,
num_volumes=None,
num_rand_slices=None,
):
self.transform = transform
self.dataset = _DicomDataset(
dicom_root / str(resolution) / which, scan_type, num_volumes=num_volumes
)
self.num_slices = self.dataset.metadata["num_slices"]
self.num_rand_slices = num_rand_slices
self.rng = np.random.RandomState()
def __getitem__(self, i):
i = int(i)
if self.num_rand_slices is None:
volume_i, slice_i = divmod(i, self.num_slices)
else:
volume_i = (i * self.num_slices // self.num_rand_slices) // self.num_slices
slice_ids = list(range(self.num_slices))
self.rng.seed(seed=volume_i)
self.rng.shuffle(slice_ids)
slice_i = slice_ids[i % self.num_rand_slices]
volume, volume_metadata = self.dataset[volume_i]
slice = volume[slice_i : slice_i + 1]
slice = slice.astype(np.float32)
return self.transform(slice, volume_metadata["mean"], volume_metadata["std"])
def __len__(self):
if self.num_rand_slices is None:
return len(self.dataset) * self.num_slices
else:
return len(self.dataset) * self.num_rand_slices
class DicomDataTransform:
# If `seed` is none and `seed_per_image` is True, masks will be generated with a unique seed
# per image, computed as `seed = int( 1009 * image.sum().abs())`.
def __init__(self, mask_func, fixed_seed=None, seed_per_image=False):
self.mask_func = mask_func
self.fixed_seed = fixed_seed
self.seed_per_image = seed_per_image
def __call__(self, image, mean, std):
image = (image - mean) / (std + 1e-12)
image = torch.from_numpy(image)
image = fft_utils.dicom_to_0_1_range(image)
shape = np.array(image.shape)
seed = (
int(1009 * image.sum().abs())
if self.fixed_seed is None and self.seed_per_image
else self.fixed_seed
)
mask = self.mask_func(shape, seed) if self.mask_func is not None else None
image = torch.cat([image, torch.zeros_like(image)], dim=0)
return mask, image
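# A minimal usage sketch (assumes ``fft_utils`` resolves as imported above):
# with no mask function, the transform just normalizes the slice and appends
# a zero imaginary channel, returning ``(None, image)``.
def _demo_dicom_transform():
    transform = DicomDataTransform(mask_func=None, seed_per_image=True)
    slice_array = np.random.rand(1, 128, 128).astype(np.float32)
    mask, image = transform(slice_array, mean=0.5, std=0.2)
    assert mask is None and image.shape == (2, 128, 128)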
class _DicomDataset:
def __init__(self, root, scan_type=None, num_volumes=None):
self.metadata = json.load(open(os.path.join(root, "metadata.json")))
shape = (
len(self.metadata["volumes"]),
self.metadata["num_slices"],
self.metadata["resolution"],
self.metadata["resolution"],
)
self.volumes = np.memmap(
os.path.join(root, "data.bin"), self.metadata["dtype"], "r"
).reshape(shape)
volume_ids = []
for i, volume in enumerate(self.metadata["volumes"]):
if scan_type == "all" or volume["scan_type"] == scan_type:
volume_ids.append(i)
if num_volumes is not None:
rng = np.random.RandomState(1234)
rng.shuffle(volume_ids)
volume_ids = volume_ids[:num_volumes]
self.volume_ids = {i: id for i, id in enumerate(volume_ids)}
def __getitem__(self, i):
""" returns (data: 4d array, metadata: dict) """
id = self.volume_ids[i]
return self.volumes[id], self.metadata["volumes"][id]
def __len__(self):
return len(self.volume_ids)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pathlib
import numpy as np
import torch
import torch.utils.data
from . import dicom_data_loader, masking_utils, raw_data_loader
def get_train_valid_loader(
dataset_dir,
batch_size,
num_workers=4,
pin_memory=False,
which_dataset="KNEE",
mask_type="basic",
rnl_params=None,
num_volumes_train=None,
num_volumes_val=None,
):
if which_dataset == "KNEE":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
dicom_root = pathlib.Path(dataset_dir)
data_transform = dicom_data_loader.DicomDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
train_data = dicom_data_loader.Slice(
data_transform,
dicom_root,
which="train",
resolution=128,
scan_type="all",
num_volumes=num_volumes_train,
num_rand_slices=None,
)
valid_data = dicom_data_loader.Slice(
data_transform,
dicom_root,
which="val",
resolution=128,
scan_type="all",
num_volumes=num_volumes_val,
num_rand_slices=None,
)
elif which_dataset == "KNEE_RAW":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
raw_root = dataset_dir
if not os.path.isdir(raw_root):
raise FileNotFoundError(raw_root + " does not exist. Change to the correct path.")
data_transform = raw_data_loader.RawDataTransform(
mask_func, fixed_seed=None, seed_per_image=False
)
train_data = raw_data_loader.RawSliceData(
os.path.join(raw_root, "knee_singlecoil_train"),
transform=data_transform,
num_cols=368,
num_volumes=num_volumes_train,
)
data_transform = raw_data_loader.RawDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
valid_data = raw_data_loader.RawSliceData(
os.path.join(raw_root, "knee_singlecoil_val"),
transform=data_transform,
num_cols=368,
num_volumes=num_volumes_val,
custom_split="val",
)
else:
raise ValueError(f"Invalid dataset: {which_dataset}.")
def init_fun(_):
return np.random.seed(None)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size,
sampler=None,
shuffle=True,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=batch_size,
sampler=None,
shuffle=True,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
return train_loader, valid_loader
def get_test_loader(
dataset_dir,
batch_size,
num_workers=2,
pin_memory=False,
which_dataset="KNEE",
mask_type="basic",
rnl_params=None,
):
if which_dataset == "KNEE":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
dicom_root = pathlib.Path(dataset_dir)
data_transform = dicom_data_loader.DicomDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
test_data = dicom_data_loader.Slice(
data_transform,
dicom_root,
which="public_leaderboard",
resolution=128,
scan_type="all",
num_volumes=None,
num_rand_slices=None,
)
def init_fun(_):
return np.random.seed()
data_loader = torch.utils.data.DataLoader(
test_data,
batch_size=batch_size,
sampler=None,
shuffle=False,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
elif which_dataset == "KNEE_RAW":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
raw_root = dataset_dir
if not os.path.isdir(raw_root):
raise FileNotFoundError(raw_root + " does not exist. Change to the correct path.")
data_transform = raw_data_loader.RawDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
test_data = raw_data_loader.RawSliceData(
raw_root + "/knee_singlecoil_val",
transform=data_transform,
num_cols=368,
custom_split="test",
)
def init_fun(_):
return np.random.seed(None)
data_loader = torch.utils.data.DataLoader(
test_data,
batch_size=batch_size,
sampler=None,
shuffle=False,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
else:
raise ValueError(f"Invalid dataset: {which_dataset}.")
return data_loader
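# A minimal usage sketch with a hypothetical dataset path (assumes the DICOM
# data has been preprocessed under ``<dataset_dir>/128/{train,val}`` as the
# loaders above expect). Each batch is a ``(mask, image)`` pair.
def _demo_knee_loaders(dataset_dir="/path/to/knee_dicom"):
    train_loader, _valid_loader = get_train_valid_loader(
        dataset_dir, batch_size=8, which_dataset="KNEE"
    )
    mask, image = next(iter(train_loader))
    return mask, image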
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import base_data_loader
def create_data_loaders(options, is_test=False):
if not is_test:
train_loader, valid_loader = base_data_loader.get_train_valid_loader(
options.dataset_dir,
batch_size=options.batchSize,
num_workers=options.nThreads,
pin_memory=True,
which_dataset=options.dataroot,
mask_type=options.mask_type,
rnl_params=options.rnl_params,
num_volumes_train=options.num_volumes_train,
num_volumes_val=options.num_volumes_val,
)
return train_loader, valid_loader
else:
test_loader = base_data_loader.get_test_loader(
options.dataset_dir,
batch_size=options.batchSize,
num_workers=0,
pin_memory=True,
which_dataset=options.dataroot,
mask_type=options.mask_type,
rnl_params=options.rnl_params,
)
return test_loader
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
def get_mask_func(mask_type, which_dataset, rnl_params=None):
# Whether the number of lines is random or not
random_num_lines = mask_type[-4:] == "_rnl"
if "symmetric_basic" in mask_type:
logging.info(
f"Mask is symmetric uniform choice with random_num_lines={random_num_lines}."
)
return SymmetricUniformChoiceMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "basic" in mask_type:
# First two parameters are ignored if `random_num_lines` is True
logging.info(
f"Mask is fixed acceleration mask with random_num_lines={random_num_lines}."
)
return BasicMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "low_to_high" in mask_type:
logging.info(
f"Mask is symmetric low to high with random_num_lines={random_num_lines}."
)
return SymmetricLowToHighMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "symmetric_grid" in mask_type:
logging.info("Mask is symmetric grid.")
return SymmetricUniformGridMaskFunc(
[], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
)
if "grid" in mask_type:
logging.info("Mask is grid (not symmetric).")
return UniformGridMaskFunc(
[], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
)
raise ValueError(f"Invalid mask type: {mask_type}.")
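# A minimal sketch of the mask functions defined below: the "basic" variant
# keeps 16 low-frequency columns plus 16 random high-frequency columns for a
# 128-column image (4x acceleration). ``MaskFunc.__call__`` reads the number
# of columns from ``shape[-2]`` and returns a broadcastable mask tensor.
def _demo_basic_mask():
    mask_func = get_mask_func("basic", which_dataset="KNEE")
    mask = mask_func((1, 128, 128, 2), seed=0)
    assert mask.shape == (1, 1, 1, 128) and int(mask.sum()) == 32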
class MaskFunc:
def __init__(
self,
center_fractions,
accelerations,
which_dataset,
random_num_lines=False,
rnl_params=None,
):
if len(center_fractions) != len(accelerations):
raise ValueError(
"Number of center fractions should match number of accelerations"
)
self.center_fractions = center_fractions
self.accelerations = accelerations
self.random_num_lines = random_num_lines
if rnl_params is None:
# The lines below give approx. 4x acceleration on average.
self.min_lowf_lines = 10 if which_dataset != "KNEE_RAW" else 30
self.max_lowf_lines = 12 if which_dataset != "KNEE_RAW" else 32
self.highf_beta_alpha = 1
self.highf_beta_beta = 5
else:
params = [int(x) for x in rnl_params.split(",")]
assert len(params) == 4
self.min_lowf_lines = params[0]
self.max_lowf_lines = params[1]
self.highf_beta_alpha = params[2]
self.highf_beta_beta = params[3]
self.rng = np.random.RandomState()
def __call__(self, shape, seed=None):
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
self.rng.seed(seed)
num_cols = shape[-2]
# Determine number of low and high frequency lines to scan
if self.random_num_lines:
# These are guaranteed to be an even number (useful for symmetric masks)
num_low_freqs = self.rng.choice(
range(self.min_lowf_lines, self.max_lowf_lines, 2)
)
num_high_freqs = (
int(
self.rng.beta(self.highf_beta_alpha, self.highf_beta_beta)
* (num_cols - num_low_freqs)
// 2
)
* 2
)
else:
choice = self.rng.randint(0, len(self.accelerations))
center_fraction = self.center_fractions[choice]
acceleration = self.accelerations[choice]
num_low_freqs = int(round(num_cols * center_fraction))
num_high_freqs = int(num_cols // acceleration - num_low_freqs)
# Create the mask
mask = self.create_lf_focused_mask(num_cols, num_high_freqs, num_low_freqs)
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-1] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
p = num_high_freqs / (num_cols - num_low_freqs)
mask = self.rng.uniform(size=num_cols) < p
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
return mask
class BasicMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
hf_cols = self.rng.choice(
np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
)
hf_cols[hf_cols >= (num_cols - num_low_freqs + 1) // 2] += num_low_freqs
mask[hf_cols] = True
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
mask = np.fft.ifftshift(mask, axes=0)
return mask
class SymmetricUniformChoiceMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
num_cols //= 2
num_low_freqs //= 2
num_high_freqs //= 2
hf_cols = self.rng.choice(
np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
)
mask[hf_cols] = True
pad = num_cols - num_low_freqs
mask[pad:num_cols] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
mask = np.fft.ifftshift(mask, axes=0)
return mask
class UniformGridMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
acceleration = self.rng.choice([4, 8, 16])
hf_cols = np.arange(acceleration, num_cols, acceleration)
mask[hf_cols] = True
mask[: num_low_freqs // 2] = mask[-(num_low_freqs // 2) :] = True
return mask
class SymmetricLowToHighMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
num_cols //= 2
num_low_freqs //= 2
num_high_freqs //= 2
num_low_freqs += num_high_freqs
pad = num_cols - num_low_freqs
mask[pad:num_cols] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
mask = np.fft.ifftshift(mask, axes=0)
return mask
class SymmetricUniformGridMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
acceleration = self.rng.choice([4, 8, 16])
num_cols //= 2
num_low_freqs //= 2
hf_cols = np.arange(acceleration, num_cols, acceleration)
mask[hf_cols] = True
mask[:num_low_freqs] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
return mask
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
import h5py
import numpy as np
import torch
import torch.utils.data
def ifftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def roll(x, shift, dim):
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
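# A quick sanity sketch: these helpers mirror numpy's fftshift/ifftshift,
# including their off-by-one difference on odd-length axes.
def _demo_shift_helpers():
    x = torch.arange(9).float()
    assert np.array_equal(fftshift(x, dim=0).numpy(), np.fft.fftshift(x.numpy()))
    assert np.array_equal(ifftshift(x, dim=0).numpy(), np.fft.ifftshift(x.numpy()))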
class RawSliceData(torch.utils.data.Dataset):
def __init__(
self,
root,
transform,
num_cols=None,
num_volumes=None,
num_rand_slices=None,
custom_split=None,
):
self.transform = transform
self.examples = []
self.num_rand_slices = num_rand_slices
self.rng = np.random.RandomState(1234)
files = []
for fname in list(pathlib.Path(root).iterdir()):
data = h5py.File(fname, "r")
if num_cols is not None and data["kspace"].shape[2] != num_cols:
continue
files.append(fname)
if custom_split is not None:
split_info = []
with open(f"data/splits/raw_{custom_split}.txt") as f:
for line in f:
split_info.append(line.rsplit("\n")[0])
files = [f for f in files if f.name in split_info]
if num_volumes is not None:
self.rng.shuffle(files)
files = files[:num_volumes]
for volume_i, fname in enumerate(sorted(files)):
data = h5py.File(fname, "r")
kspace = data["kspace"]
if num_rand_slices is None:
num_slices = kspace.shape[0]
self.examples += [(fname, slice) for slice in range(num_slices)]
else:
slice_ids = list(range(kspace.shape[0]))
self.rng.seed(seed=volume_i)
self.rng.shuffle(slice_ids)
self.examples += [
(fname, slice) for slice in slice_ids[:num_rand_slices]
]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
fname, slice = self.examples[i]
with h5py.File(fname, "r") as data:
kspace = data["kspace"][slice]
return self.transform(kspace, data.attrs)
class RawDataTransform:
def __init__(self, mask_func, fixed_seed=None, seed_per_image=False):
self.mask_func = mask_func
self.fixed_seed = fixed_seed
self.seed_per_image = seed_per_image
def __call__(self, kspace, attrs):
kspace = torch.from_numpy(np.stack([kspace.real, kspace.imag], axis=-1))
kspace = ifftshift(kspace, dim=(0, 1))
image = torch.ifft(kspace, 2, normalized=False)
image = ifftshift(image, dim=(0, 1))
# norm = torch.sqrt(image[..., 0] ** 2 + image[..., 1] ** 2).max()
# 5.637766165023095e-08, 7.072103529760345e-07, 5.471710210258607e-06
# normalize by the mean norm of training images.
image /= 7.072103529760345e-07
kspace /= 7.072103529760345e-07
shape = np.array(kspace.shape)
seed = (
int(1009 * image.sum().abs())
if self.fixed_seed is None and self.seed_per_image
else self.fixed_seed
)
mask = self.mask_func(shape, seed)
return mask, image, kspace
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import fastmri.models
import torch
import activemri.models
# noinspection PyAbstractClass
class Unet(activemri.models.Reconstructor):
def __init__(
self, in_chans=2, out_chans=2, chans=32, num_pool_layers=4, drop_prob=0.0
):
super().__init__()
self.unet = fastmri.models.Unet(
in_chans,
out_chans,
chans=chans,
num_pool_layers=num_pool_layers,
drop_prob=drop_prob,
)
def forward( # type: ignore
self, image: torch.Tensor, mean: torch.Tensor, std: torch.Tensor
) -> Dict[str, Any]:
output = self.unet(image).squeeze(1)
std = std.unsqueeze(1).unsqueeze(2)
mean = mean.unsqueeze(1).unsqueeze(2)
reconstruction = output * std + mean
return {"reconstruction": reconstruction}
def init_from_checkpoint(self, checkpoint: Dict[str, Any]) -> Optional[Any]:
self.load_state_dict(checkpoint["state_dict"])
return None
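# A minimal usage sketch (assumes ``fastmri`` is installed; single-channel
# magnitude images are an illustrative choice, so the channel dimension
# squeezes away inside ``forward``).
def _demo_unet_reconstructor():
    model = Unet(in_chans=1, out_chans=1, chans=8, num_pool_layers=2)
    image = torch.randn(2, 1, 64, 64)
    mean, std = torch.zeros(2), torch.ones(2)
    out = model(image, mean, std)
    assert out["reconstruction"].shape == (2, 64, 64)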
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, Dict, Optional
import torch.nn
class Reconstructor(torch.nn.Module):
def __init__(self, **kwargs):
super().__init__()
def forward(self, *args, **kwargs) -> Dict[str, Any]:
pass
@abc.abstractmethod
def init_from_checkpoint(self, checkpoint: Dict[str, Any]) -> Optional[Any]:
pass
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import activemri.experimental.cvpr19_models.models.reconstruction as cvpr19_reconstruction
import activemri.models
# This is just a wrapper for the model in cvpr19_models folder
class CVPR19Reconstructor(activemri.models.Reconstructor):
def __init__(
self,
number_of_encoder_input_channels: int = 2,
number_of_decoder_output_channels: int = 3,
number_of_filters: int = 128,
dropout_probability: float = 0.0,
number_of_layers_residual_bottleneck: int = 6,
number_of_cascade_blocks: int = 3,
mask_embed_dim: int = 6,
padding_type: str = "reflect",
n_downsampling: int = 3,
img_width: int = 128,
use_deconv: bool = True,
):
super().__init__()
self.reconstructor = cvpr19_reconstruction.ReconstructorNetwork(
number_of_encoder_input_channels=number_of_encoder_input_channels,
number_of_decoder_output_channels=number_of_decoder_output_channels,
number_of_filters=number_of_filters,
dropout_probability=dropout_probability,
number_of_layers_residual_bottleneck=number_of_layers_residual_bottleneck,
number_of_cascade_blocks=number_of_cascade_blocks,
mask_embed_dim=mask_embed_dim,
padding_type=padding_type,
n_downsampling=n_downsampling,
img_width=img_width,
use_deconv=use_deconv,
)
def forward( # type: ignore
self, zero_filled_input: torch.Tensor, mask: torch.Tensor
) -> Dict[str, Any]:
reconstructed_image, uncertainty_map, mask_embedding = self.reconstructor(
zero_filled_input, mask
)
reconstructed_image = reconstructed_image.permute(0, 2, 3, 1)
uncertainty_map = uncertainty_map.permute(0, 2, 3, 1)
return {
"reconstruction": reconstructed_image,
"uncertainty_map": uncertainty_map,
"mask_embedding": mask_embedding,
}
def init_from_checkpoint(self, checkpoint: Dict[str, Any]):
return self.reconstructor.init_from_checkpoint(checkpoint)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.baselines.ddqn.py
=======================================
Baseline implementation of Double DQN, as described in
*Van Hasselt, Hado, Arthur Guez, and David Silver. "Deep reinforcement learning with
double q-learning." arXiv preprint arXiv:1509.06461 (2015)*.
"""
import argparse
import logging
import math
import os
import pickle
import random
import sys
import time
from typing import Any, Dict, List, Optional, Tuple
import filelock
import numpy as np
import tensorboardX
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import activemri.envs.envs as mri_envs
import activemri.experimental.cvpr19_models.models.evaluator as cvpr19_evaluator
from . import Policy, RandomPolicy, evaluation, replay_buffer
def _encode_obs_dict(obs: Dict[str, Any]) -> torch.Tensor:
reconstruction = obs["reconstruction"].permute(0, 3, 1, 2)
mask_embedding = obs["extra_outputs"]["mask_embedding"]
mask = obs["mask"]
batch_size, num_channels, img_height, img_width = reconstruction.shape
transformed_obs = torch.zeros(
batch_size, num_channels, img_height + 2, img_width
).float()
transformed_obs[..., :img_height, :] = reconstruction
# The second to last row is the mask
transformed_obs[..., img_height, :] = mask.unsqueeze(1)
# The last row is the mask embedding (padded with 0s if necessary)
if mask_embedding is not None and len(mask_embedding) > 0:
mask_embed_dim = len(mask_embedding[0])
transformed_obs[..., img_height + 1, :mask_embed_dim] = mask_embedding[
:, :, 0, 0
]
else:
transformed_obs[:, :, img_height + 1, 0] = np.nan
return transformed_obs
def _decode_obs_tensor(
obs_tensor: torch.Tensor, mask_embed_dim: int
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
reconstruction = obs_tensor[..., :-2, :]
bs = obs_tensor.shape[0]
if torch.isnan(obs_tensor[0, 0, -1, 0]).item() == 1:
assert mask_embed_dim == 0
mask_embedding = None
else:
mask_embedding = obs_tensor[:, 0, -1, :mask_embed_dim].view(bs, -1, 1, 1)
mask_embedding = mask_embedding.repeat(
1, 1, reconstruction.shape[2], reconstruction.shape[3]
)
mask = obs_tensor[:, 0, -2, :]
mask = mask.contiguous().view(bs, 1, 1, -1)
return reconstruction, mask, mask_embedding
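# A minimal round-trip sketch of the observation encoding (no mask embedding,
# matching ``mask_embed_dim=0``; the NaN marker in the last row signals its
# absence to the decoder).
def _demo_obs_encoding_roundtrip():
    obs = {
        "reconstruction": torch.randn(3, 16, 16, 2),  # (batch, H, W, channels)
        "mask": torch.zeros(3, 16),
        "extra_outputs": {"mask_embedding": None},
    }
    encoded = _encode_obs_dict(obs)  # (3, 2, 18, 16): image, mask row, embed row
    recon, mask, embedding = _decode_obs_tensor(encoded, mask_embed_dim=0)
    assert recon.shape == (3, 2, 16, 16) and embedding is None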
def _get_epsilon(steps_done, opts):
return opts.epsilon_end + (opts.epsilon_start - opts.epsilon_end) * math.exp(
-1.0 * steps_done / opts.epsilon_decay
)
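# A small numeric sketch of the decay schedule with illustrative option
# values: epsilon anneals exponentially from ``epsilon_start`` toward
# ``epsilon_end`` with time constant ``epsilon_decay`` steps.
def _demo_epsilon_decay():
    opts = argparse.Namespace(epsilon_start=1.0, epsilon_end=0.1, epsilon_decay=1000)
    assert [round(_get_epsilon(s, opts), 3) for s in (0, 1000, 5000)] == [1.0, 0.431, 0.106]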
# noinspection PyAbstractClass
class Flatten(nn.Module):
# noinspection PyMethodMayBeStatic
def forward(self, x):
return x.view(x.size(0), -1)
class SimpleMLP(nn.Module):
""" Value network used for dataset specific DDQN model. """
def __init__(
self,
budget: int,
image_width: int,
num_hidden_layers: int = 2,
hidden_size: int = 32,
ignore_mask: bool = True,
):
super().__init__()
self.ignore_mask = ignore_mask
self.num_inputs = budget if self.ignore_mask else image_width
num_actions = image_width
self.linear1 = nn.Sequential(nn.Linear(self.num_inputs, hidden_size), nn.ReLU())
hidden_layers = []
for i in range(num_hidden_layers):
hidden_layers.append(
nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.ReLU())
)
self.hidden = nn.Sequential(*hidden_layers)
self.output = nn.Linear(hidden_size, num_actions)
self.model = nn.Sequential(self.linear1, self.hidden, self.output)
def forward(self, obs: torch.Tensor) -> torch.Tensor:
"""Predicts action values.
Args:
obs(torch.Tensor): The observation tensor. Once decoded, only its mask
information is used. If the network was constructed with ``ignore_mask=True``,
the mask is used solely to deduce the current time step.
Returns:
torch.Tensor: Q-values for all actions at the given observation.
Note:
Values corresponding to active k-space columns in the observation are
suppressed by subtracting ``1e10``, so they are effectively never selected.
"""
_, mask, _ = _decode_obs_tensor(obs, 0)
previous_actions = mask.squeeze()
if self.ignore_mask:
input_tensor = torch.zeros(obs.shape[0], self.num_inputs).to(obs.device)
time_steps = previous_actions.sum(1).unsqueeze(1)
# We allow the model to receive observations that are over budget during test
# Code below randomizes the input to the model for these observations
index_over_budget = (time_steps >= self.num_inputs).squeeze()
time_steps = time_steps.clamp(0, self.num_inputs - 1)
input_tensor.scatter_(1, time_steps.long(), 1)
input_tensor[index_over_budget] = torch.randn_like(
input_tensor[index_over_budget]
)
else:
input_tensor = mask
value = self.model(input_tensor)
return value - 1e10 * previous_actions
class EvaluatorBasedValueNetwork(nn.Module):
""" Value network based on Zhang et al., CVPR'19 evaluator architecture. """
def __init__(
self, image_width: int, mask_embed_dim: int, legacy_offset: Optional[int] = None
):
super().__init__()
num_actions = image_width
if legacy_offset:
num_actions -= 2 * legacy_offset
self.legacy_offset = legacy_offset
self.evaluator = cvpr19_evaluator.EvaluatorNetwork(
number_of_filters=128,
number_of_conv_layers=4,
use_sigmoid=False,
width=image_width,
mask_embed_dim=mask_embed_dim,
num_output_channels=num_actions,
)
self.mask_embed_dim = mask_embed_dim
def forward(self, obs: torch.Tensor) -> torch.Tensor:
"""Predicts action values.
Args:
obs(torch.Tensor): The observation tensor.
Returns:
torch.Tensor: Q-values for all actions at the given observation.
Note:
Values corresponding to active k-space columns in the observation are
suppressed by subtracting ``1e10``, so they are effectively never selected.
"""
reconstruction, mask, mask_embedding = _decode_obs_tensor(
obs, self.evaluator.mask_embed_dim
)
qvalue = self.evaluator(reconstruction, mask_embedding)
if self.legacy_offset:
mask = mask[..., self.legacy_offset : -self.legacy_offset]
return qvalue - 1e10 * mask.squeeze()
def _get_model(options):
if options.dqn_model_type == "simple_mlp":
return SimpleMLP(options.budget, options.image_width)
if options.dqn_model_type == "evaluator":
return EvaluatorBasedValueNetwork(
options.image_width,
options.mask_embedding_dim,
legacy_offset=getattr(options, "legacy_offset", None),
)
raise ValueError("Unknown model specified for DQN.")
class DDQN(nn.Module, Policy):
"""Implementation of Double DQN value network.
The configuration is given by the ``opts`` argument, which must contain the following
fields:
- mask_embedding_dim(int): See
:class:`cvpr19_models.models.evaluator.EvaluatorNetwork`.
- gamma(float): Discount factor for target updates.
- dqn_model_type(str): Describes the architecture of the neural net. Options
are "simple_mlp" and "evaluator", to use :class:`SimpleMLP` and
:class:`EvaluatorBasedValueNetwork`, respectively.
- budget(int): The environment's budget.
- image_width(int): The width of the input images.
Args:
device(``torch.device``): Device to use.
memory(optional(``replay_buffer.ReplayMemory``)): Replay buffer to sample transitions
from. Can be ``None``, for example, if this is a target network.
opts(``argparse.Namespace``): Options for the algorithm as explained above.
"""
def __init__(
self,
device: torch.device,
memory: Optional[replay_buffer.ReplayMemory],
opts: argparse.Namespace,
):
super().__init__()
self.model = _get_model(opts)
self.memory = memory
self.optimizer = optim.Adam(self.parameters(), lr=opts.dqn_learning_rate)
self.opts = opts
self.device = device
self.random_sampler = RandomPolicy()
self.to(device)
def add_experience(
self,
observation: np.ndarray,
action: int,
next_observation: np.ndarray,
reward: float,
done: bool,
):
self.memory.push(observation, action, next_observation, reward, done)
def update_parameters(self, target_net: nn.Module) -> Optional[Dict[str, Any]]:
self.model.train()
batch = self.memory.sample()
if batch is None:
return None
observations = batch["observations"].to(self.device)
next_observations = batch["next_observations"].to(self.device)
actions = batch["actions"].to(self.device)
rewards = batch["rewards"].to(self.device).squeeze()
dones = batch["dones"].to(self.device)
not_done_mask = dones.logical_not().squeeze()
# Compute Q-values and get best action according to online network
output_cur_step = self.forward(observations)
all_q_values_cur = output_cur_step
q_values = all_q_values_cur.gather(1, actions.unsqueeze(1))
# Compute target values using the best action found
if self.opts.gamma == 0.0:
target_values = rewards
else:
with torch.no_grad():
all_q_values_next = self.forward(next_observations)
target_values = torch.zeros(observations.shape[0], device=self.device)
del observations
if not_done_mask.any().item() != 0:
best_actions = all_q_values_next.detach().max(1)[1]
target_values[not_done_mask] = (
target_net.forward(next_observations)
.gather(1, best_actions.unsqueeze(1))[not_done_mask]
.squeeze()
.detach()
)
target_values = self.opts.gamma * target_values + rewards
# loss = F.mse_loss(q_values, target_values.unsqueeze(1))
loss = F.smooth_l1_loss(q_values, target_values.unsqueeze(1))
self.optimizer.zero_grad()
loss.backward()
# Compute total gradient norm (for logging purposes) and then clip gradients
grad_norm: torch.Tensor = 0 # type: ignore
for p in list(filter(lambda p: p.grad is not None, self.parameters())):
grad_norm += p.grad.data.norm(2).item() ** 2
grad_norm = grad_norm ** 0.5
torch.nn.utils.clip_grad_value_(self.parameters(), 1)
self.optimizer.step()
torch.cuda.empty_cache()
return {
"loss": loss,
"grad_norm": grad_norm,
"q_values_mean": q_values.detach().mean().cpu().numpy(),
"q_values_std": q_values.detach().std().cpu().numpy(),
}
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Predicts action values.
Args:
x(torch.Tensor): The observation tensor.
Returns:
torch.Tensor: The predicted Q-values.
Note:
Values corresponding to active k-space columns in the observation are
suppressed by subtracting ``1e10``, so they are effectively never selected.
"""
return self.model(x)
def get_action( # type: ignore
self, obs: Dict[str, Any], eps_threshold: float = 0.0
) -> List[int]:
"""Returns an action sampled from an epsilon-greedy policy.
With probability epsilon sample a random k-space column (ignoring active columns),
otherwise return the column with the highest estimated Q-value for the observation.
Args:
obs(dict(str, any)): The observation for which an action is required.
eps_threshold(float): The probability of sampling a random action instead of using
a greedy action.
"""
sample = random.random()
if sample < eps_threshold:
return self.random_sampler.get_action(obs)
with torch.no_grad():
self.model.eval()
obs_tensor = _encode_obs_dict(obs)
q_values = self(obs_tensor.to(self.device))
actions = torch.argmax(q_values, dim=1) + getattr(self.opts, "legacy_offset", 0)
return actions.tolist()
def _get_folder_lock(path):
return filelock.FileLock(path, timeout=-1)
class DDQNTester:
def __init__(
self, env: mri_envs.ActiveMRIEnv, training_dir: str, device: torch.device
):
self.env = env
self.device = device
self.training_dir = training_dir
self.evaluation_dir = os.path.join(training_dir, "evaluation")
os.makedirs(self.evaluation_dir, exist_ok=True)
self.folder_lock_path = DDQNTrainer.get_lock_filename(training_dir)
self.latest_policy_path = DDQNTrainer.get_name_latest_checkpoint(
self.training_dir
)
self.best_test_score = -np.inf
self.last_time_stamp = -np.inf
self.options = None
# Initialize writer and logger
self.writer = tensorboardX.SummaryWriter(os.path.join(self.evaluation_dir))
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"
)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
ch.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(self.evaluation_dir, "evaluation.log"))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# Read the options used for training
options_file_found = False
while not options_file_found:
options_filename = DDQNTrainer.get_options_filename(self.training_dir)
with _get_folder_lock(self.folder_lock_path):
if os.path.isfile(options_filename):
self.logger.info(f"Options file found at {options_filename}.")
with open(options_filename, "rb") as f:
self.options = pickle.load(f)
options_file_found = True
if not options_file_found:
self.logger.info(f"No options file found at {options_filename}.")
self.logger.info("I will wait for five minutes before trying again.")
time.sleep(300)
# This change is needed so that util.test_policy writes results to correct directory
self.options.checkpoints_dir = self.evaluation_dir
os.makedirs(self.evaluation_dir, exist_ok=True)
# Initialize environment
self.options.image_width = self.env.action_space.n
self.logger.info(f"Created environment with {self.env.action_space.n} actions")
self.logger.info(f"Checkpoint dir for this job is {self.evaluation_dir}")
self.logger.info(
f"Evaluation will be done for model saved at {self.training_dir}"
)
# Initialize policy
self.policy = DDQN(device, None, self.options)
# Load info about best checkpoint tested and timestamp
self.load_tester_checkpoint_if_present()
def __call__(self):
training_done = False
while not training_done:
training_done = self.check_if_train_done()
self.logger.info(f"Is training done? {training_done}.")
checkpoint_episode, timestamp = self.load_latest_policy()
if timestamp is None or timestamp <= self.last_time_stamp:
# No new policy checkpoint to evaluate
self.logger.info(
"No new policy to evaluate. "
"I will wait for 10 minutes before trying again."
)
time.sleep(600)
continue
self.logger.info(
f"Found a new checkpoint with timestamp {timestamp}, "
f"I will start evaluation now."
)
test_scores, _ = evaluation.evaluate(
self.env,
self.policy,
self.options.num_test_episodes,
self.options.seed,
"val",
verbose=True,
)
auc_score = test_scores[self.options.reward_metric].sum(axis=1).mean()
if "mse" in self.options.reward_metric:
auc_score *= -1
self.logger.info(f"The test score for the model was {auc_score}.")
self.last_time_stamp = timestamp
if auc_score > self.best_test_score:
self.save_tester_checkpoint()
policy_path = os.path.join(self.evaluation_dir, "policy_best.pt")
self.save_policy(policy_path, checkpoint_episode)
self.best_test_score = auc_score
self.logger.info(
f"Saved DQN model with score {self.best_test_score} to {policy_path}, "
f"corresponding to episode {checkpoint_episode}."
)
def check_if_train_done(self):
with _get_folder_lock(self.folder_lock_path):
return os.path.isfile(DDQNTrainer.get_done_filename(self.training_dir))
def checkpoint(self):
self.save_tester_checkpoint()
def save_tester_checkpoint(self):
path = os.path.join(self.evaluation_dir, "tester_checkpoint.pickle")
with open(path, "wb") as f:
pickle.dump(
{
"best_test_score": self.best_test_score,
"last_time_stamp": self.last_time_stamp,
},
f,
)
def load_tester_checkpoint_if_present(self):
path = os.path.join(self.evaluation_dir, "tester_checkpoint.pickle")
if os.path.isfile(path):
with open(path, "rb") as f:
checkpoint = pickle.load(f)
self.best_test_score = checkpoint["best_test_score"]
self.last_time_stamp = checkpoint["last_time_stamp"]
self.logger.info(
f"Found checkpoint from previous evaluation run. "
f"Best Score set to {self.best_test_score}. "
f"Last Time Stamp set to {self.last_time_stamp}"
)
# noinspection PyProtectedMember
def load_latest_policy(self):
with _get_folder_lock(self.folder_lock_path):
if not os.path.isfile(self.latest_policy_path):
return None, None
timestamp = os.path.getmtime(self.latest_policy_path)
checkpoint = torch.load(self.latest_policy_path, map_location=self.device)
self.policy.load_state_dict(checkpoint["dqn_weights"])
return checkpoint["episode"], timestamp
def save_policy(self, path, episode):
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"episode": episode,
"options": self.options,
},
path,
)
class DDQNTrainer:
"""DDQN Trainer for active MRI acquisition.
Configuration for the trainer is provided by argument ``options``. Must contain the
following fields:
- checkpoints_dir(str): The directory where the model will be saved to (or
loaded from).
- dqn_batch_size(int): The batch size to use for updates.
- dqn_burn_in(int): How many steps to do before starting updating parameters.
- dqn_normalize(bool): ``True`` if a running mean/standard deviation should be
maintained for observations.
- dqn_only_test(bool): ``True`` if the model will not be trained. In this case,
the trainer only reads the checkpoint and loads the network weights (ignoring
training-related information).
- dqn_test_episode_freq(optional(int)): How frequently (in number of episodes)
to perform test episodes.
- freq_dqn_checkpoint_save(int): How often (in episodes) to save the model.
- num_train_steps(int): How many environment steps to train for.
- replay_buffer_size(int): The capacity of the replay buffer.
- resume(bool): If true, will try to load weights from the checkpoints dir.
- num_test_episodes(int): How many test episodes to periodically evaluate for.
- seed(int): Sets the seed for the environment when running evaluation episodes.
- reward_metric(str): Which of the ``env.score_keys()`` is used as reward. Mainly
used for logging purposes.
- target_net_update_freq(int): How often (in environment steps) to update the
target network.
Args:
options(``argparse.Namespace``): Options for the trainer.
env(``activemri.envs.ActiveMRIEnv``): Env for which the policy is trained.
device(``torch.device``): Device to use.
"""
def __init__(
self,
options: argparse.Namespace,
env: mri_envs.ActiveMRIEnv,
device: torch.device,
):
self.options = options
self.env = env
self.options.image_width = self.env.kspace_width
self.steps = 0
self.episode = 0
self.best_test_score = -np.inf
self.device = device
self.replay_memory = None
self.window_size = 1000
self.reward_images_in_window = np.zeros(self.window_size)
self.current_score_auc_window = np.zeros(self.window_size)
# ------- Init loggers ------
self.writer = tensorboardX.SummaryWriter(
os.path.join(self.options.checkpoints_dir)
)
self.logger = logging.getLogger()
logging_level = logging.DEBUG if self.options.debug else logging.INFO
self.logger.setLevel(logging_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging_level)
formatter = logging.Formatter(
"%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"
)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
fh = logging.FileHandler(
os.path.join(self.options.checkpoints_dir, "train.log")
)
fh.setLevel(logging_level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.info("Creating DDQN model.")
self.logger.info(
f"Creating replay buffer with capacity {options.mem_capacity}."
)
# ------- Create replay buffer and networks ------
# See _encode_obs_dict() for tensor format
self.obs_shape = (2, self.env.kspace_height + 2, self.env.kspace_width)
self.replay_memory = replay_buffer.ReplayMemory(
options.mem_capacity,
self.obs_shape,
self.options.dqn_batch_size,
self.options.dqn_burn_in,
use_normalization=self.options.dqn_normalize,
)
self.logger.info("Created replay buffer.")
self.policy = DDQN(device, self.replay_memory, self.options)
self.target_net = DDQN(device, None, self.options)
self.target_net.eval()
self.logger.info(
f"Created neural networks with {self.env.action_space.n} outputs."
)
# ------- Files used to communicate with DDQNTester ------
self.folder_lock_path = DDQNTrainer.get_lock_filename(
self.options.checkpoints_dir
)
with _get_folder_lock(self.folder_lock_path):
# Write options so that tester can read them
with open(
DDQNTrainer.get_options_filename(self.options.checkpoints_dir), "wb"
) as f:
pickle.dump(self.options, f)
# Remove previous done file since training will start over
done_file = DDQNTrainer.get_done_filename(self.options.checkpoints_dir)
if os.path.isfile(done_file):
os.remove(done_file)
@staticmethod
def get_done_filename(path):
return os.path.join(path, "DONE")
@staticmethod
def get_name_latest_checkpoint(path):
return os.path.join(path, "policy_checkpoint.pth")
@staticmethod
def get_options_filename(path):
return os.path.join(path, "options.pickle")
@staticmethod
def get_lock_filename(path):
return os.path.join(path, ".LOCK")
def _max_replay_buffer_size(self):
return min(self.options.num_train_steps, self.options.replay_buffer_size)
def load_checkpoint_if_needed(self):
if self.options.dqn_only_test or self.options.resume:
policy_path = os.path.join(self.options.dqn_weights_path)
if os.path.isfile(policy_path):
self.load(policy_path)
self.logger.info(f"Loaded DQN policy found at {policy_path}.")
else:
self.logger.warning(f"No DQN policy found at {policy_path}.")
if self.options.dqn_only_test:
raise FileNotFoundError(f"No DQN policy found at {policy_path}.")
def _train_dqn_policy(self):
""" Trains the DQN policy. """
self.logger.info(
f"Starting training at step {self.steps}/{self.options.num_train_steps}. "
f"Best score so far is {self.best_test_score}."
)
steps_epsilon = self.steps
while self.steps < self.options.num_train_steps:
self.logger.info("Episode {}".format(self.episode + 1))
# Evaluate the current policy
if self.options.dqn_test_episode_freq and (
self.episode % self.options.dqn_test_episode_freq == 0
):
test_scores, _ = evaluation.evaluate(
self.env,
self.policy,
self.options.num_test_episodes,
self.options.seed,
"val",
)
self.env.set_training()
auc_score = test_scores[self.options.reward_metric].sum(axis=1).mean()
if "mse" in self.options.reward_metric:
auc_score *= -1
if auc_score > self.best_test_score:
policy_path = os.path.join(
self.options.checkpoints_dir, "policy_best.pt"
)
self.save(policy_path)
self.best_test_score = auc_score
self.logger.info(
f"Saved DQN model with score {self.best_test_score} to {policy_path}."
)
# Save model periodically
if self.episode % self.options.freq_dqn_checkpoint_save == 0:
self.checkpoint(save_memory=False)
# Run an episode and update model
obs, meta = self.env.reset()
msg = ", ".join(
[
f"({meta['fname'][i]}, {meta['slice_id'][i]})"
for i in range(len(meta["slice_id"]))
]
)
self.logger.info(f"Episode started with images {msg}.")
all_done = False
total_reward = 0
auc_score = 0
while not all_done:
epsilon = _get_epsilon(steps_epsilon, self.options)
action = self.policy.get_action(obs, eps_threshold=epsilon)
next_obs, reward, done, meta = self.env.step(action)
auc_score += meta["current_score"][self.options.reward_metric]
all_done = all(done)
self.steps += 1
obs_tensor = _encode_obs_dict(obs)
next_obs_tensor = _encode_obs_dict(next_obs)
batch_size = len(obs_tensor)
for i in range(batch_size):
self.policy.add_experience(
obs_tensor[i], action[i], next_obs_tensor[i], reward[i], done[i]
)
update_results = self.policy.update_parameters(self.target_net)
torch.cuda.empty_cache()
if self.steps % self.options.target_net_update_freq == 0:
self.logger.info("Updating target network.")
self.target_net.load_state_dict(self.policy.state_dict())
steps_epsilon += 1
# Adding per-step tensorboard logs
if self.steps % 250 == 0:
self.logger.debug("Writing to tensorboard.")
self.writer.add_scalar("epsilon", epsilon, self.steps)
if update_results is not None:
self.writer.add_scalar(
"loss", update_results["loss"], self.steps
)
self.writer.add_scalar(
"grad_norm", update_results["grad_norm"], self.steps
)
self.writer.add_scalar(
"mean_q_value", update_results["q_values_mean"], self.steps
)
self.writer.add_scalar(
"std_q_value", update_results["q_values_std"], self.steps
)
total_reward += reward
obs = next_obs
# Adding per-episode tensorboard logs
total_reward = total_reward.mean().item()
auc_score = auc_score.mean().item()
self.reward_images_in_window[self.episode % self.window_size] = total_reward
self.current_score_auc_window[self.episode % self.window_size] = auc_score
self.writer.add_scalar("episode_reward", total_reward, self.episode)
self.writer.add_scalar(
"average_reward_images_in_window",
np.sum(self.reward_images_in_window)
/ min(self.episode + 1, self.window_size),
self.episode,
)
self.writer.add_scalar(
"average_auc_score_in_window",
np.sum(self.current_score_auc_window)
/ min(self.episode + 1, self.window_size),
self.episode,
)
self.episode += 1
self.checkpoint()
# Writing DONE file with best test score
with _get_folder_lock(self.folder_lock_path):
with open(
DDQNTrainer.get_done_filename(self.options.checkpoints_dir), "w"
) as f:
f.write(str(self.best_test_score))
return self.best_test_score
def __call__(self):
self.load_checkpoint_if_needed()
return self._train_dqn_policy()
def checkpoint(self, save_memory=True):
policy_path = DDQNTrainer.get_name_latest_checkpoint(
self.options.checkpoints_dir
)
self.save(policy_path)
self.logger.info(f"Saved DQN checkpoint to {policy_path}")
if save_memory:
self.logger.info("Now saving replay memory.")
memory_path = self.replay_memory.save(
self.options.checkpoints_dir, "replay_buffer.pt"
)
self.logger.info(f"Saved replay buffer to {memory_path}.")
def save(self, path):
with _get_folder_lock(self.folder_lock_path):
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"target_weights": self.target_net.state_dict(),
"options": self.options,
"episode": self.episode,
"steps": self.steps,
"best_test_score": self.best_test_score,
"reward_images_in_window": self.reward_images_in_window,
"current_score_auc_window": self.current_score_auc_window,
},
path,
)
def load(self, path):
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
self.episode = checkpoint["episode"] + 1
if not self.options.dqn_only_test:
self.target_net.load_state_dict(checkpoint["target_weights"])
self.steps = checkpoint["steps"]
self.best_test_score = checkpoint["best_test_score"]
self.reward_images_in_window = checkpoint["reward_images_in_window"]
self.current_score_auc_window = checkpoint["current_score_auc_window"]
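# A minimal end-to-end usage sketch (hedged: ``options`` must carry every
# field documented in the DDQNTrainer docstring; the values come from the
# caller's own argparse setup, not from this module).
def _demo_train_ddqn(env: mri_envs.ActiveMRIEnv, options: argparse.Namespace):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    trainer = DDQNTrainer(options, env, device)
    return trainer()  # runs the training loop and returns the best test score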
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import activemri.baselines as baselines
import activemri.envs as envs
def evaluate(
env: envs.envs.ActiveMRIEnv,
policy: baselines.Policy,
num_episodes: int,
seed: int,
split: str,
verbose: Optional[bool] = False,
) -> Tuple[Dict[str, np.ndarray], List[Tuple[Any, Any]]]:
env.seed(seed)
if split == "test":
env.set_test()
elif split == "val":
env.set_val()
else:
raise ValueError(f"Invalid evaluation split: {split}.")
score_keys = env.score_keys()
all_scores = dict(
(k, np.zeros((num_episodes * env.num_parallel_episodes, env.budget + 1)))
for k in score_keys
)
all_img_ids = []
trajectories_written = 0
for episode in range(num_episodes):
step = 0
obs, meta = env.reset()
if not obs:
break # no more images
# in case the last batch is smaller
actual_batch_size = len(obs["reconstruction"])
if verbose:
msg = ", ".join(
[
f"({meta['fname'][i]}, {meta['slice_id'][i]})"
for i in range(actual_batch_size)
]
)
print(f"Read images: {msg}")
for i in range(actual_batch_size):
all_img_ids.append((meta["fname"][i], meta["slice_id"][i]))
batch_idx = slice(
trajectories_written, trajectories_written + actual_batch_size
)
for k in score_keys:
all_scores[k][batch_idx, step] = meta["current_score"][k]
trajectories_written += actual_batch_size
all_done = False
while not all_done:
step += 1
action = policy.get_action(obs)
obs, reward, done, meta = env.step(action)
for k in score_keys:
all_scores[k][batch_idx, step] = meta["current_score"][k]
all_done = all(done)
for k in score_keys:
all_scores[k] = all_scores[k][: len(all_img_ids), :]
return all_scores, all_img_ids
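# A minimal usage sketch (hedged: assumes a configured ``ActiveMRIEnv`` whose
# ``score_keys()`` include "ssim"; any ``Policy`` implementation works).
def _demo_evaluate(env: envs.envs.ActiveMRIEnv, policy: baselines.Policy) -> float:
    scores, _img_ids = evaluate(env, policy, num_episodes=4, seed=0, split="val")
    # Area under the per-step score curve, averaged over images (cf. DDQNTester).
    return scores["ssim"].sum(axis=1).mean()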
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, Dict, List
class Policy:
""" A basic policy interface. """
def __init__(self, *args, **kwargs):
pass
@abc.abstractmethod
def get_action(self, obs: Dict[str, Any], **kwargs: Any) -> List[int]:
""" Returns a list of actions for a batch of observations. """
pass
def __call__(self, obs: Dict[str, Any], **kwargs: Any) -> List[int]:
return self.get_action(obs, **kwargs)
from .simple_baselines import (
RandomPolicy,
RandomLowBiasPolicy,
LowestIndexPolicy,
OneStepGreedyOracle,
)
from .cvpr19_evaluator import CVPR19Evaluator
from .ddqn import DDQN, DDQNTrainer
from .evaluation import evaluate
__all__ = [
"RandomPolicy",
"RandomLowBiasPolicy",
"LowestIndexPolicy",
"OneStepGreedyOracle",
"CVPR19Evaluator",
"DDQN",
"DDQNTrainer",
"evaluate",
]
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.baselines.simple_baselines.py
=======================================
Simple baselines for active MRI acquisition.
"""
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import activemri.envs
from . import Policy
class RandomPolicy(Policy):
"""A policy representing random k-space selection.
Returns one of the valid actions uniformly at random.
Args:
seed(optional(int)): The seed to use for the random number generator, which is
based on ``torch.Generator()``.
"""
def __init__(self, seed: Optional[int] = None):
super().__init__()
self.rng = torch.Generator()
if seed is not None:
self.rng.manual_seed(seed)
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
"""Returns a random action without replacement.
Args:
obs(dict(str, any)): As returned by :class:`activemri.envs.ActiveMRIEnv`.
Returns:
list(int): A list of random k-space column indices, one per batch element in
the observation. The indices are sampled from the set of inactive (0) columns
on each batch element.
"""
return (
(obs["mask"].logical_not().float() + 1e-6)
.multinomial(1, generator=self.rng)
.squeeze()
.tolist()
)
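# A minimal sketch: inactive columns (mask == 0) get weight ~1 while active
# columns keep only the tiny 1e-6 floor, so sampling almost surely picks an
# inactive column for each batch element.
def _demo_random_policy():
    policy = RandomPolicy(seed=0)
    obs = {"mask": torch.tensor([[1, 0, 0, 1], [0, 1, 1, 1]])}
    actions = policy.get_action(obs)
    assert len(actions) == 2  # one column index per batch element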
class RandomLowBiasPolicy(Policy):
def __init__(
self, acceleration: float, centered: bool = True, seed: Optional[int] = None
):
super().__init__()
self.acceleration = acceleration
self.centered = centered
self.rng = np.random.RandomState(seed)
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
mask = obs["mask"].squeeze().cpu().numpy()
new_mask = self._cartesian_mask(mask)
action = (new_mask - mask).argmax(axis=1)
return action.tolist()
@staticmethod
def _normal_pdf(length: int, sensitivity: float):
return np.exp(-sensitivity * (np.arange(length) - length / 2) ** 2)
def _cartesian_mask(self, current_mask: np.ndarray) -> np.ndarray:
batch_size, image_width = current_mask.shape
pdf_x = RandomLowBiasPolicy._normal_pdf(
image_width, 0.5 / (image_width / 10.0) ** 2
)
pdf_x = np.expand_dims(pdf_x, axis=0)
lmda = image_width / (2.0 * self.acceleration)
# add uniform distribution
pdf_x += lmda * 1.0 / image_width
# remove previously chosen columns
        # note that pdf_x is designed for centered masks
new_mask = (
np.fft.ifftshift(current_mask, axes=1)
if not self.centered
else current_mask.copy()
)
pdf_x = pdf_x * np.logical_not(new_mask)
# normalize probabilities and choose accordingly
pdf_x /= pdf_x.sum(axis=1, keepdims=True)
indices = [
self.rng.choice(image_width, 1, False, pdf_x[i]).item()
for i in range(batch_size)
]
new_mask[range(batch_size), indices] = 1
if not self.centered:
new_mask = np.fft.ifftshift(new_mask, axes=1)
return new_mask
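# Example (illustrative sketch; not part of the original module): each call adds
# one column per mask, sampled with higher probability near the low (central)
# frequencies of a centered k-space.
def example_random_low_bias_usage() -> List[int]:
    policy = RandomLowBiasPolicy(acceleration=4.0, centered=True, seed=0)
    mask = torch.zeros(2, 8)
    return policy.get_action({"mask": mask})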
class LowestIndexPolicy(Policy):
"""A policy that represents low-to-high frequency k-space selection.
Args:
alternate_sides(bool): If ``True`` the indices of selected actions will alternate
between the sides of the mask. For example, for an image with 100
columns, and non-centered k-space, the order will be 0, 99, 1, 98, 2, 97, ..., etc.
For the same size and centered, the order will be 49, 50, 48, 51, 47, 52, ..., etc.
centered(bool): If ``True`` (default), low frequencies are in the center of the mask.
Otherwise, they are in the edges of the mask.
"""
def __init__(
self,
alternate_sides: bool,
centered: bool = True,
):
super().__init__()
self.alternate_sides = alternate_sides
self.centered = centered
self.bottom_side = True
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
"""Returns a random action without replacement.
Args:
obs(dict(str, any)): As returned by :class:`activemri.envs.ActiveMRIEnv`.
Returns:
list(int): A list of k-space column indices, one per batch element in
the observation, equal to the lowest non-active k-space column in their
corresponding observation masks.
"""
mask = obs["mask"].squeeze().cpu().numpy()
new_mask = self._get_new_mask(mask)
action = (new_mask - mask).argmax(axis=1)
return action.tolist()
def _get_new_mask(self, current_mask: np.ndarray) -> np.ndarray:
        # The code below assumes the mask is non-centered
new_mask = (
np.fft.ifftshift(current_mask, axes=1)
if self.centered
else current_mask.copy()
)
if self.bottom_side:
idx = np.arange(new_mask.shape[1], 0, -1)
else:
idx = np.arange(new_mask.shape[1])
if self.alternate_sides:
self.bottom_side = not self.bottom_side
        # Find the first inactive column, scanning from the chosen edge inward.
        # Note: indices must stay 1-D here; expanding it to shape (batch, 1)
        # would make the advanced indexing below broadcast across the batch.
        indices = (np.logical_not(new_mask) * idx).argmax(axis=1)
        new_mask[range(new_mask.shape[0]), indices] = 1
if self.centered:
new_mask = np.fft.ifftshift(new_mask, axes=1)
return new_mask
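# Example (illustrative sketch; not part of the original module): with a
# non-centered 6-column k-space and alternate_sides=True, successive calls
# select columns from opposite edges moving inward: 0, 5, 1, 4, ...
def example_lowest_index_policy_usage() -> List[List[int]]:
    policy = LowestIndexPolicy(alternate_sides=True, centered=False)
    mask = torch.zeros(2, 6)
    picked = []
    for _ in range(4):
        actions = policy.get_action({"mask": mask})
        picked.append(actions)
        for i, col in enumerate(actions):
            mask[i, col] = 1
    return picked  # [[0, 0], [5, 5], [1, 1], [4, 4]]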
class OneStepGreedyOracle(Policy):
"""A policy that returns the k-space column leading to best reconstruction score.
Args:
        env(``activemri.envs.ActiveMRIEnv``): The environment for which the policy is
                computed.
metric(str): The name of the score metric to use (must be in ``env.score_keys()``).
        num_samples(optional(int)): If given, only ``num_samples`` random actions will be
                tested. Defaults to ``None``, which means the method considers all actions.
rng(``numpy.random.RandomState``): A random number generator to use for sampling.
"""
def __init__(
self,
env: activemri.envs.ActiveMRIEnv,
metric: str,
num_samples: Optional[int] = None,
rng: Optional[np.random.RandomState] = None,
):
assert metric in env.score_keys()
super().__init__()
self.env = env
self.metric = metric
self.num_samples = num_samples
self.rng = rng if rng is not None else np.random.RandomState()
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
"""Returns a one-step greedy action maximizing reconstruction score.
Args:
obs(dict(str, any)): As returned by :class:`activemri.envs.ActiveMRIEnv`.
Returns:
list(int): A list of k-space column indices, one per batch element in
the observation, equal to the action that maximizes reconstruction score
                (e.g., SSIM or negative MSE).
"""
mask = obs["mask"]
batch_size = mask.shape[0]
all_action_lists = []
for i in range(batch_size):
available_actions = mask[i].logical_not().nonzero().squeeze().tolist()
self.rng.shuffle(available_actions)
if len(available_actions) < self.num_samples:
# Add dummy actions to try if num of samples is higher than the
# number of inactive columns in this mask
available_actions.extend(
[0] * (self.num_samples - len(available_actions))
)
all_action_lists.append(available_actions)
all_scores = np.zeros((batch_size, self.num_samples))
for i in range(self.num_samples):
batch_action_to_try = [action_list[i] for action_list in all_action_lists]
obs, new_score = self.env.try_action(batch_action_to_try)
all_scores[:, i] = new_score[self.metric]
if self.metric in ["mse", "nmse"]:
all_scores *= -1
else:
assert self.metric in ["ssim", "psnr"]
best_indices = all_scores.argmax(axis=1)
action = []
for i in range(batch_size):
action.append(all_action_lists[i][best_indices[i]])
return action
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
from typing import Dict, Optional, Tuple
import numpy as np
import torch
class ReplayMemory:
"""Replay memory of transitions (ot, at, o_t+1, r_t+1).
Args:
capacity(int): How many transitions can be stored. After capacity is reached early
transitions are overwritten in FIFO fashion.
        obs_shape(tuple(int,...)): The shape of the tensors representing observations.
batch_size(int): The size of batches returned by the replay buffer.
        burn_in(int): While the replay buffer has fewer entries than this number,
:meth:`sample()` will return ``None``. Indicates a burn-in period before
training.
use_normalization(bool): If ``True``, the replay buffer will keep running mean
and standard deviation for the observations. Defaults to ``False``.
"""
def __init__(
self,
capacity: int,
        obs_shape: Tuple[int, ...],
batch_size: int,
burn_in: int,
use_normalization: bool = False,
):
assert burn_in >= batch_size
self.batch_size = batch_size
self.burn_in = burn_in
self.observations = torch.zeros(capacity, *obs_shape, dtype=torch.float32)
self.actions = torch.zeros(capacity, dtype=torch.long)
self.next_observations = torch.zeros(capacity, *obs_shape, dtype=torch.float32)
self.rewards = torch.zeros(capacity, dtype=torch.float32)
self.dones = torch.zeros(capacity, dtype=torch.bool)
self.position = 0
self.mean_obs = torch.zeros(obs_shape, dtype=torch.float32)
self.std_obs = torch.ones(obs_shape, dtype=torch.float32)
self._m2_obs = torch.ones(obs_shape, dtype=torch.float32)
self.count_seen = 1
if not use_normalization:
self._normalize = lambda x: x # type: ignore
self._denormalize = lambda x: x # type: ignore
def _normalize(self, observation: torch.Tensor) -> Optional[torch.Tensor]:
if observation is None:
return None
return (observation - self.mean_obs) / self.std_obs
def _denormalize(self, observation: torch.Tensor) -> Optional[torch.Tensor]:
if observation is None:
return None
return self.std_obs * observation + self.mean_obs
def _update_stats(self, observation: torch.Tensor):
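        # Welford's online algorithm: incrementally update the running mean and
        # the sum of squared deviations (_m2_obs), from which the standard
        # deviation is recovered below.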
self.count_seen += 1
delta = observation - self.mean_obs
self.mean_obs = self.mean_obs + delta / self.count_seen
delta2 = observation - self.mean_obs
self._m2_obs = self._m2_obs + (delta * delta2)
        self.std_obs = torch.sqrt(self._m2_obs / (self.count_seen - 1))
    def push(
        self,
        observation: torch.Tensor,
        action: int,
        next_observation: torch.Tensor,
        reward: float,
        done: bool,
    ):
""" Pushes a transition into the replay buffer. """
self.observations[self.position] = observation.clone()
self.actions[self.position] = torch.tensor([action], dtype=torch.long)
self.next_observations[self.position] = next_observation.clone()
self.rewards[self.position] = torch.tensor([reward], dtype=torch.float32)
self.dones[self.position] = torch.tensor([done], dtype=torch.bool)
self._update_stats(self.observations[self.position])
self.position = (self.position + 1) % len(self)
def sample(self) -> Optional[Dict[str, Optional[torch.Tensor]]]:
"""Samples a batch of transitions from the replay buffer.
Returns:
Dictionary(str, torch.Tensor): Contains keys for "observations",
"next_observations", "actions", "rewards", "dones". If the number of entries
in the buffer is less than ``self.burn_in``, then returns ``None`` instead.
"""
if self.count_seen - 1 < self.burn_in:
return None
indices = np.random.choice(min(self.count_seen - 1, len(self)), self.batch_size)
return {
"observations": self._normalize(self.observations[indices]),
"next_observations": self._normalize(self.next_observations[indices]),
"actions": self.actions[indices],
"rewards": self.rewards[indices],
"dones": self.dones[indices],
}
def save(self, directory: str, name: str):
""" Saves all tensors and normalization info to file `directory/name` """
data = {
"observations": self.observations,
"actions": self.actions,
"next_observations": self.next_observations,
"rewards": self.rewards,
"dones": self.dones,
"position": self.position,
"mean_obs": self.mean_obs,
"std_obs": self.std_obs,
"m2_obs": self._m2_obs,
"count_seen": self.count_seen,
}
        tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=directory)
        try:
            torch.save(data, tmp_file)
        except BaseException:
            tmp_file.close()
            os.remove(tmp_file.name)
            raise
        else:
            tmp_file.close()
            full_path = os.path.join(directory, name)
            os.rename(tmp_file.name, full_path)
            return full_path
def load(self, path: str, capacity: Optional[int] = None):
"""Loads the replay buffer from the specified path.
Args:
path(str): The path from where the memory will be loaded from.
            capacity(optional(int)): If provided, the buffer is created with this capacity.
                This value must be at least as large as the length of the stored tensors.
"""
data = torch.load(path)
self.position = data["position"]
self.mean_obs = data["mean_obs"]
self.std_obs = data["std_obs"]
self._m2_obs = data["m2_obs"]
self.count_seen = data["count_seen"]
old_len = data["observations"].shape[0]
if capacity is None:
self.observations = data["observations"]
self.actions = data["actions"]
self.next_observations = data["next_observations"]
self.rewards = data["rewards"]
self.dones = data["dones"]
else:
assert capacity >= len(data["observations"])
obs_shape = data["observations"].shape[1:]
self.observations = torch.zeros(capacity, *obs_shape, dtype=torch.float32)
self.actions = torch.zeros(capacity, dtype=torch.long)
self.next_observations = torch.zeros(
capacity, *obs_shape, dtype=torch.float32
)
self.rewards = torch.zeros(capacity, dtype=torch.float32)
self.dones = torch.zeros(capacity, dtype=torch.bool)
self.observations[:old_len] = data["observations"]
self.actions[:old_len] = data["actions"]
self.next_observations[:old_len] = data["next_observations"]
self.rewards[:old_len] = data["rewards"]
self.dones[:old_len] = data["dones"]
return old_len
def __len__(self):
return len(self.observations)
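# Example (illustrative sketch; not part of the original module): a minimal
# push/sample round trip with 2x3 observations. With burn_in == batch_size == 2,
# sample() returns None until at least two transitions have been pushed.
def example_replay_memory_usage():
    memory = ReplayMemory(capacity=10, obs_shape=(2, 3), batch_size=2, burn_in=2)
    for step in range(3):
        memory.push(torch.rand(2, 3), step, torch.rand(2, 3), reward=1.0, done=False)
    batch = memory.sample()
    assert batch is not None and batch["observations"].shape == (2, 2, 3)
    return batch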
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import torch
import activemri.experimental.cvpr19_models.models.evaluator as cvpr19_evaluator
from . import Policy
# This is just a wrapper for the model in cvpr19_models folder
class CVPR19Evaluator(Policy):
def __init__(
self,
evaluator_path: str,
device: torch.device,
add_mask: bool = False,
):
super().__init__()
evaluator_checkpoint = torch.load(evaluator_path)
assert (
evaluator_checkpoint is not None
and evaluator_checkpoint["evaluator"] is not None
)
self.evaluator = cvpr19_evaluator.EvaluatorNetwork(
number_of_filters=evaluator_checkpoint[
"options"
].number_of_evaluator_filters,
number_of_conv_layers=evaluator_checkpoint[
"options"
].number_of_evaluator_convolution_layers,
use_sigmoid=False,
width=evaluator_checkpoint["options"].image_width,
height=640,
mask_embed_dim=evaluator_checkpoint["options"].mask_embed_dim,
)
self.evaluator.load_state_dict(
{
key.replace("module.", ""): val
for key, val in evaluator_checkpoint["evaluator"].items()
}
)
self.evaluator.eval()
self.evaluator.to(device)
self.add_mask = add_mask
self.device = device
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
with torch.no_grad():
mask_embedding = (
None
if obs["extra_outputs"]["mask_embedding"] is None
else obs["extra_outputs"]["mask_embedding"].to(self.device)
)
mask = obs["mask"].bool().to(self.device)
mask = mask.view(mask.shape[0], 1, 1, -1)
k_space_scores = self.evaluator(
obs["reconstruction"].permute(0, 3, 1, 2).to(self.device),
mask_embedding,
mask if self.add_mask else None,
)
            # Fill the scores of already-acquired columns with a very large
            # number so that they are not selected again.
k_space_scores.masked_fill_(mask.squeeze(), 100000)
return torch.argmin(k_space_scores, dim=1).tolist()
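# Example (illustrative sketch; not part of the original module): once a trained
# evaluator checkpoint is available, this policy is used like any other Policy.
# The checkpoint path below is a hypothetical placeholder.
def example_cvpr19_evaluator_usage(obs: Dict[str, Any]) -> List[int]:
    policy = CVPR19Evaluator("path/to/evaluator_checkpoint.pth", torch.device("cpu"))
    return policy.get_action(obs)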
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.envs.envs.py
====================================
Gym-like environment for active MRI acquisition.
"""
import functools
import json
import os
import pathlib
import warnings
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Sized,
Tuple,
Union,
)
import fastmri.data
import gym
import numpy as np
import torch
import torch.utils.data
import activemri.data.singlecoil_knee_data as scknee_data
import activemri.data.transforms
import activemri.envs.masks
import activemri.envs.util
import activemri.models
DataInitFnReturnType = Tuple[
torch.utils.data.Dataset, torch.utils.data.Dataset, torch.utils.data.Dataset
]
# -----------------------------------------------------------------------------
# DATA HANDLING
# -----------------------------------------------------------------------------
class CyclicSampler(torch.utils.data.Sampler):
def __init__(
self,
data_source: Sized,
order: Optional[Sized] = None,
loops: int = 1,
):
torch.utils.data.Sampler.__init__(self, data_source)
assert loops > 0
assert order is None or len(order) == len(data_source)
self.data_source = data_source
self.order = order if order is not None else range(len(self.data_source))
self.loops = loops
def _iterator(self):
for _ in range(self.loops):
for j in self.order:
yield j
def __iter__(self):
return iter(self._iterator())
def __len__(self):
return len(self.data_source) * self.loops
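# Example (illustrative sketch; not part of the original module): a sampler over
# a 3-element dataset with a fixed order and two loops yields the index
# sequence 2, 0, 1, 2, 0, 1.
def example_cyclic_sampler() -> List[int]:
    return list(CyclicSampler([10, 20, 30], order=[2, 0, 1], loops=2))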
def _env_collate_fn(
batch: Tuple[Union[np.array, list], ...]
) -> Tuple[Union[np.array, list], ...]:
ret = []
for i in range(6): # kspace, mask, target, attrs, fname, slice_id
ret.append([item[i] for item in batch])
return tuple(ret)
class DataHandler:
def __init__(
self,
data_source: torch.utils.data.Dataset,
seed: Optional[int],
batch_size: int = 1,
loops: int = 1,
collate_fn: Optional[Callable] = None,
):
self._iter = None # type: Iterator[Any]
self._collate_fn = collate_fn
self._batch_size = batch_size
self._loops = loops
self._init_impl(data_source, seed, batch_size, loops, collate_fn)
def _init_impl(
self,
data_source: torch.utils.data.Dataset,
seed: Optional[int],
batch_size: int = 1,
loops: int = 1,
collate_fn: Optional[Callable] = None,
):
rng = np.random.RandomState(seed)
order = rng.permutation(len(data_source))
sampler = CyclicSampler(data_source, order, loops=loops)
if collate_fn:
self._data_loader = torch.utils.data.DataLoader(
data_source,
batch_size=batch_size,
sampler=sampler,
collate_fn=collate_fn,
)
else:
self._data_loader = torch.utils.data.DataLoader(
data_source, batch_size=batch_size, sampler=sampler
)
self._iter = iter(self._data_loader)
def reset(self):
self._iter = iter(self._data_loader)
def __iter__(self):
return self._iter
def __next__(self):
return next(self._iter)
def seed(self, seed: int):
self._init_impl(
self._data_loader.dataset,
seed,
self._batch_size,
self._loops,
self._collate_fn,
)
# -----------------------------------------------------------------------------
# BASE ACTIVE MRI ENV
# -----------------------------------------------------------------------------
class ActiveMRIEnv(gym.Env):
"""Base class for all active MRI acquisition environments.
This class provides the core logic implementation of the k-space acquisition process.
The class is not to be used directly, but rather one of its subclasses should be
instantiated. Subclasses of `ActiveMRIEnv` are responsible for data initialization
and specifying configuration options for the environment.
Args:
kspace_shape(tuple(int,int)): Shape of the k-space slices for the dataset.
        num_parallel_episodes(int): Determines the number of images that will be processed
                simultaneously by :meth:`reset()` and :meth:`step()`. Defaults to 1.
        budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                which indicates that the episode will continue until all k-space
                columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
"""
_num_loops_train_data = 100000
metadata = {"render.modes": ["human"], "video.frames_per_second": None}
def __init__(
self,
kspace_shape: Tuple[int, int],
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
):
# Default initialization
self._cfg: Mapping[str, Any] = None
self._data_location: str = None
self._reconstructor: activemri.models.Reconstructor = None
self._transform: Callable = None
self._train_data_handler: DataHandler = None
self._val_data_handler: DataHandler = None
self._test_data_handler: DataHandler = None
self._device = torch.device("cpu")
self._has_setup = False
self.num_parallel_episodes = num_parallel_episodes
self.budget = budget
self._seed = seed
self._rng = np.random.RandomState(seed)
self.reward_metric = "mse"
# Init from provided configuration
self.kspace_height, self.kspace_width = kspace_shape
# Gym init
# Observation is a dictionary
self.observation_space = None
self.action_space = gym.spaces.Discrete(self.kspace_width)
# This is changed by `set_training()`, `set_val()`, `set_test()`
self._current_data_handler: DataHandler = None
# These are changed every call to `reset()`
self._current_ground_truth: torch.Tensor = None
self._transform_wrapper: Callable = None
self._current_k_space: torch.Tensor = None
self._did_reset = False
self._steps_since_reset = 0
# These three are changed every call to `reset()` and every call to `step()`
self._current_reconstruction_numpy: np.ndarray = None
self._current_score: Dict[str, np.ndarray] = None
self._current_mask: torch.Tensor = None
# -------------------------------------------------------------------------
# Protected methods
# -------------------------------------------------------------------------
def _setup(
self,
cfg_filename: str,
data_init_func: Callable[[], DataInitFnReturnType],
):
self._has_setup = True
self._init_from_config_file(cfg_filename)
self._setup_data_handlers(data_init_func)
def _setup_data_handlers(
self,
data_init_func: Callable[[], DataInitFnReturnType],
):
train_data, val_data, test_data = data_init_func()
self._train_data_handler = DataHandler(
train_data,
self._seed,
batch_size=self.num_parallel_episodes,
loops=self._num_loops_train_data,
collate_fn=_env_collate_fn,
)
self._val_data_handler = DataHandler(
val_data,
self._seed + 1 if self._seed else None,
batch_size=self.num_parallel_episodes,
loops=1,
collate_fn=_env_collate_fn,
)
self._test_data_handler = DataHandler(
test_data,
self._seed + 2 if self._seed else None,
batch_size=self.num_parallel_episodes,
loops=1,
collate_fn=_env_collate_fn,
)
self._current_data_handler = self._train_data_handler
def _init_from_config_dict(self, cfg: Mapping[str, Any]):
self._cfg = cfg
self._data_location = cfg["data_location"]
if not os.path.isdir(self._data_location):
default_cfg, defaults_fname = activemri.envs.util.get_defaults_json()
self._data_location = default_cfg["data_location"]
if not os.path.isdir(self._data_location) and self._has_setup:
                raise RuntimeError(
                    f"The given 'data_location' is not a valid directory. Please "
                    f"write a valid dataset location in your JSON config, or in file "
                    f"{defaults_fname} (to use as a default)."
                )
self._device = torch.device(cfg["device"])
self.reward_metric = cfg["reward_metric"]
if self.reward_metric not in ["mse", "ssim", "psnr", "nmse"]:
raise ValueError("Reward metric must be one of mse, nmse, ssim, or psnr.")
mask_func = activemri.envs.util.import_object_from_str(cfg["mask"]["function"])
self._mask_func = functools.partial(mask_func, cfg["mask"]["args"])
# Instantiating reconstructor
reconstructor_cfg = cfg["reconstructor"]
reconstructor_cls = activemri.envs.util.import_object_from_str(
reconstructor_cfg["cls"]
)
checkpoint_fname = pathlib.Path(reconstructor_cfg["checkpoint_fname"])
default_cfg, defaults_fname = activemri.envs.util.get_defaults_json()
saved_models_dir = default_cfg["saved_models_dir"]
checkpoint_path = pathlib.Path(saved_models_dir) / checkpoint_fname
if self._has_setup and not checkpoint_path.is_file():
raise RuntimeError(
f"No checkpoint was found at {str(checkpoint_path)}. "
f"Please make sure that both 'checkpoint_fname' (in your JSON config) "
f"and 'saved_models_dir' (in {defaults_fname}) are configured correctly."
)
checkpoint = (
torch.load(str(checkpoint_path)) if checkpoint_path.is_file() else None
)
options = reconstructor_cfg["options"]
if checkpoint and "options" in checkpoint:
msg = (
f"Checkpoint at {checkpoint_path.name} has an 'options' key. "
f"This will override the options defined in configuration file."
)
warnings.warn(msg)
options = checkpoint["options"]
assert isinstance(options, dict)
self._reconstructor = reconstructor_cls(**options)
self._reconstructor.init_from_checkpoint(checkpoint)
self._reconstructor.eval()
self._reconstructor.to(self._device)
self._transform = activemri.envs.util.import_object_from_str(
reconstructor_cfg["transform"]
)
def _init_from_config_file(self, config_filename: str):
with open(config_filename, "rb") as f:
self._init_from_config_dict(json.load(f))
@staticmethod
def _void_transform(
kspace: torch.Tensor,
mask: torch.Tensor,
target: torch.Tensor,
attrs: List[Dict[str, Any]],
fname: List[str],
slice_id: List[int],
) -> Tuple:
return kspace, mask, target, attrs, fname, slice_id
def _send_tuple_to_device(self, the_tuple: Tuple[Union[Any, torch.Tensor]]):
the_tuple_device = []
for i in range(len(the_tuple)):
if isinstance(the_tuple[i], torch.Tensor):
the_tuple_device.append(the_tuple[i].to(self._device))
else:
the_tuple_device.append(the_tuple[i])
return tuple(the_tuple_device)
@staticmethod
def _send_dict_to_cpu_and_detach(the_dict: Dict[str, Union[Any, torch.Tensor]]):
the_dict_cpu = {}
for key in the_dict:
if isinstance(the_dict[key], torch.Tensor):
the_dict_cpu[key] = the_dict[key].detach().cpu()
else:
the_dict_cpu[key] = the_dict[key]
return the_dict_cpu
def _compute_obs_and_score(
self, override_current_mask: Optional[torch.Tensor] = None
) -> Tuple[Dict[str, Any], Dict[str, np.ndarray]]:
mask_to_use = (
override_current_mask
if override_current_mask is not None
else self._current_mask
)
reconstructor_input = self._transform_wrapper(
kspace=self._current_k_space,
mask=mask_to_use,
ground_truth=self._current_ground_truth,
)
reconstructor_input = self._send_tuple_to_device(reconstructor_input)
with torch.no_grad():
extra_outputs = self._reconstructor(*reconstructor_input)
extra_outputs = self._send_dict_to_cpu_and_detach(extra_outputs)
reconstruction = extra_outputs["reconstruction"]
# this dict is only for storing the other outputs
del extra_outputs["reconstruction"]
# noinspection PyUnusedLocal
reconstructor_input = None # de-referencing GPU tensors
score = self._compute_score_given_tensors(
*self._process_tensors_for_score_fns(
reconstruction, self._current_ground_truth
)
)
obs = {
"reconstruction": reconstruction,
"extra_outputs": extra_outputs,
"mask": self._current_mask.clone().view(self._current_mask.shape[0], -1),
}
return obs, score
def _clear_cache_and_unset_did_reset(self):
self._current_mask = None
self._current_ground_truth = None
self._current_reconstruction_numpy = None
self._transform_wrapper = None
self._current_k_space = None
self._current_score = None
self._steps_since_reset = 0
self._did_reset = False
# noinspection PyMethodMayBeStatic
def _process_tensors_for_score_fns(
self, reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return reconstruction, ground_truth
@staticmethod
def _compute_score_given_tensors(
reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Dict[str, np.ndarray]:
mse = activemri.envs.util.compute_mse(reconstruction, ground_truth)
nmse = activemri.envs.util.compute_nmse(reconstruction, ground_truth)
ssim = activemri.envs.util.compute_ssim(reconstruction, ground_truth)
psnr = activemri.envs.util.compute_psnr(reconstruction, ground_truth)
return {"mse": mse, "nmse": nmse, "ssim": ssim, "psnr": psnr}
@staticmethod
def _convert_to_gray(array: np.ndarray) -> np.ndarray:
M = np.max(array)
m = np.min(array)
return (255 * (array - m) / (M - m)).astype(np.uint8)
@staticmethod
def _render_arrays(
ground_truth: np.ndarray, reconstruction: np.ndarray, mask: np.ndarray
) -> List[np.ndarray]:
batch_size, img_height, img_width = ground_truth.shape
frames = []
for i in range(batch_size):
mask_i = np.tile(mask[i], (1, img_height, 1))
pad = 32
mask_begin = pad
mask_end = mask_begin + mask.shape[-1]
gt_begin = mask_end + pad
gt_end = gt_begin + img_width
rec_begin = gt_end + pad
rec_end = rec_begin + img_width
error_begin = rec_end + pad
error_end = error_begin + img_width
frame = 128 * np.ones((img_height, error_end + pad), dtype=np.uint8)
frame[:, mask_begin:mask_end] = 255 * mask_i
frame[:, gt_begin:gt_end] = ActiveMRIEnv._convert_to_gray(ground_truth[i])
frame[:, rec_begin:rec_end] = ActiveMRIEnv._convert_to_gray(
reconstruction[i]
)
            rel_error = np.abs((ground_truth[i] - reconstruction[i]) / ground_truth[i])
            # scale to [0, 255] before casting; casting first would floor
            # almost all relative errors to 0
            frame[:, error_begin:error_end] = (255 * np.clip(rel_error, 0, 1)).astype(
                np.uint8
            )
frames.append(frame)
return frames
# -------------------------------------------------------------------------
# Public methods
# -------------------------------------------------------------------------
def reset(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Starts a new acquisition episode with a batch of images.
        This method performs the following steps:
1. Reads a batch of images from the environment's dataset.
2. Creates an initial acquisition mask for each image.
3. Passes the loaded data and the initial masks to the transform function,
producing a batch of inputs for the environment's reconstructor model.
4. Calls the reconstructor model on this input and returns its output
as an observation.
The observation returned is a dictionary with the following keys:
- *"reconstruction"(torch.Tensor):* The reconstruction produced by the
environment's reconstructor model, using the current
acquisition mask.
- *"extra_outputs"(dict(str,Any)):* A dictionary with any additional
outputs produced by the reconstructor (e.g., uncertainty maps).
- *"mask"(torch.Tensor):* The current acquisition mask.
Returns:
tuple: tuple containing:
             - obs(dict(str,any)): Observation dictionary.
             - metadata(dict(str,any)): Metadata information containing the following keys:
- *"fname"(list(str)):* the filenames of the image read from the dataset.
- *"slice_id"(list(int)):* slice indices for each image within the volume.
- *"current_score"(dict(str,float):* A dictionary with the error measures
for the reconstruction (e.g., "mse", "nmse", "ssim", "psnr"). The measures
considered can be obtained with :meth:`score_keys()`.
"""
self._did_reset = True
try:
kspace, _, ground_truth, attrs, fname, slice_id = next(
self._current_data_handler
)
except StopIteration:
return {}, {}
self._current_ground_truth = torch.from_numpy(np.stack(ground_truth))
# Converting k-space to torch is better handled by transform,
# since we have both complex and non-complex versions
self._current_k_space = kspace
self._transform_wrapper = functools.partial(
self._transform, attrs=attrs, fname=fname, slice_id=slice_id
)
kspace_shapes = [tuple(k.shape) for k in kspace]
self._current_mask = self._mask_func(kspace_shapes, self._rng, attrs=attrs)
obs, self._current_score = self._compute_obs_and_score()
self._current_reconstruction_numpy = obs["reconstruction"].cpu().numpy()
self._steps_since_reset = 0
meta = {
"fname": fname,
"slice_id": slice_id,
"current_score": self._current_score,
}
return obs, meta
def step(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], np.ndarray, List[bool], Dict]:
"""Performs a step of active MRI acquisition.
Given a set of indices for k-space columns to acquire, updates the current batch
of masks with their corresponding indices, creates a new batch of reconstructions,
and returns the corresponding observations and rewards (for the observation format
see :meth:`reset()`). The reward is the improvement in score with
respect to the reconstruction before adding the indices. The specific score metric
used is determined by ``env.reward_metric``.
The method also returns a list of booleans, indicating whether any episodes in the
batch have already concluded.
The last return value is a metadata dictionary. It contains a single key
"current_score", which contains a dictionary with the error measures for the
reconstruction (e.g., ``"mse", "nmse", "ssim", "psnr"``). The measures
considered can be obtained with :meth:`score_keys()`.
Args:
action(union(int, sequence(int))): Indices for k-space columns to acquire. The
length of the sequence must be equal to the
current number of parallel episodes
(i.e., ``obs["reconstruction"].shape[0]``).
If only an ``int`` is passed, the index will
be replicated for the whole batch of episodes.
Returns:
tuple: The transition information in the order
``(next_observation, reward, done, meta)``. The types and shapes are:
- ``next_observation(dict):`` Dictionary format (see :meth:`reset()`).
- ``reward(np.ndarray)``: length equal to current number of parallel
episodes.
- ``done(list(bool))``: same length as ``reward``.
- ``meta(dict)``: see description above.
"""
if not self._did_reset:
raise RuntimeError(
"Attempting to call env.step() before calling env.reset()."
)
if isinstance(action, int):
action = [action for _ in range(self.num_parallel_episodes)]
self._current_mask = activemri.envs.masks.update_masks_from_indices(
self._current_mask, action
)
obs, new_score = self._compute_obs_and_score()
self._current_reconstruction_numpy = obs["reconstruction"].cpu().numpy()
reward = new_score[self.reward_metric] - self._current_score[self.reward_metric]
if self.reward_metric in ["mse", "nmse"]:
reward *= -1
else:
assert self.reward_metric in ["ssim", "psnr"]
self._current_score = new_score
self._steps_since_reset += 1
done = activemri.envs.masks.check_masks_complete(self._current_mask)
if self.budget and self._steps_since_reset >= self.budget:
done = [True] * len(done)
return obs, reward, done, {"current_score": self._current_score}
def try_action(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], Dict[str, np.ndarray]]:
"""Simulates the effects of actions without changing the environment's state.
This method operates almost exactly as :meth:`step()`, with the exception that
the environment's state is not altered. The method returns the next observation
        and the resulting reconstruction score after applying the given k-space columns to
each image in the current batch of episodes.
Args:
action(union(int, sequence(int))): Indices for k-space columns to acquire. The
length of the sequence must be equal to the
current number of parallel episodes
(i.e., ``obs["reconstruction"].shape[0]``).
If only an ``int`` is passed, the index will
be replicated for the whole batch of episodes.
Returns:
tuple: The reconstruction information in the order
``(next_observation, current_score)``. The types and shapes are:
- ``next_observation(dict):`` Dictionary format (see :meth:`reset()`).
- ``current_score(dict(str, float))``: A dictionary with the error measures
for the reconstruction (e.g., "mse", "nmse", "ssim", "psnr"). The measures
                  considered can be obtained with :meth:`score_keys()`.
"""
if not self._did_reset:
raise RuntimeError(
"Attempting to call env.try_action() before calling env.reset()."
)
if isinstance(action, int):
action = [action for _ in range(self.num_parallel_episodes)]
new_mask = activemri.envs.masks.update_masks_from_indices(
self._current_mask, action
)
obs, new_score = self._compute_obs_and_score(override_current_mask=new_mask)
return obs, new_score
def render(self, mode="human"):
"""Renders information about the environment's current state.
Returns:
``np.ndarray``: An image frame containing, from left to right: current
            acquisition mask, current ground truth image, current reconstruction,
and current relative reconstruction error.
"""
pass
def seed(self, seed: Optional[int] = None):
"""Sets the seed for the internal number generator.
This seeds affects the order of the data loader for all loop modalities (i.e.,
training, validation, test).
Args:
seed(optional(int)): The seed for the environment's random number generator.
"""
self._seed = seed
self._rng = np.random.RandomState(seed)
self._train_data_handler.seed(seed)
self._val_data_handler.seed(seed)
self._test_data_handler.seed(seed)
def set_training(self, reset: bool = False):
"""Sets the environment to use the training data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
            After this method is called, ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._train_data_handler.reset()
self._current_data_handler = self._train_data_handler
self._clear_cache_and_unset_did_reset()
def set_val(self, reset: bool = True):
"""Sets the environment to use the validation data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
            After this method is called, ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._val_data_handler.reset()
self._current_data_handler = self._val_data_handler
self._clear_cache_and_unset_did_reset()
def set_test(self, reset: bool = True):
"""Sets the environment to use the test data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
            After this method is called, ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._test_data_handler.reset()
self._current_data_handler = self._test_data_handler
self._clear_cache_and_unset_did_reset()
@staticmethod
def score_keys() -> List[str]:
""" Returns the list of score metric names used by this environment. """
return ["mse", "nmse", "ssim", "psnr"]
# -----------------------------------------------------------------------------
# CUSTOM ENVIRONMENTS
# -----------------------------------------------------------------------------
class MICCAI2020Env(ActiveMRIEnv):
"""Implementation of environment used for *Pineda et al., MICCAI 2020*.
This environment is provided to facilitate replication of the experiments performed
in *Luis Pineda, Sumana Basu, Adriana Romero, Roberto Calandra, Michal Drozdzal,
"Active MR k-space Sampling with Reinforcement Learning". MICCAI 2020.*
The dataset is the same as that of :class:`SingleCoilKneeEnv`, except that we provide
a custom validation/test split of the original validation data. The environment's
configuration file is set to use the reconstruction model used in the paper
(see :class:`activemri.models.cvpr19_reconstructor.CVPR19Reconstructor`), as well
as the proper transform to generate inputs for this model.
The k-space shape of this environment is set to ``(640, 368)``.
Args:
        num_parallel_episodes(int): Determines the number of images that will be processed
                simultaneously by :meth:`reset()` and :meth:`step()`. Defaults to 1.
        budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                which indicates that the episode will continue until all k-space
                columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
        extreme(bool): If ``True``, runs the extreme acceleration scenario described in
                the paper; otherwise, the normal acceleration scenario. Defaults to ``False``.
        obs_includes_padding(bool): If ``True``, the masks returned in observations mark
                the k-space padding columns as active. Defaults to ``True``.
    """
KSPACE_WIDTH = scknee_data.MICCAI2020Data.KSPACE_WIDTH
START_PADDING = scknee_data.MICCAI2020Data.START_PADDING
END_PADDING = scknee_data.MICCAI2020Data.END_PADDING
CENTER_CROP_SIZE = scknee_data.MICCAI2020Data.CENTER_CROP_SIZE
def __init__(
self,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
extreme: bool = False,
obs_includes_padding: bool = True,
):
super().__init__(
(640, self.KSPACE_WIDTH),
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
)
if extreme:
self._setup("configs/miccai-2020-extreme-acc.json", self._create_dataset)
else:
self._setup("configs/miccai-2020-normal-acc.json", self._create_dataset)
self.obs_includes_padding = obs_includes_padding
# -------------------------------------------------------------------------
# Protected methods
# -------------------------------------------------------------------------
def _create_dataset(self) -> DataInitFnReturnType:
root_path = pathlib.Path(self._data_location)
train_path = root_path / "knee_singlecoil_train"
val_and_test_path = root_path / "knee_singlecoil_val"
train_data = scknee_data.MICCAI2020Data(
train_path,
ActiveMRIEnv._void_transform,
num_cols=self.KSPACE_WIDTH,
)
val_data = scknee_data.MICCAI2020Data(
val_and_test_path,
ActiveMRIEnv._void_transform,
custom_split="val",
num_cols=self.KSPACE_WIDTH,
)
test_data = scknee_data.MICCAI2020Data(
val_and_test_path,
ActiveMRIEnv._void_transform,
custom_split="test",
num_cols=self.KSPACE_WIDTH,
)
return train_data, val_data, test_data
def _process_tensors_for_score_fns(
self, reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# Compute magnitude (for metrics)
reconstruction = activemri.data.transforms.to_magnitude(reconstruction, dim=3)
ground_truth = activemri.data.transforms.to_magnitude(ground_truth, dim=3)
reconstruction = activemri.data.transforms.center_crop(
reconstruction, (self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE)
)
ground_truth = activemri.data.transforms.center_crop(
ground_truth, (self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE)
)
return reconstruction, ground_truth
# -------------------------------------------------------------------------
# Public methods
# -------------------------------------------------------------------------
def reset(
self,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
obs, meta = super().reset()
if not obs:
return obs, meta
if self.obs_includes_padding:
obs["mask"][:, self.START_PADDING : self.END_PADDING] = 1
return obs, meta
def step(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], np.ndarray, List[bool], Dict]:
obs, reward, done, meta = super().step(action)
if self.obs_includes_padding:
obs["mask"][:, self.START_PADDING : self.END_PADDING] = 1
return obs, reward, done, meta
def render(self, mode="human"):
gt = self._current_ground_truth.cpu().numpy()
rec = self._current_reconstruction_numpy
gt = activemri.data.transforms.center_crop(
(gt ** 2).sum(axis=3) ** 0.5, (self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE)
)
rec = activemri.data.transforms.center_crop(
(rec ** 2).sum(axis=3) ** 0.5,
(self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE),
)
return ActiveMRIEnv._render_arrays(gt, rec, self._current_mask.cpu().numpy())
class FastMRIEnv(ActiveMRIEnv):
"""Base class for all fastMRI environments.
This class can be used to instantiate active acquisition environments using fastMRI
data. However, for convenience we provide subclasses of ``FastMRIEnv`` with
default configuration options for each dataset:
    - :class:`SingleCoilKneeEnv`
    - :class:`MultiCoilKneeEnv`
Args:
config_path(str): The path to the JSON configuration file.
dataset_name(str): One of "knee_singlecoil", "multicoil" (for knee),
"brain_multicoil". Primarily used to locate the fastMRI
dataset in the user's fastMRI data root folder.
        num_parallel_episodes(int): Determines the number of images that will be processed
                simultaneously by :meth:`reset()` and :meth:`step()`. Defaults to 1.
        budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                which indicates that the episode will continue until all k-space
                columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
num_cols(sequence(int)): Used to filter k-space data to only use images whose k-space
width is in this tuple. Defaults to ``(368, 372)``.
"""
def __init__(
self,
config_path: str,
dataset_name: str,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
num_cols: Sequence[int] = (368, 372),
):
assert dataset_name in ["knee_singlecoil", "multicoil", "brain_multicoil"]
challenge = "singlecoil" if dataset_name == "knee_singlecoil" else "multicoil"
super().__init__(
(640, np.max(num_cols)),
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
)
self.num_cols = num_cols
self.dataset_name = dataset_name
self.challenge = challenge
self._setup(config_path, self._create_dataset)
def _create_dataset(self) -> DataInitFnReturnType:
root_path = pathlib.Path(self._data_location)
datacache_dir = activemri.envs.util.maybe_create_datacache_dir()
train_path = root_path / f"{self.dataset_name}_train"
val_path = root_path / f"{self.dataset_name}_val"
val_cache_file = datacache_dir / f"val_{self.dataset_name}_cache.pkl"
test_path = root_path / f"{self.dataset_name}_test"
test_cache_file = datacache_dir / f"test_{self.dataset_name}_cache.pkl"
if not test_path.is_dir():
warnings.warn(
f"No test directory found for {self.dataset_name}. "
f"I will use val directory for test model (env.set_test())."
)
test_path = val_path
test_cache_file = val_cache_file
train_data = fastmri.data.SliceDataset(
train_path,
ActiveMRIEnv._void_transform,
challenge=self.challenge,
num_cols=self.num_cols,
dataset_cache_file=datacache_dir / f"train_{self.dataset_name}_cache.pkl",
)
val_data = fastmri.data.SliceDataset(
val_path,
ActiveMRIEnv._void_transform,
challenge=self.challenge,
num_cols=self.num_cols,
dataset_cache_file=val_cache_file,
)
test_data = fastmri.data.SliceDataset(
test_path,
ActiveMRIEnv._void_transform,
challenge=self.challenge,
num_cols=self.num_cols,
dataset_cache_file=test_cache_file,
)
return train_data, val_data, test_data
def render(self, mode="human"):
return ActiveMRIEnv._render_arrays(
self._current_ground_truth.cpu().numpy(),
self._current_reconstruction_numpy,
self._current_mask.cpu().numpy(),
)
class SingleCoilKneeEnv(FastMRIEnv):
"""Convenience class to access single-coil knee data.
Loads the configuration from ``configs/single-coil-knee.json``.
Looks for datasets named "knee_singlecoil_{train/val/test}" under the ``data_location`` dir.
If "test" is not found, it uses "val" folder for test mode.
Args:
        num_parallel_episodes(int): Determines the number of images that will be processed
                simultaneously by :meth:`reset()` and :meth:`step()`. Defaults to 1.
        budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                which indicates that the episode will continue until all k-space
                columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
num_cols(sequence(int)): Used to filter k-space data to only use images whose k-space
width is in this tuple. Defaults to ``(368, 372)``.
"""
def __init__(
self,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
num_cols: Sequence[int] = (368, 372),
):
super().__init__(
"configs/single-coil-knee.json",
"knee_singlecoil",
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
num_cols=num_cols,
)
class MultiCoilKneeEnv(FastMRIEnv):
"""Convenience class to access multi-coil knee data.
Loads the configuration from ``configs/multi-coil-knee.json``.
Looks for datasets named "multicoil_{train/val/test}" under default ``data_location`` dir.
If "test" is not found, it uses "val" folder for test mode.
Args:
        num_parallel_episodes(int): Determines the number of images that will be processed
                simultaneously by :meth:`reset()` and :meth:`step()`. Defaults to 1.
        budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                which indicates that the episode will continue until all k-space
                columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
num_cols(sequence(int)): Used to filter k-space data to only use images whose k-space
width is in this tuple. Defaults to ``(368, 372)``.
"""
def __init__(
self,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
num_cols: Sequence[int] = (368, 372),
):
super().__init__(
"configs/multi-coil-knee.json",
"multicoil",
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
num_cols=num_cols,
)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import json
import pathlib
from typing import Dict, Tuple
import numpy as np
import skimage.metrics
import torch
def get_user_dir() -> pathlib.Path:
return pathlib.Path.home() / ".activemri"
def maybe_create_datacache_dir() -> pathlib.Path:
datacache_dir = get_user_dir() / "__datacache__"
if not datacache_dir.is_dir():
datacache_dir.mkdir()
return datacache_dir
def get_defaults_json() -> Tuple[Dict[str, str], str]:
defaults_path = get_user_dir() / "defaults.json"
    if not defaults_path.exists():
parent = defaults_path.parents[0]
parent.mkdir(exist_ok=True)
content = {"data_location": "", "saved_models_dir": ""}
with defaults_path.open("w", encoding="utf-8") as f:
json.dump(content, f)
else:
with defaults_path.open("r", encoding="utf-8") as f:
content = json.load(f)
return content, str(defaults_path)
def import_object_from_str(classname: str):
    the_module, the_object = classname.rsplit(".", 1)
module = importlib.import_module(the_module)
return getattr(module, the_object)
def compute_ssim(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
ssims = []
for i in range(xs.shape[0]):
ssim = skimage.metrics.structural_similarity(
xs[i].cpu().numpy(),
ys[i].cpu().numpy(),
data_range=ys[i].cpu().numpy().max(),
)
ssims.append(ssim)
return np.array(ssims, dtype=np.float32)
def compute_psnr(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
psnrs = []
for i in range(xs.shape[0]):
psnr = skimage.metrics.peak_signal_noise_ratio(
xs[i].cpu().numpy(),
ys[i].cpu().numpy(),
data_range=ys[i].cpu().numpy().max(),
)
psnrs.append(psnr)
return np.array(psnrs, dtype=np.float32)
def compute_mse(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
dims = tuple(range(1, len(xs.shape)))
return np.mean((ys.cpu().numpy() - xs.cpu().numpy()) ** 2, axis=dims)
def compute_nmse(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
ys_numpy = ys.cpu().numpy()
nmses = []
for i in range(xs.shape[0]):
x = xs[i].cpu().numpy()
y = ys_numpy[i]
nmse = np.linalg.norm(y - x) ** 2 / np.linalg.norm(y) ** 2
nmses.append(nmse)
return np.array(nmses, dtype=np.float32)
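# Example (illustrative sketch; not part of the original module): computing all
# four metrics for a batch of two 32x32 images; each helper returns one value
# per batch element.
def example_compute_metrics() -> Dict[str, np.ndarray]:
    ys = torch.rand(2, 32, 32)
    xs = ys + 0.01 * torch.randn(2, 32, 32)
    return {
        "mse": compute_mse(xs, ys),
        "nmse": compute_nmse(xs, ys),
        "ssim": compute_ssim(xs, ys),
        "psnr": compute_psnr(xs, ys),
    }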
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = [
"ActiveMRIEnv",
"MICCAI2020Env",
"FastMRIEnv",
"SingleCoilKneeEnv",
"MultiCoilKneeEnv",
]
from .envs import (
ActiveMRIEnv,
FastMRIEnv,
MICCAI2020Env,
MultiCoilKneeEnv,
SingleCoilKneeEnv,
)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.envs.masks.py
====================================
Utilities to generate and manipulate active acquisition masks.
"""
from typing import Any, Dict, List, Optional, Sequence, Tuple
import fastmri
import numpy as np
import torch
def update_masks_from_indices(
masks: torch.Tensor, indices: Sequence[int]
) -> torch.Tensor:
assert masks.shape[0] == len(indices)
new_masks = masks.clone()
for i in range(len(indices)):
new_masks[i, ..., indices[i]] = 1
return new_masks
def check_masks_complete(masks: torch.Tensor) -> List[bool]:
done = []
for mask in masks:
done.append(mask.bool().all().item())
return done
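# Example (illustrative sketch; not part of the original module): activating one
# column per mask in a batch of two 1x4 masks; entries (0, 0, 1) and (1, 0, 3)
# become 1 in the returned copy.
def example_update_masks() -> torch.Tensor:
    masks = torch.zeros(2, 1, 4)
    return update_masks_from_indices(masks, [1, 3])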
def sample_low_frequency_mask(
mask_args: Dict[str, Any],
kspace_shapes: List[Tuple[int, ...]],
rng: np.random.RandomState,
attrs: Optional[List[Dict[str, Any]]] = None,
) -> torch.Tensor:
"""Samples low frequency masks.
Returns masks that contain some number of the lowest k-space frequencies active.
The number of frequencies doesn't have to be the same for all masks in the batch, and
it can also be a random number, depending on the given ``mask_args``. Active columns
will be represented as 1s in the mask, and inactive columns as 0s.
The distribution and shape of the masks can be controlled by ``mask_args``. This is a
dictionary with the following keys:
- *"max_width"(int)*: The maximum width of the masks.
- *"min_cols"(int)*: The minimum number of low frequencies columns to activate per side.
- *"max_cols"(int)*: The maximum number of low frequencies columns to activate
per side (inclusive).
- *"width_dim"(int)*: Indicates which of the dimensions in ``kspace_shapes``
corresponds to the k-space width.
- *"centered"(bool)*: Specifies if the low frequencies are in the center of the
k-space (``True``) or on the edges (``False``).
- *"apply_attrs_padding"(optional(bool))*: If ``True``, the function will read
keys ``"padding_left"`` and ``"padding_right"`` from ``attrs`` and set all
corresponding high-frequency columns to 1.
The number of 1s in the effective region of the mask (see next paragraph) is sampled
between ``mask_args["min_cols"]`` and ``mask_args["max_cols"]`` (inclusive).
The number of dimensions for the mask tensor will be ``mask_args["width_dim"] + 2``.
The size will be ``[batch_size, 1, ..., 1, mask_args["max_width"]]``. For example, with
``mask_args["width_dim"] = 1`` and ``mask_args["max_width"] = 368``, output tensor
has shape ``[batch_size, 1, 368]``.
This function supports simultaneously sampling masks for k-space of different number of
columns. This is controlled by argument ``kspace_shapes``. From this list, the function will
    obtain 1) ``batch_size = len(kspace_shapes)``, and 2) the width of the k-space for
    each element in the batch. The i-th mask will have
    ``kspace_shapes[i][mask_args["width_dim"]]``
    *effective* columns.
Note:
The mask tensor returned will always have
``mask_args["max_width"]`` columns. However, for any element ``i``
s.t. ``kspace_shapes[i][mask_args["width_dim"]] < mask_args["max_width"]``, the
function will then pad the extra k-space columns with 1s. The rest of the columns
will be filled out as if the mask has the same width as that indicated by
        ``kspace_shapes[i]``.
Args:
mask_args(dict(str,any)): Specifies configuration options for the masks, as explained
above.
kspace_shapes(list(tuple(int,...))): Specifies the shapes of the k-space data on
which this mask will be applied, as explained above.
rng(``np.random.RandomState``): A random number generator to sample the masks.
        attrs(optional(list(dict(str,any)))): Used to determine any high-frequency padding.
            Each entry must contain keys ``"padding_left"`` and ``"padding_right"``.
Returns:
``torch.Tensor``: The generated low frequency masks.
"""
batch_size = len(kspace_shapes)
num_cols = [shape[mask_args["width_dim"]] for shape in kspace_shapes]
mask = torch.zeros(batch_size, mask_args["max_width"])
num_low_freqs = rng.randint(
mask_args["min_cols"], mask_args["max_cols"] + 1, size=batch_size
)
for i in range(batch_size):
# If padding needs to be accounted for, only add low frequency lines
# beyond the padding
if attrs and mask_args.get("apply_attrs_padding", False):
padding_left = attrs[i]["padding_left"]
padding_right = attrs[i]["padding_right"]
else:
padding_left, padding_right = 0, num_cols[i]
pad = (num_cols[i] - 2 * num_low_freqs[i] + 1) // 2
mask[i, pad : pad + 2 * num_low_freqs[i]] = 1
mask[i, :padding_left] = 1
mask[i, padding_right : num_cols[i]] = 1
if not mask_args["centered"]:
mask[i, : num_cols[i]] = fastmri.ifftshift(mask[i, : num_cols[i]])
mask[i, num_cols[i] : mask_args["max_width"]] = 1
mask_shape = [batch_size] + [1] * (mask_args["width_dim"] + 1)
mask_shape[mask_args["width_dim"] + 1] = mask_args["max_width"]
return mask.view(*mask_shape)
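# Example (illustrative sketch; not part of the original module): sampling a
# batch of two low-frequency masks for 368-column k-space slices. With
# width_dim=1 the returned tensor has shape (2, 1, 368).
def example_sample_low_frequency_mask() -> torch.Tensor:
    mask_args = {
        "max_width": 368,
        "min_cols": 10,
        "max_cols": 15,
        "width_dim": 1,
        "centered": True,
    }
    kspace_shapes = [(640, 368), (640, 368)]
    rng = np.random.RandomState(0)
    return sample_low_frequency_mask(mask_args, kspace_shapes, rng)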
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.data.transforms.py
====================================
Transform functions to process fastMRI data for reconstruction models.
"""
from typing import Tuple, Union
import fastmri
import fastmri.data.transforms as fastmri_transforms
import numpy as np
import torch
import activemri.data.singlecoil_knee_data as scknee_data
TensorType = Union[np.ndarray, torch.Tensor]
def to_magnitude(tensor: torch.Tensor, dim: int) -> torch.Tensor:
return (tensor ** 2).sum(dim=dim) ** 0.5
def center_crop(x: TensorType, shape: Tuple[int, int]) -> TensorType:
"""Center crops a tensor to the desired 2D shape.
Args:
x(union(``torch.Tensor``, ``np.ndarray``)): The tensor to crop.
Shape should be ``(batch_size, height, width)``.
shape(tuple(int,int)): The desired shape to crop to.
Returns:
(union(``torch.Tensor``, ``np.ndarray``)): The cropped tensor.
"""
assert len(x.shape) == 3
assert 0 < shape[0] <= x.shape[1]
assert 0 < shape[1] <= x.shape[2]
h_from = (x.shape[1] - shape[0]) // 2
w_from = (x.shape[2] - shape[1]) // 2
    h_to = h_from + shape[0]
    w_to = w_from + shape[1]
x = x[:, h_from:h_to, w_from:w_to]
return x
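# Example (illustrative sketch; not part of the original module): cropping a
# batch of 8x6 images to 4x2 keeps the central region, so the result has
# shape (1, 4, 2).
def example_center_crop() -> torch.Tensor:
    x = torch.arange(48, dtype=torch.float32).view(1, 8, 6)
    return center_crop(x, (4, 2))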
def ifft_permute_maybe_shift(
x: torch.Tensor, normalized: bool = False, ifft_shift: bool = False
) -> torch.Tensor:
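    # Note: torch.ifft is the pre-1.8 PyTorch complex FFT API (removed in
    # torch>=1.8); this module targets that older interface.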
x = x.permute(0, 2, 3, 1)
y = torch.ifft(x, 2, normalized=normalized)
if ifft_shift:
y = fastmri.ifftshift(y, dim=(1, 2))
return y.permute(0, 3, 1, 2)
def raw_transform_miccai2020(kspace=None, mask=None, **_kwargs):
"""Transform to produce input for reconstructor used in `Pineda et al. MICCAI'20 <https://arxiv.org/pdf/2007.10469.pdf>`_.
    Produces a zero-filled reconstruction and a mask that serve as input to models of type
    :class:`activemri.models.cvpr19_reconstructor.CVPR19Reconstructor`. The mask is almost
equal to the mask passed as argument, except that high-frequency padding columns are set
to 1, and the mask is reshaped to be compatible with the reconstructor.
Args:
kspace(``np.ndarray``): The array containing the k-space data returned by the dataset.
mask(``torch.Tensor``): The masks to apply to the k-space.
Returns:
tuple: A tuple containing:
            - ``torch.Tensor``: The zero-filled reconstruction that will be passed to the
              reconstructor.
- ``torch.Tensor``: The mask to use as input to the reconstructor.
"""
# alter mask to always include the highest frequencies that include padding
mask[
:,
:,
scknee_data.MICCAI2020Data.START_PADDING : scknee_data.MICCAI2020Data.END_PADDING,
] = 1
mask = mask.unsqueeze(1)
all_kspace = []
for ksp in kspace:
all_kspace.append(torch.from_numpy(ksp).permute(2, 0, 1))
k_space = torch.stack(all_kspace)
masked_true_k_space = torch.where(
mask.byte(),
k_space,
torch.tensor(0.0).to(mask.device),
)
reconstructor_input = ifft_permute_maybe_shift(masked_true_k_space, ifft_shift=True)
return reconstructor_input, mask
# Based on
# https://github.com/facebookresearch/fastMRI/blob/master/experimental/unet/unet_module.py
def _base_fastmri_unet_transform(
kspace,
mask,
ground_truth,
attrs,
which_challenge="singlecoil",
):
kspace = fastmri_transforms.to_tensor(kspace)
mask = mask[..., : kspace.shape[-2]] # accounting for variable size masks
masked_kspace = kspace * mask.unsqueeze(-1) + 0.0
# inverse Fourier transform to get zero filled solution
image = fastmri.ifft2c(masked_kspace)
# crop input to correct size
if ground_truth is not None:
crop_size = (ground_truth.shape[-2], ground_truth.shape[-1])
else:
crop_size = (attrs["recon_size"][0], attrs["recon_size"][1])
# check for FLAIR 203
if image.shape[-2] < crop_size[1]:
crop_size = (image.shape[-2], image.shape[-2])
# noinspection PyTypeChecker
image = fastmri_transforms.complex_center_crop(image, crop_size)
# absolute value
image = fastmri.complex_abs(image)
# apply Root-Sum-of-Squares if multicoil data
if which_challenge == "multicoil":
image = fastmri.rss(image)
# normalize input
image, mean, std = fastmri_transforms.normalize_instance(image, eps=1e-11)
image = image.clamp(-6, 6)
return image.unsqueeze(0), mean, std
def _batched_fastmri_unet_transform(
kspace, mask, ground_truth, attrs, which_challenge="singlecoil"
):
batch_size = len(kspace)
images, means, stds = [], [], []
for i in range(batch_size):
image, mean, std = _base_fastmri_unet_transform(
kspace[i],
mask[i],
ground_truth[i],
attrs[i],
which_challenge=which_challenge,
)
images.append(image)
means.append(mean)
stds.append(std)
return torch.stack(images), torch.stack(means), torch.stack(stds)
# noinspection PyUnusedLocal
def fastmri_unet_transform_singlecoil(
kspace=None, mask=None, ground_truth=None, attrs=None, fname=None, slice_id=None
):
"""
Transform to use as input to fastMRI's Unet model for singlecoil data.
This is an adapted version of the code found in
`fastMRI <https://github.com/facebookresearch/fastMRI/blob/master/experimental/unet/unet_module.py#L190>`_.
"""
return _batched_fastmri_unet_transform(
kspace, mask, ground_truth, attrs, "singlecoil"
)
# noinspection PyUnusedLocal
def fastmri_unet_transform_multicoil(
kspace=None, mask=None, ground_truth=None, attrs=None, fname=None, slice_id=None
):
"""Transform to use as input to fastMRI's Unet model for multicoil data.
This is an adapted version of the code found in
`fastMRI <https://github.com/facebookresearch/fastMRI/blob/master/experimental/unet/unet_module.py#L190>`_.
"""
return _batched_fastmri_unet_transform(
kspace, mask, ground_truth, attrs, "multicoil"
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
from typing import Callable, List, Optional, Tuple
import fastmri
import h5py
import numpy as np
import torch.utils.data
# -----------------------------------------------------------------------------
# Single coil knee dataset (as used in MICCAI'20)
# -----------------------------------------------------------------------------
class MICCAI2020Data(torch.utils.data.Dataset):
# This is the same as fastMRI singlecoil_knee, except we provide a custom test split
# and also normalize images by the mean norm of the k-space over training data
KSPACE_WIDTH = 368
KSPACE_HEIGHT = 640
START_PADDING = 166
END_PADDING = 202
CENTER_CROP_SIZE = 320
def __init__(
self,
root: pathlib.Path,
transform: Callable,
num_cols: Optional[int] = None,
num_volumes: Optional[int] = None,
num_rand_slices: Optional[int] = None,
custom_split: Optional[str] = None,
):
self.transform = transform
self.examples: List[Tuple[pathlib.PurePath, int]] = []
self.num_rand_slices = num_rand_slices
self.rng = np.random.RandomState(1234)
files = []
for fname in list(pathlib.Path(root).iterdir()):
data = h5py.File(fname, "r")
if num_cols is not None and data["kspace"].shape[2] != num_cols:
continue
files.append(fname)
if custom_split is not None:
split_info = []
with open(f"activemri/data/splits/knee_singlecoil/{custom_split}.txt") as f:
for line in f:
split_info.append(line.rsplit("\n")[0])
files = [f for f in files if f.name in split_info]
if num_volumes is not None:
self.rng.shuffle(files)
files = files[:num_volumes]
for volume_i, fname in enumerate(sorted(files)):
data = h5py.File(fname, "r")
kspace = data["kspace"]
if num_rand_slices is None:
num_slices = kspace.shape[0]
self.examples += [(fname, slice_id) for slice_id in range(num_slices)]
else:
slice_ids = list(range(kspace.shape[0]))
self.rng.seed(seed=volume_i)
self.rng.shuffle(slice_ids)
self.examples += [
(fname, slice_id) for slice_id in slice_ids[:num_rand_slices]
]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
fname, slice_id = self.examples[i]
with h5py.File(fname, "r") as data:
kspace = data["kspace"][slice_id]
kspace = torch.from_numpy(np.stack([kspace.real, kspace.imag], axis=-1))
kspace = fastmri.ifftshift(kspace, dim=(0, 1))
target = torch.ifft(kspace, 2, normalized=False)
target = fastmri.ifftshift(target, dim=(0, 1))
# Normalize using mean of k-space in training data
target /= 7.072103529760345e-07
kspace /= 7.072103529760345e-07
# Environment expects numpy arrays. The code above was used with an older
# version of the environment to generate the results of the MICCAI'20 paper.
# So, to keep this consistent with the version in the paper, we convert
# the tensors back to numpy rather than changing the original code.
kspace = kspace.numpy()
target = target.numpy()
return self.transform(
kspace,
torch.zeros(kspace.shape[1]),
target,
dict(data.attrs),
fname.name,
slice_id,
)
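# Minimal usage sketch (illustrative only; the path and the transform below
# are hypothetical placeholders, not part of the original package):
#
#     dataset = MICCAI2020Data(
#         pathlib.Path("/datasets/knee_singlecoil/train"),  # hypothetical path
#         transform=my_transform,  # any callable with the transform signature
#         custom_split="val",
#     )
#     sample = dataset[0]  # returns whatever ``my_transform`` returns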
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import numpy as np
import torch
from . import singlecoil_knee_data
from . import transforms
__all__ = ["singlecoil_knee_data", "transforms"]
def transform_template(
kspace: List[np.ndarray] = None,
mask: torch.Tensor = None,
ground_truth: torch.Tensor = None,
attrs: List[Dict[str, Any]] = None,
fname: List[str] = None,
slice_id: List[int] = None,
):
"""Template for transform functions.
Args:
- kspace(list(np.ndarray)): A list of complex numpy arrays, one per k-space in the batch.
The length is the ``batch_size``, and array shapes are ``H x W x 2`` for single coil data,
and ``C x H x W x 2`` for multicoil data, where ``H`` denotes k-space height, ``W``
denotes k-space width, and ``C`` is the number of coils. Note that the width can differ
between batch elements, if ``num_cols`` is set to a tuple when creating the environment.
- mask(torch.Tensor): A tensor of binary column masks, where 1s indicate that the
corresponding k-space column should be selected. The shape is ``batch_size x 1 x maxW``,
for single coil data, and ``batch_size x 1 x 1 x maxW`` for multicoil data. Here ``maxW``
is the maximum k-space width returned by the environment.
- ground_truth(torch.Tensor): A tensor of ground truth 2D images. The shape is
``batch_size x 320 x 320``.
- attrs(list(dict)): A list of dictionaries with the attributes read from the fastMRI for
each image.
        - fname(list(str)): A list of the filenames the images were read from.
- slice_id(list(int)): A list with the slice ids in the files where each image was read
from.
Returns:
tuple(Any...): A tuple with any number of inputs required by the reconstructor model.
"""
pass
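# A minimal concrete transform following the template above (an illustrative
# sketch, not part of the original package). It assumes single coil batches
# whose k-spaces all share the same width, zero-fills them with the mask, and
# returns a pair a reconstructor could consume.
def example_zero_fill_transform(
    kspace=None, mask=None, ground_truth=None, attrs=None, fname=None, slice_id=None
):
    stacked = torch.stack([torch.from_numpy(k) for k in kspace])  # B x H x W x 2
    masked = stacked * mask.unsqueeze(-1)  # broadcast B x 1 x W x 1 over k-space
    return masked, mask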
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import numpy as np
import pytest # noqa: F401
import torch
import activemri.envs.envs as envs
import activemri.envs.util as util
from . import mocks
def test_import_object_from_str():
ceil = util.import_object_from_str("math.ceil")
assert 3 == ceil(2.5)
det = util.import_object_from_str("numpy.linalg.det")
assert det(np.array([[1, 0], [0, 1]])) == 1
def test_random_cyclic_sampler_default_order():
alist = [0, 1, 2]
sampler = envs.CyclicSampler(alist, None, loops=10)
cnt = 0
for i, x in enumerate(sampler):
assert alist[x] == i % 3
cnt += 1
assert cnt == 30
def test_random_cyclic_sampler_given_order():
alist = [1, 2, 0]
sampler = envs.CyclicSampler(alist, order=[2, 0, 1], loops=10)
cnt = 0
for i, x in enumerate(sampler):
assert alist[x] == i % 3
cnt += 1
assert cnt == 30
def test_data_handler():
data = list(range(10))
batch_size = 2
loops = 3
handler = envs.DataHandler(data, None, batch_size=batch_size, loops=loops)
cnt = dict([(x, 0) for x in data])
for x in handler:
assert len(x) == batch_size
for t in x:
v = t.item()
cnt[v] = cnt[v] + 1
for x in cnt:
assert cnt[x] == loops
# noinspection PyProtectedMember,PyClassHasNoInit
class TestActiveMRIEnv:
def test_init_from_config_dict(self):
env = envs.ActiveMRIEnv((32, 64))
env._init_from_config_dict(mocks.config_dict)
assert env.reward_metric == "ssim"
assert type(env._reconstructor) == mocks.Reconstructor
assert env._reconstructor.option1 == 1
assert env._reconstructor.option2 == 0.5
assert env._reconstructor.option3 == "dummy"
assert env._reconstructor.option4
assert env._reconstructor.weights == "init"
assert env._reconstructor._eval
assert env._reconstructor.device == torch.device("cpu")
assert env._transform("x", "m") == ("x", "m")
batch_size = 3
shapes = [(1, 2) for _ in range(batch_size)]
mask = env._mask_func(shapes, "rng")
assert mask.shape == (batch_size, env._cfg["mask"]["args"]["size"])
def test_init_sets_action_space(self):
env = envs.ActiveMRIEnv((32, 64))
for i in range(64):
assert env.action_space.contains(i)
assert env.action_space.n == 64
def test_reset_and_step(self):
# the mock environment is set up to use mocks.Reconstructor
# and mocks.mask_function.
# The mask and data will be tensors of size D (env._tensor_size)
# Initial mask will be:
# [1 1 1 0 0 .... 0] (needs 7 actions)
# [1 1 0 0 0 .... 0] (needs 8 actions)
# Ground truth is X * ones(D, D)
        # K-space is (X - 1) * ones(D, D)
# Reconstruction is K-space + Mask. So, with the initial mask we have
# sum |reconstruction - gt| = D^2 - 3D for first element of batch,
# and = D^2 - 2D for second element.
env = mocks.MRIEnv(num_parallel_episodes=2, loops_train=1, num_train=2)
obs, _ = env.reset()
# env works with shape (batch, height, width, {real/img})
assert tuple(obs["reconstruction"].shape) == (
env.num_parallel_episodes,
env._tensor_size,
env._tensor_size,
2,
)
assert "ssim" in env._current_score
mask_idx0_initial_active = env._cfg["mask"]["args"]["how_many"]
mask_idx1_initial_active = mask_idx0_initial_active - 1
def expected_score(step):
            # See explanation above; every step adds one more 1 to the mask.
s = env._tensor_size
total = s ** 2
return 2 * (
(total - (mask_idx0_initial_active + step) * s)
+ (total - (mask_idx1_initial_active + step) * s)
)
assert env._current_score["ssim"] == expected_score(0)
prev_score = env._current_score["ssim"]
for action in range(mask_idx0_initial_active, env._tensor_size):
obs, reward, done, _ = env.step(action)
assert env._current_score["ssim"] == expected_score(
action - mask_idx1_initial_active
)
assert reward == env._current_score["ssim"] - prev_score
prev_score = env._current_score["ssim"]
if action < 9:
assert done == [False, False]
else:
assert done == [True, False]
obs, reward, done, _ = env.step(mask_idx1_initial_active)
assert env._current_score["ssim"] == 0.0
assert reward == -prev_score
assert done == [True, True]
def test_training_loop_ends(self):
env = envs.ActiveMRIEnv((32, 64), num_parallel_episodes=3)
env._num_loops_train_data = 3
env._init_from_config_dict(mocks.config_dict)
env._compute_score_given_tensors = lambda x, y: {"mock": 0}
num_train = 10
tensor_size = env._cfg["mask"]["args"]["size"]
data_init_fn = mocks.make_data_init_fn(tensor_size, num_train, 0, 0)
env._setup_data_handlers(data_init_fn)
seen = dict([(x, 0) for x in range(num_train)])
for _ in range(1000):
obs, meta = env.reset()
if not obs:
cnt_seen = functools.reduce(lambda x, y: x + y, seen.values())
assert cnt_seen == num_train * env._num_loops_train_data
break
slice_ids = meta["slice_id"]
for slice_id in slice_ids:
assert slice_id < num_train
seen[slice_id] = seen[slice_id] + 1
for i in range(num_train):
assert seen[i] == env._num_loops_train_data
def test_alternate_loop_modes(self):
# This tests if the environment can change correctly between train, val, and test
# datasets.
num_train, num_val, num_test = 10, 7, 5
env = mocks.MRIEnv(
num_parallel_episodes=1,
loops_train=2,
num_train=num_train,
num_val=num_val,
num_test=num_test,
)
# For each iteration of train data we will do a full loop over validation
# and a partial loop over test.
seen_train = dict([(x, 0) for x in range(num_train)])
seen_val = dict([(x, 0) for x in range(num_val)])
seen_test = dict([(x, 0) for x in range(num_test)])
for i in range(1000):
env.set_training()
obs, meta = env.reset()
if not obs:
break
for slice_id in meta["slice_id"]:
seen_train[slice_id] = seen_train[slice_id] + 1
env.set_val()
for j in range(num_val + 1):
obs, meta = env.reset()
if not obs:
cnt_seen = functools.reduce(lambda x, y: x + y, seen_val.values())
assert cnt_seen == (i + 1) * num_val
break
assert j < num_val
for slice_id in meta["slice_id"]:
seen_val[slice_id] = seen_val[slice_id] + 1
            # With num_test - 1 we check that the next call starts from index 0
            # again, even if not all images were visited. One element of the test
            # set should never have been seen (data_handler permutes the indices,
            # so we don't know which index it will be).
env.set_test()
for _ in range(num_test - 1):
obs, meta = env.reset()
assert obs
for slice_id in meta["slice_id"]:
seen_test[slice_id] = seen_test[slice_id] + 1
for i in range(num_train):
assert seen_train[i] == env._num_loops_train_data
for i in range(num_val):
assert seen_val[i] == env._num_loops_train_data * num_train
cnt_not_seen = 0
for i in range(num_test):
if seen_test[i] != 0:
assert seen_test[i] == env._num_loops_train_data * num_train
else:
cnt_not_seen += 1
assert cnt_not_seen == 1
def test_seed(self):
num_train = 10
env = mocks.MRIEnv(
num_parallel_episodes=1, loops_train=1, num_train=num_train, seed=0
)
def get_current_order():
order = []
for _ in range(num_train):
obs, _ = env.reset()
order.append(obs["reconstruction"].sum().int().item())
return order
order_1 = get_current_order()
env.seed(123)
order_2 = get_current_order()
env.seed(0)
order_3 = get_current_order()
assert set(order_1) == set(order_2)
assert any([a != b for a, b in zip(order_1, order_2)])
assert all([a == b for a, b in zip(order_1, order_3)])
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import torch
import activemri.envs.masks as masks
def test_update_masks_from_indices():
mask_1 = torch.tensor([[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], dtype=torch.uint8)
mask_2 = torch.tensor([[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]], dtype=torch.uint8)
mask = torch.stack([mask_1, mask_2])
mask = masks.update_masks_from_indices(mask, np.array([2, 0]))
assert mask.shape == torch.Size([2, 3, 4])
expected = torch.tensor(
[[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]], dtype=torch.uint8
).repeat(2, 1, 1)
assert (mask - expected).sum().item() == 0
def test_sample_low_freq_masks():
for centered in [True, False]:
max_width = 20
mask_args = {
"max_width": max_width,
"width_dim": 1,
"min_cols": 1,
"max_cols": 4,
"centered": centered,
}
rng = np.random.RandomState()
widths = [10, 12, 18, 20]
seen_cols = set()
for i in range(1000):
dummy_shapes = [(0, w) for w in widths] # w is in args.width_dim
the_masks = masks.sample_low_frequency_mask(mask_args, dummy_shapes, rng)
assert the_masks.shape == (len(widths), 1, 20)
the_masks = the_masks.squeeze()
for j, w in enumerate(widths):
# Mask is symmetrical
assert torch.all(
the_masks[j, : w // 2]
== torch.flip(the_masks[j, w // 2 : w], dims=[0])
)
# Extra columns set to one so that they are not valid actions
assert the_masks[j, w:].sum().item() == max_width - w
# Check that the number of columns is in the correct range
active = the_masks[j, :w].sum().int().item()
assert active >= 2 * mask_args["min_cols"]
assert active <= 2 * mask_args["max_cols"]
seen_cols.add(active // 2)
# These masks should be either something like
# 1100000011|111111111 (not centered)
# 0000110000|111111111 (centered)
# The lines below check for this
prev = the_masks[j, 0]
changed = False
for k in range(1, w // 2):
cur = the_masks[j, k]
if cur != prev:
assert not changed
changed = True
prev = cur
assert changed
if centered:
assert not the_masks[j, 0]
else:
assert the_masks[j, 0]
# Check that masks were sampled with all possible number of active cols
assert len(seen_cols) == (mask_args["max_cols"] - mask_args["min_cols"] + 1)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest # noqa: F401
import torch
import activemri.baselines as baselines
def test_random():
policy = baselines.RandomPolicy()
bs = 4
mask = torch.zeros(bs, 10)
mask[:, :3] = 1
mask[0, :7] = 1
obs = {"mask": mask}
steps = 5
for i in range(steps):
action = policy(obs)
assert len(action) == bs
for j in range(bs):
if j > 0 or (j == 0 and i < 3):
assert obs["mask"][j, action[j]] == 0
obs["mask"][j, action[j]] = 1
assert obs["mask"].sum().item() == 34
def test_low_to_high_no_alternate():
policy = baselines.LowestIndexPolicy(alternate_sides=False, centered=False)
mask = torch.zeros(2, 10)
mask[0, 0::2] = 1
mask[1, 1::2] = 1
obs = {"mask": mask}
for i in range(5):
action = policy(obs)
assert len(action) == 2
assert action[0] == 2 * i + 1
assert action[1] == 2 * i
obs["mask"][:, action] = 1
assert obs["mask"].sum().item() == 20
def test_low_to_high_alternate():
policy = baselines.LowestIndexPolicy(alternate_sides=True, centered=False)
mask = torch.zeros(2, 10)
mask[0, 0::2] = 1
mask[1, 1::2] = 1
obs = {"mask": mask}
order = [[1, 9, 3, 7, 5], [0, 8, 2, 6, 4]]
for i in range(5):
action = policy(obs)
assert len(action) == 2
assert action[0] == order[0][i]
assert action[1] == order[1][i]
obs["mask"][:, action] = 1
assert obs["mask"].sum().item() == 20
def test_low_to_high_alternate_centered():
policy = baselines.LowestIndexPolicy(alternate_sides=True, centered=True)
mask = torch.zeros(2, 10)
mask[0, 0::2] = 1
mask[1, 1::2] = 1
obs = {"mask": mask}
order = [[5, 3, 7, 1, 9], [6, 4, 8, 2, 0]]
for i in range(5):
action = policy(obs)
assert len(action) == 2
assert action[0] == order[0][i]
assert action[1] == order[1][i]
obs["mask"][:, action] = 1
assert obs["mask"].sum().item() == 20
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Dict
import numpy as np
import torch
import activemri.envs.envs as envs
cfg_json_str = """
{
"data_location": "dummy_location",
"reconstructor": {
"cls": "tests.core.mocks.Reconstructor",
"options": {
"option1": 1,
"option2": 0.5,
"option3": "dummy",
"option4": true
},
"checkpoint_fname": "null",
"transform": "tests.core.mocks.transform"
},
"mask": {
"function": "tests.core.mocks.mask_func",
"args": {
"size": 10,
"how_many": 3
}
},
"reward_metric": "ssim",
"device": "cpu"
}
"""
config_dict = json.loads(cfg_json_str)
class Dataset:
def __init__(self, tensor_size, length):
self.tensor_size = tensor_size
self.length = length
def __len__(self):
return self.length
def __getitem__(self, item):
mock_kspace = (item + 1) * np.ones(
            (self.tensor_size, self.tensor_size, 2)  # 2 is for mocking (real, imag.)
)
mock_mask = np.zeros(self.tensor_size)
mock_ground_truth = mock_kspace + 1
return mock_kspace, mock_mask, mock_ground_truth, {}, "fname", item
def make_data_init_fn(tensor_size, num_train, num_val, num_test):
train_data = Dataset(tensor_size, num_train)
val_data = Dataset(tensor_size, num_val)
test_data = Dataset(tensor_size, num_test)
def data_init_fn():
return train_data, val_data, test_data
return data_init_fn
# noinspection PyUnusedLocal
def mask_func(args, kspace_shapes, _rng, attrs=None):
batch_size = len(kspace_shapes)
mask = torch.zeros(batch_size, args["size"])
mask[0, : args["how_many"]] = 1
if batch_size > 1:
mask[1, : args["how_many"] - 1] = 1
return mask
def transform(kspace=None, mask=None, **_kwargs):
if isinstance(mask, torch.Tensor):
mask = mask.view(mask.shape[0], 1, -1, 1)
elif isinstance(mask, np.ndarray):
mask = torch.from_numpy(mask)
if isinstance(kspace, list):
new_kspace = []
for array in kspace:
new_kspace.append(torch.from_numpy(array))
return torch.stack(new_kspace), mask
return kspace, mask
# noinspection PyMethodMayBeStatic
class Reconstructor:
def __init__(self, **kwargs):
self.option1 = kwargs["option1"]
self.option2 = kwargs["option2"]
self.option3 = kwargs["option3"]
self.option4 = kwargs["option4"]
self.weights = None
self._eval = None
self.device = None
self.state_dict = {}
def init_from_checkpoint(self, _checkpoint):
self.weights = "init"
def eval(self):
self._eval = True
def to(self, device):
self.device = device
def forward(self, kspace, mask):
return {"reconstruction": kspace + mask}
__call__ = forward
def load_state_dict(self):
pass
class MRIEnv(envs.ActiveMRIEnv):
def __init__(
self,
num_parallel_episodes,
loops_train,
num_train=1,
num_val=1,
num_test=1,
seed=None,
):
super().__init__(
(32, 64), num_parallel_episodes=num_parallel_episodes, seed=seed
)
self._num_loops_train_data = loops_train
self._init_from_config_dict(config_dict)
self._tensor_size = self._cfg["mask"]["args"]["size"]
data_init_fn = make_data_init_fn(
self._tensor_size, num_train, num_val, num_test
)
self._setup_data_handlers(data_init_fn)
@staticmethod
def _compute_score_given_tensors(
reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Dict[str, np.ndarray]:
return {"ssim": (reconstruction - ground_truth).abs().sum().numpy()}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import pytest # noqa: F401
import activemri.envs.util
def test_all_configs():
configs_root = "configs/"
for fname in os.listdir(configs_root):
with open(os.path.join(configs_root, fname), "r") as f:
cfg = json.load(f)
assert "data_location" in cfg
assert "device" in cfg
assert "reward_metric" in cfg
assert "mask" in cfg
mask_cfg = cfg["mask"]
try:
_ = activemri.envs.util.import_object_from_str(mask_cfg["function"])
except ModuleNotFoundError:
print(f"Mask function in config file {fname} was not found.")
assert False
assert "args" in mask_cfg and isinstance(mask_cfg["args"], dict)
assert "reconstructor" in cfg
reconstructor_cfg = cfg["reconstructor"]
assert "cls" in reconstructor_cfg
try:
_ = activemri.envs.util.import_object_from_str(reconstructor_cfg["cls"])
except ModuleNotFoundError:
print(f"Reconstructor class in config file {fname} was not found.")
assert False
assert "options" in reconstructor_cfg
assert "checkpoint_fname" in reconstructor_cfg
assert "transform" in reconstructor_cfg
try:
_ = activemri.envs.util.import_object_from_str(
reconstructor_cfg["transform"]
)
except ModuleNotFoundError:
print(f"Transform function in config file {fname} was not found.")
assert False
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import activemri.envs.envs as envs
class TestMICCAIEnv:
env = envs.MICCAI2020Env()
def test_miccai_env_batch_content(self):
for i, batch in enumerate(self.env._train_data_handler):
# No check below for batch[1], since it's the mask and will be replaced later
            for j in [0, 2, 3, 4, 5]:
assert isinstance(batch[j], list)
assert len(batch[j]) == self.env.num_parallel_episodes
for batch_idx in range(self.env.num_parallel_episodes):
assert isinstance(batch[0][batch_idx], np.ndarray)
assert batch[0][batch_idx].shape == (
640,
368,
2,
) # k-space
assert isinstance(batch[2][batch_idx], np.ndarray)
assert batch[2][batch_idx].shape == (640, 368, 2) # ground truth image
# data.attrs
assert len(batch[3][batch_idx]) == 4
for key in ["norm", "max", "patient_id", "acquisition"]:
assert key in batch[3][batch_idx]
# file name
assert isinstance(batch[4][batch_idx], str)
# slice_id
assert isinstance(batch[5][batch_idx], int)
if i == 10:
break
def test_miccai_reset(self):
obs, _ = self.env.reset()
assert len(obs) == 3
assert "reconstruction" in obs
assert "mask" in obs
assert "extra_outputs" in obs
assert obs["reconstruction"].shape == (
self.env.num_parallel_episodes,
640,
368,
2,
)
assert obs["mask"].shape == (self.env.num_parallel_episodes, 368)
class TestSingleCoilKneeEnv:
env = envs.SingleCoilKneeEnv()
def test_singlecoil_knee_env_batch_content(self):
for i, batch in enumerate(self.env._train_data_handler):
# No check below for batch[1], since it's the mask and will be replaced later
kspace, _, ground_truth, attrs, fname, slice_id = batch
            for j in [0, 2, 3, 4, 5]:
assert isinstance(batch[j], list)
assert len(batch[j]) == self.env.num_parallel_episodes
for batch_idx in range(self.env.num_parallel_episodes):
assert isinstance(kspace[batch_idx], np.ndarray)
assert np.all(
np.iscomplex(kspace[batch_idx][np.nonzero(kspace[batch_idx])])
)
assert kspace[batch_idx].shape in [(640, 368), (640, 372)] # k-space
assert isinstance(ground_truth[batch_idx], np.ndarray)
assert not np.any(np.iscomplex(ground_truth[batch_idx]))
assert ground_truth[batch_idx].shape == (320, 320) # ground_truth
# data.attrs
assert len(attrs[batch_idx]) == 8
for key in [
"acquisition",
"max",
"norm",
"patient_id",
"padding_left",
"padding_right",
"encoding_size",
"recon_size",
]:
assert key in attrs[batch_idx]
# file name
assert isinstance(fname[batch_idx], str)
# slice_id
assert isinstance(slice_id[batch_idx], int)
if i == 10:
break
def test_singlecoil_knee_reset(self):
obs, _ = self.env.reset()
assert len(obs) == 3
assert "reconstruction" in obs
assert "mask" in obs
assert "extra_outputs" in obs
assert obs["reconstruction"].shape == (self.env.num_parallel_episodes, 320, 320)
assert obs["mask"].shape in [
(self.env.num_parallel_episodes, 368),
(self.env.num_parallel_episodes, 372),
]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "active-mri-acquisition"
copyright = "2020, Facebook AI Research"
author = "Facebook AI Research"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"nbsphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import pickle
from typing import cast
import numpy as np
import torch
import activemri.baselines as baselines
import activemri.envs as envs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--budget", type=int, default=100, help="How many k-space columns to acquire."
)
parser.add_argument(
"--num_parallel_episodes",
type=int,
default=1,
help="The number of episodes the environment runs in parallel",
)
parser.add_argument(
"--num_episodes",
type=int,
default=100,
help="How many batches of episodes to run in total.",
)
parser.add_argument(
"--baseline",
type=str,
choices=[
"random",
"random-lb",
"lowtohigh",
"evaluator",
"ds-ddqn",
"ss-ddqn",
"oracle",
],
help="The algorithm to evaluate.",
)
parser.add_argument(
"--evaluator_path",
type=str,
default=None,
help="Path to checkpoint for evalutor network.",
)
parser.add_argument(
"--baseline_device",
type=str,
default="cpu",
help="Which torch device to use for the baseline (if 'evaluator' or '*ddqn').",
)
parser.add_argument(
"--dqn_checkpoint_path",
type=str,
default=None,
help="Checkpoint for the DDQN agent.",
)
parser.add_argument("--legacy_model", action="store_true")
parser.add_argument(
"--oracle_num_samples",
type=int,
default=20,
help="If using the one step greedy oracle, how many actions to sample each step.",
)
parser.add_argument(
"--output_dir",
type=str,
default=None,
help="Directory where results will be stored.",
)
parser.add_argument("--seed", type=int, default=0, help="Seed for the environment.")
parser.add_argument("--env", choices=["miccai", "miccai_extreme"])
args = parser.parse_args()
extreme = "_extreme" in args.env
env = envs.MICCAI2020Env(args.num_parallel_episodes, args.budget, extreme=extreme)
policy: baselines.Policy = None
if args.baseline == "random":
policy = baselines.RandomPolicy()
if args.baseline == "random-lb":
policy = baselines.RandomLowBiasPolicy(acceleration=3.0, centered=False)
if args.baseline == "lowtohigh":
policy = baselines.LowestIndexPolicy(alternate_sides=True, centered=False)
if args.baseline == "evaluator":
policy = baselines.CVPR19Evaluator(
args.evaluator_path,
torch.device(args.baseline_device),
add_mask=True,
)
if args.baseline == "oracle":
policy = baselines.OneStepGreedyOracle(
env, "ssim", num_samples=args.oracle_num_samples
)
if "ddqn" in args.baseline:
checkpoint_path = os.path.join(
args.dqn_checkpoint_path, "evaluation", "policy_best.pt"
)
        checkpoint = torch.load(checkpoint_path)
options = checkpoint["options"]
if "miccai" in args.env:
initial_num_lines = 1 if "extreme" in args.env else 15
if args.legacy_model:
options.legacy_offset = initial_num_lines
        policy = baselines.DDQN(args.baseline_device, None, options)
        policy = cast(baselines.DDQN, policy)
policy.load_state_dict(checkpoint["dqn_weights"])
all_scores, all_img_idx = baselines.evaluate(
env, policy, args.num_episodes, args.seed, "test", verbose=True
)
os.makedirs(args.output_dir, exist_ok=True)
np.save(os.path.join(args.output_dir, "scores.npy"), all_scores)
with open(os.path.join(args.output_dir, "img_ids.pkl"), "wb") as f:
pickle.dump(all_img_idx, f)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import activemri.baselines.ddqn as ddqn
import activemri.envs as envs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--budget", type=int, default=10)
parser.add_argument("--num_parallel_episodes", type=int, default=4)
parser.add_argument("--training_dir", type=str, default=None)
parser.add_argument("--device", type=str, default=None)
parser.add_argument("--extreme_acc", action="store_true")
parser.add_argument("--seed", type=int, default=0)
args = parser.parse_args()
env = envs.MICCAI2020Env(
args.num_parallel_episodes,
args.budget,
extreme=args.extreme_acc,
seed=args.seed,
)
tester = ddqn.DDQNTester(env, args.training_dir, args.device)
tester()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
import activemri.baselines as mri_baselines
import activemri.envs as envs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--budget", type=int, default=10)
parser.add_argument("--num_parallel_episodes", type=int, default=4)
parser.add_argument("--device", type=str, default=None)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--extreme_acc", action="store_true")
parser.add_argument("--checkpoints_dir", type=str, default=None)
parser.add_argument("--mem_capacity", type=int, default=1000)
parser.add_argument(
"--dqn_model_type",
type=str,
choices=["simple_mlp", "evaluator"],
default="evaluator",
)
parser.add_argument(
"--reward_metric",
type=str,
choices=["mse", "ssim", "nmse", "psnr"],
default="ssim",
)
parser.add_argument("--resume", action="store_true")
parser.add_argument("--mask_embedding_dim", type=int, default=0)
parser.add_argument("--dqn_batch_size", type=int, default=2)
parser.add_argument("--dqn_burn_in", type=int, default=100)
parser.add_argument("--dqn_normalize", action="store_true")
parser.add_argument("--gamma", type=float, default=0.5)
parser.add_argument("--epsilon_start", type=float, default=1.0)
parser.add_argument("--epsilon_decay", type=int, default=10000)
parser.add_argument("--epsilon_end", type=float, default=0.001)
parser.add_argument("--dqn_learning_rate", type=float, default=0.001)
parser.add_argument("--num_train_steps", type=int, default=1000)
parser.add_argument("--num_test_episodes", type=int, default=2)
parser.add_argument("--dqn_only_test", action="store_true")
parser.add_argument("--dqn_weights_path", type=str, default=None)
parser.add_argument("--dqn_test_episode_freq", type=int, default=None)
parser.add_argument("--target_net_update_freq", type=int, default=5000)
parser.add_argument("--freq_dqn_checkpoint_save", type=int, default=1000)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
env = envs.MICCAI2020Env(
args.num_parallel_episodes,
args.budget,
obs_includes_padding=args.dqn_model_type == "evaluator",
extreme=args.extreme_acc,
)
env.seed(args.seed)
policy = mri_baselines.DDQNTrainer(args, env, torch.device(args.device))
policy()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import logging
from dataclasses import dataclass, field
from math import sqrt
from typing import List, Optional, Union
import torch
import torch.nn as nn
logger: logging.Logger = logging.getLogger(__name__)
@dataclass
class MtlConfigs:
mtl_model: str = "att_sp" # consider using enum
num_task_experts: int = 1
num_shared_experts: int = 1
expert_out_dims: List[List[int]] = field(default_factory=list)
self_exp_res_connect: bool = False
expert_archs: Optional[List[List[int]]] = None
gate_archs: Optional[List[List[int]]] = None
num_experts: Optional[int] = None
@dataclass(frozen=True)
class ArchInputs:
num_task: int = 3
task_mlp: List[int] = field(default_factory=list)
mtl_configs: Optional[MtlConfigs] = field(default=None)
# Parameters related to activation function
activation_type: str = "RELU"
class AdaTTSp(nn.Module):
"""
paper title: "AdaTT: Adaptive Task-to-Task Fusion Network for Multitask Learning in Recommendations"
paper link: https://doi.org/10.1145/3580305.3599769
Call Args:
inputs: inputs is a tensor of dimension
[batch_size, self.num_tasks, self.input_dim].
Experts in the same module share the same input.
outputs dimensions: [B, T, D_out]
Example::
AdaTTSp(
input_dim=256,
expert_out_dims=[[128, 128]],
num_tasks=8,
num_task_experts=2,
self_exp_res_connect=True,
)
"""
def __init__(
self,
input_dim: int,
expert_out_dims: List[List[int]],
num_tasks: int,
num_task_experts: int,
self_exp_res_connect: bool = True,
activation: str = "RELU",
) -> None:
super().__init__()
if len(expert_out_dims) == 0:
logger.warning(
"AdaTTSp is noop! size of expert_out_dims which is the number of "
"extraction layers should be at least 1."
)
return
self.num_extraction_layers: int = len(expert_out_dims)
self.num_tasks = num_tasks
self.num_task_experts = num_task_experts
self.total_experts_per_layer: int = num_task_experts * num_tasks
self.self_exp_res_connect = self_exp_res_connect
self.experts = torch.nn.ModuleList()
self.gate_weights = torch.nn.ModuleList()
self_exp_weight_list = []
layer_input_dim = input_dim
for expert_out_dim in expert_out_dims:
self.experts.append(
torch.nn.ModuleList(
[
MLP(layer_input_dim, expert_out_dim, activation)
for i in range(self.total_experts_per_layer)
]
)
)
self.gate_weights.append(
torch.nn.ModuleList(
[
torch.nn.Sequential(
torch.nn.Linear(
layer_input_dim, self.total_experts_per_layer
),
torch.nn.Softmax(dim=-1),
)
for _ in range(num_tasks)
]
)
) # self.gate_weights is of shape L X T, after we loop over all layers.
if self_exp_res_connect and num_task_experts > 1:
params = torch.empty(num_tasks, num_task_experts)
scale = sqrt(1.0 / num_task_experts)
torch.nn.init.uniform_(params, a=-scale, b=scale)
self_exp_weight_list.append(torch.nn.Parameter(params))
layer_input_dim = expert_out_dim[-1]
self.self_exp_weights = nn.ParameterList(self_exp_weight_list)
def forward(
self,
inputs: torch.Tensor,
) -> torch.Tensor:
for layer_i in range(self.num_extraction_layers):
# all task expert outputs.
experts_out = torch.stack(
[
expert(inputs[:, expert_i // self.num_task_experts, :])
for expert_i, expert in enumerate(self.experts[layer_i])
],
dim=1,
) # [B * E (total experts) * D_out]
gates = torch.stack(
[
gate_weight(
inputs[:, task_i, :]
) # W ([B, D]) * S ([D, E]) -> G, dim is [B, E]
for task_i, gate_weight in enumerate(self.gate_weights[layer_i])
],
dim=1,
) # [B, T, E]
fused_experts_out = torch.bmm(
gates,
experts_out,
) # [B, T, E] X [B * E (total experts) * D_out] -> [B, T, D_out]
if self.self_exp_res_connect:
if self.num_task_experts > 1:
# residual from the linear combination of tasks' own experts.
self_exp_weighted = torch.einsum(
"te,bted->btd",
self.self_exp_weights[layer_i],
experts_out.view(
experts_out.size(0),
self.num_tasks,
self.num_task_experts,
-1,
), # [B * E (total experts) * D_out] -> [B * T * E_task * D_out]
                    ) # einsum: [T * E_task] X [B * T * E_task * D_out] -> [B, T, D_out]
fused_experts_out = (
fused_experts_out + self_exp_weighted
) # [B, T, D_out]
else:
fused_experts_out = fused_experts_out + experts_out
inputs = fused_experts_out
return inputs
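# Minimal usage sketch for AdaTTSp (an illustrative sketch mirroring the
# docstring example above; shapes follow the comments in forward()).
def _example_adatt_sp() -> None:
    model = AdaTTSp(
        input_dim=256,
        expert_out_dims=[[128, 128]],
        num_tasks=8,
        num_task_experts=2,
        self_exp_res_connect=True,
    )
    inputs = torch.randn(4, 8, 256)  # [batch_size, num_tasks, input_dim]
    outputs = model(inputs)
    assert outputs.shape == (4, 8, 128)  # [B, T, D_out of the last layer]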
class AdaTTWSharedExps(nn.Module):
"""
paper title: "AdaTT: Adaptive Task-to-Task Fusion Network for Multitask Learning in Recommendations"
paper link: https://doi.org/10.1145/3580305.3599769
Call Args:
inputs: inputs is a tensor of dimension
[batch_size, self.num_tasks, self.input_dim].
Experts in the same module share the same input.
outputs dimensions: [B, T, D_out]
Example::
AdaTTWSharedExps(
input_dim=256,
expert_out_dims=[[128, 128]],
num_tasks=8,
num_shared_experts=1,
num_task_experts=2,
self_exp_res_connect=True,
)
"""
def __init__(
self,
input_dim: int,
expert_out_dims: List[List[int]],
num_tasks: int,
num_shared_experts: int,
num_task_experts: Optional[int] = None,
num_task_expert_list: Optional[List[int]] = None,
# Set num_task_expert_list for experimenting with a flexible number of
        # experts for different task-specific units.
self_exp_res_connect: bool = True,
activation: str = "RELU",
) -> None:
super().__init__()
if len(expert_out_dims) == 0:
logger.warning(
"AdaTTWSharedExps is noop! size of expert_out_dims which is the number of "
"extraction layers should be at least 1."
)
return
self.num_extraction_layers: int = len(expert_out_dims)
self.num_tasks = num_tasks
assert (num_task_experts is None) ^ (num_task_expert_list is None)
if num_task_experts is not None:
self.num_expert_list = [num_task_experts for _ in range(num_tasks)]
else:
# num_expert_list is guaranteed to be not None here.
# pyre-ignore
self.num_expert_list: List[int] = num_task_expert_list
self.num_expert_list.append(num_shared_experts)
self.total_experts_per_layer: int = sum(self.num_expert_list)
self.self_exp_res_connect = self_exp_res_connect
self.experts = torch.nn.ModuleList()
self.gate_weights = torch.nn.ModuleList()
layer_input_dim = input_dim
for layer_i, expert_out_dim in enumerate(expert_out_dims):
self.experts.append(
torch.nn.ModuleList(
[
MLP(layer_input_dim, expert_out_dim, activation)
for i in range(self.total_experts_per_layer)
]
)
)
num_full_active_modules = (
num_tasks
if layer_i == self.num_extraction_layers - 1
else num_tasks + 1
)
self.gate_weights.append(
torch.nn.ModuleList(
[
torch.nn.Sequential(
torch.nn.Linear(
layer_input_dim, self.total_experts_per_layer
),
torch.nn.Softmax(dim=-1),
)
for _ in range(num_full_active_modules)
]
)
) # self.gate_weights is a 2d module list of shape L X T (+ 1), after we loop over all layers.
layer_input_dim = expert_out_dim[-1]
self_exp_weight_list = []
if self_exp_res_connect:
# If any tasks have number of experts not equal to 1, we learn linear combinations of native experts.
if any(num_experts != 1 for num_experts in self.num_expert_list):
for i in range(num_tasks + 1):
num_full_active_layer = (
self.num_extraction_layers - 1
if i == num_tasks
else self.num_extraction_layers
)
params = torch.empty(
num_full_active_layer,
self.num_expert_list[i],
)
scale = sqrt(1.0 / self.num_expert_list[i])
torch.nn.init.uniform_(params, a=-scale, b=scale)
self_exp_weight_list.append(torch.nn.Parameter(params))
self.self_exp_weights = nn.ParameterList(self_exp_weight_list)
self.expert_input_idx: List[int] = []
for i in range(num_tasks + 1):
self.expert_input_idx.extend([i for _ in range(self.num_expert_list[i])])
def forward(
self,
inputs: torch.Tensor,
) -> torch.Tensor:
for layer_i in range(self.num_extraction_layers):
num_full_active_modules = (
self.num_tasks
if layer_i == self.num_extraction_layers - 1
else self.num_tasks + 1
)
# all task expert outputs.
experts_out = torch.stack(
[
expert(inputs[:, self.expert_input_idx[expert_i], :])
for expert_i, expert in enumerate(self.experts[layer_i])
],
dim=1,
) # [B * E (total experts) * D_out]
# gate weights for fusing all experts.
gates = torch.stack(
[
gate_weight(inputs[:, i, :]) # [B, D] * [D, E] -> [B, E]
for i, gate_weight in enumerate(self.gate_weights[layer_i])
],
dim=1,
) # [B, T (+ 1), E]
            # add the native expert weights onto the gate weights of their own experts.
if self.self_exp_res_connect:
prev_idx = 0
use_unit_naive_weights = all(
num_expert == 1 for num_expert in self.num_expert_list
)
for module_i in range(num_full_active_modules):
next_idx = self.num_expert_list[module_i] + prev_idx
if use_unit_naive_weights:
gates[:, module_i, prev_idx:next_idx] += torch.ones(
1, self.num_expert_list[module_i]
)
else:
gates[:, module_i, prev_idx:next_idx] += self.self_exp_weights[
module_i
][layer_i].unsqueeze(0)
prev_idx = next_idx
fused_experts_out = torch.bmm(
gates,
experts_out,
) # [B, T (+ 1), E (total)] X [B * E (total) * D_out] -> [B, T (+ 1), D_out]
inputs = fused_experts_out
return inputs
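# Minimal usage sketch for AdaTTWSharedExps (an illustrative sketch mirroring
# the docstring example). Inputs carry num_tasks + 1 slices: the extra slice
# feeds the shared experts.
def _example_adatt_w_shared_exps() -> None:
    model = AdaTTWSharedExps(
        input_dim=256,
        expert_out_dims=[[128, 128]],
        num_tasks=8,
        num_shared_experts=1,
        num_task_experts=2,
        self_exp_res_connect=True,
    )
    inputs = torch.randn(4, 9, 256)  # [B, num_tasks + 1, input_dim]
    assert model(inputs).shape == (4, 8, 128)  # last layer keeps task modules only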
class MLP(nn.Module):
"""
Args:
input_dim (int):
mlp_arch (List[int]):
activation (str):
Call Args:
input (torch.Tensor): tensor of shape (B, I)
Returns:
output (torch.Tensor): MLP result
Example::
mlp = MLP(100, [100])
"""
def __init__(
self,
input_dim: int,
mlp_arch: List[int],
activation: str = "RELU",
bias: bool = True,
) -> None:
super().__init__()
mlp_net = []
for mlp_dim in mlp_arch:
mlp_net.append(
nn.Linear(in_features=input_dim, out_features=mlp_dim, bias=bias)
)
if activation == "RELU":
mlp_net.append(nn.ReLU())
else:
raise ValueError("only RELU is included currently")
input_dim = mlp_dim
self.mlp_net = nn.Sequential(*mlp_net)
def forward(
self,
input: torch.Tensor,
) -> torch.Tensor:
return self.mlp_net(input)
class SharedBottom(nn.Module):
def __init__(
self, input_dim: int, hidden_dims: List[int], num_tasks: int, activation: str
) -> None:
super().__init__()
self.bottom_projection = MLP(input_dim, hidden_dims, activation)
self.num_tasks: int = num_tasks
def forward(
self,
input: torch.Tensor,
) -> torch.Tensor:
        # input dim [B, D_in]
# output dim [B, T, D_out]
return self.bottom_projection(input).unsqueeze(1).expand(-1, self.num_tasks, -1)
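# Minimal usage sketch for SharedBottom (illustrative only): one bottom MLP
# shared by all tasks, its output replicated per task.
def _example_shared_bottom() -> None:
    model = SharedBottom(input_dim=64, hidden_dims=[32], num_tasks=3, activation="RELU")
    x = torch.randn(2, 64)  # [B, D_in]
    assert model(x).shape == (2, 3, 32)  # [B, T, D_out]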
class CrossStitch(torch.nn.Module):
"""
cross-stitch
paper title: "Cross-stitch Networks for Multi-task Learning".
paper link: https://openaccess.thecvf.com/content_cvpr_2016/papers/Misra_Cross-Stitch_Networks_for_CVPR_2016_paper.pdf
"""
def __init__(
self,
input_dim: int,
expert_archs: List[List[int]],
num_tasks: int,
activation: str = "RELU",
) -> None:
super().__init__()
self.num_layers: int = len(expert_archs)
self.num_tasks = num_tasks
self.experts = torch.nn.ModuleList()
self.stitchs = torch.nn.ModuleList()
expert_input_dim = input_dim
for layer_ind in range(self.num_layers):
self.experts.append(
torch.nn.ModuleList(
[
MLP(
expert_input_dim,
expert_archs[layer_ind],
activation,
)
for _ in range(self.num_tasks)
]
)
)
self.stitchs.append(
torch.nn.Linear(
self.num_tasks,
self.num_tasks,
bias=False,
)
)
expert_input_dim = expert_archs[layer_ind][-1]
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""
input dim [B, T, D_in]
output dim [B, T, D_out]
"""
x = input
for layer_ind in range(self.num_layers):
expert_out = torch.stack(
[
expert(x[:, expert_ind, :]) # [B, D_out]
for expert_ind, expert in enumerate(self.experts[layer_ind])
],
dim=1,
) # [B, T, D_out]
stitch_out = self.stitchs[layer_ind](expert_out.transpose(1, 2)).transpose(
1, 2
) # [B, T, D_out]
x = stitch_out
return x
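# Minimal usage sketch for CrossStitch (illustrative only): two stitch layers
# over three tasks.
def _example_cross_stitch() -> None:
    model = CrossStitch(input_dim=64, expert_archs=[[32], [16]], num_tasks=3)
    x = torch.randn(2, 3, 64)  # [B, T, D_in]
    assert model(x).shape == (2, 3, 16)  # [B, T, D_out of the last layer]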
class MLMMoE(torch.nn.Module):
"""
Multi-level Multi-gate Mixture of Experts
This code implements a multi-level extension of the MMoE model, as described in the
paper titled "Modeling Task Relationships in Multi-task Learning with Multi-gate
Mixture-of-Experts".
Paper link: https://dl.acm.org/doi/10.1145/3219819.3220007
To run the original MMoE, use only one fusion level. For example, set expert_archs as
[[96, 48]].
To configure multiple fusion levels, set expert_archs as something like [[96], [48]].
"""
def __init__(
self,
input_dim: int,
expert_archs: List[List[int]],
gate_archs: List[List[int]],
num_tasks: int,
num_experts: int,
activation: str = "RELU",
) -> None:
super().__init__()
self.num_layers: int = len(expert_archs)
self.num_tasks: int = num_tasks
self.num_experts = num_experts
self.experts = torch.nn.ModuleList()
self.gates = torch.nn.ModuleList()
expert_input_dim = input_dim
for layer_ind in range(self.num_layers):
self.experts.append(
torch.nn.ModuleList(
[
MLP(
expert_input_dim,
expert_archs[layer_ind],
activation,
)
for _ in range(self.num_experts)
]
)
)
self.gates.append(
torch.nn.ModuleList(
[
torch.nn.Sequential(
MLP(
input_dim,
gate_archs[layer_ind],
activation,
),
torch.nn.Linear(
gate_archs[layer_ind][-1]
if gate_archs[layer_ind]
else input_dim,
self.num_experts,
),
torch.nn.Softmax(dim=-1),
)
for _ in range(
self.num_experts
if layer_ind < self.num_layers - 1
else self.num_tasks
)
]
)
)
expert_input_dim = expert_archs[layer_ind][-1]
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""
input dim [B, D_in]
output dim [B, T, D_out]
"""
x = input.unsqueeze(1).expand([-1, self.num_experts, -1]) # [B, E, D_in]
for layer_ind in range(self.num_layers):
expert_out = torch.stack(
[
expert(x[:, expert_ind, :]) # [B, D_out]
for expert_ind, expert in enumerate(self.experts[layer_ind])
],
dim=1,
) # [B, E, D_out]
gate_out = torch.stack(
[
gate(input) # [B, E]
for gate_ind, gate in enumerate(self.gates[layer_ind])
],
dim=1,
) # [B, T, E]
gated_out = torch.matmul(gate_out, expert_out) # [B, T, D_out]
x = gated_out
return x
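# Minimal usage sketch for MLMMoE (illustrative only). A single fusion level,
# e.g. expert_archs=[[96, 48]], recovers the original MMoE; the two-level
# configuration below stacks two extraction layers. An empty gate arch makes
# each gate a single linear layer over the raw input.
def _example_mlmmoe() -> None:
    model = MLMMoE(
        input_dim=64,
        expert_archs=[[96], [48]],
        gate_archs=[[], []],
        num_tasks=3,
        num_experts=4,
    )
    x = torch.randn(2, 64)  # [B, D_in]
    assert model(x).shape == (2, 3, 48)  # [B, T, D_out]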
class PLE(nn.Module):
"""
PLE module is based on the paper "Progressive Layered Extraction (PLE): A
Novel Multi-Task Learning (MTL) Model for Personalized Recommendations".
Paper link: https://doi.org/10.1145/3383313.3412236
PLE aims to address negative transfer and seesaw phenomenon in multi-task
    learning. PLE distinguishes shared and task-specific experts explicitly and
adopts a progressive routing mechanism to extract and separate deeper
semantic knowledge gradually. When there is only one extraction layer, PLE
falls back to CGC.
Args:
input_dim: input embedding dimension
expert_out_dims (List[List[int]]): dimension of an expert's output at
each layer. This list's length equals the number of extraction
layers
num_tasks: number of tasks
num_task_experts: number of experts for each task module at each layer.
* If the number of experts is the same for all tasks, use an
integer here.
* If the number of experts is different for different tasks, use a
list of integers here.
num_shared_experts: number of experts for shared module at each layer
Call Args:
inputs: inputs is a tensor of dimension [batch_size, self.num_tasks + 1,
self.input_dim]. Task specific module inputs are placed first, followed
by shared module input. (Experts in the same module share the same input)
Returns:
        output: output of the extraction layers, to be fed into the task-specific
            tower networks: a tensor of shape [B, T, D_out], with one slice per task.
Example::
PLE(
input_dim=256,
expert_out_dims=[[128]],
num_tasks=8,
num_task_experts=2,
num_shared_experts=2,
)
"""
def __init__(
self,
input_dim: int,
expert_out_dims: List[List[int]],
num_tasks: int,
num_task_experts: Union[int, List[int]],
num_shared_experts: int,
activation: str = "RELU",
) -> None:
super().__init__()
if len(expert_out_dims) == 0:
raise ValueError("Expert out dims cannot be empty list")
self.num_extraction_layers: int = len(expert_out_dims)
self.num_tasks = num_tasks
self.num_task_experts = num_task_experts
if type(num_task_experts) is int:
self.total_experts_per_layer: int = (
num_task_experts * num_tasks + num_shared_experts
)
else:
self.total_experts_per_layer: int = (
sum(num_task_experts) + num_shared_experts
)
assert len(num_task_experts) == num_tasks
self.num_shared_experts = num_shared_experts
self.experts = nn.ModuleList()
expert_input_dim = input_dim
for expert_out_dim in expert_out_dims:
self.experts.append(
nn.ModuleList(
[
MLP(expert_input_dim, expert_out_dim, activation)
for i in range(self.total_experts_per_layer)
]
)
)
expert_input_dim = expert_out_dim[-1]
self.gate_weights = nn.ModuleList()
selector_dim = input_dim
for i in range(self.num_extraction_layers):
expert_out_dim = expert_out_dims[i]
# task specific gates.
if type(num_task_experts) is int:
gate_weights_in_layer = nn.ModuleList(
[
nn.Sequential(
nn.Linear(
selector_dim, num_task_experts + num_shared_experts
),
nn.Softmax(dim=-1),
)
for i in range(num_tasks)
]
)
else:
gate_weights_in_layer = nn.ModuleList(
[
nn.Sequential(
nn.Linear(
selector_dim, num_task_experts[i] + num_shared_experts
),
nn.Softmax(dim=-1),
)
for i in range(num_tasks)
]
)
# Shared module gates. Note last layer has only task specific module gates for task towers later.
if i != self.num_extraction_layers - 1:
gate_weights_in_layer.append(
nn.Sequential(
nn.Linear(selector_dim, self.total_experts_per_layer),
nn.Softmax(dim=-1),
)
)
self.gate_weights.append(gate_weights_in_layer)
selector_dim = expert_out_dim[-1]
if type(self.num_task_experts) is list:
experts_idx_2_task_idx = []
for i in range(num_tasks):
# pyre-ignore
experts_idx_2_task_idx += [i] * self.num_task_experts[i]
experts_idx_2_task_idx += [num_tasks] * num_shared_experts
self.experts_idx_2_task_idx: List[int] = experts_idx_2_task_idx
def forward(
self,
inputs: torch.Tensor,
) -> torch.Tensor:
for layer_i in range(self.num_extraction_layers):
# all task specific and shared experts' outputs.
# Note first num_task_experts * num_tasks experts are task specific,
# last num_shared_experts experts are shared.
if type(self.num_task_experts) is int:
experts_out = torch.stack(
[
self.experts[layer_i][expert_i](
inputs[
:,
# pyre-ignore
min(expert_i // self.num_task_experts, self.num_tasks),
:,
]
)
for expert_i in range(self.total_experts_per_layer)
],
dim=1,
) # [B * E (num experts) * D_out]
else:
experts_out = torch.stack(
[
self.experts[layer_i][expert_i](
inputs[
:,
self.experts_idx_2_task_idx[expert_i],
:,
]
)
for expert_i in range(self.total_experts_per_layer)
],
dim=1,
) # [B * E (num experts) * D_out]
gates_out = []
# Loop over all the gates in the layer. Note for the last layer,
# there is no shared gating network.
prev_idx = 0
for gate_i in range(len(self.gate_weights[layer_i])):
# This is for shared gating network, which uses all the experts.
if gate_i == self.num_tasks:
selected_matrix = experts_out # S_share
# This is for task gating network, which only uses shared and its own experts.
else:
if type(self.num_task_experts) is int:
task_experts_out = experts_out[
:,
# pyre-ignore
(gate_i * self.num_task_experts) : (gate_i + 1)
# pyre-ignore
* self.num_task_experts,
:,
] # task specific experts
else:
# pyre-ignore
next_idx = prev_idx + self.num_task_experts[gate_i]
task_experts_out = experts_out[
:,
prev_idx:next_idx,
:,
] # task specific experts
prev_idx = next_idx
shared_experts_out = experts_out[
:,
-self.num_shared_experts :,
:,
] # shared experts
selected_matrix = torch.concat(
[task_experts_out, shared_experts_out], dim=1
) # S_k with dimension of [B * E_selected * D_out]
gates_out.append(
torch.bmm(
self.gate_weights[layer_i][gate_i](
inputs[:, gate_i, :]
).unsqueeze(dim=1),
selected_matrix,
)
# W * S -> G
# [B, 1, E_selected] X [B * E_selected * D_out] -> [B, 1, D_out]
)
inputs = torch.cat(gates_out, dim=1) # [B, T, D_out]
return inputs
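# Minimal usage sketch for PLE (an illustrative sketch mirroring the docstring
# example). Note the extra shared-module slice, so inputs carry num_tasks + 1
# rows per batch element.
def _example_ple() -> None:
    model = PLE(
        input_dim=256,
        expert_out_dims=[[128]],
        num_tasks=8,
        num_task_experts=2,
        num_shared_experts=2,
    )
    inputs = torch.randn(4, 9, 256)  # [B, num_tasks + 1, input_dim]
    assert model(inputs).shape == (4, 8, 128)  # last layer keeps task slices only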
class CentralTaskArch(nn.Module):
def __init__(
self,
mtl_configs: MtlConfigs,
opts: ArchInputs,
input_dim: int,
) -> None:
super().__init__()
self.opts = opts
assert len(mtl_configs.expert_out_dims) > 0, "expert_out_dims is empty."
self.num_tasks: int = opts.num_task
self.mtl_model: str = mtl_configs.mtl_model
logger.info(f"mtl_model is {mtl_configs.mtl_model}")
expert_out_dims: List[List[int]] = mtl_configs.expert_out_dims
# AdaTT-sp
# consider consolidating the implementation of att_sp and att_g.
if mtl_configs.mtl_model == "att_sp":
self.mtl_arch: nn.Module = AdaTTSp(
input_dim=input_dim,
expert_out_dims=expert_out_dims,
num_tasks=self.num_tasks,
num_task_experts=mtl_configs.num_task_experts,
self_exp_res_connect=mtl_configs.self_exp_res_connect,
activation=opts.activation_type,
)
# AdaTT-general
elif mtl_configs.mtl_model == "att_g":
self.mtl_arch: nn.Module = AdaTTWSharedExps(
input_dim=input_dim,
expert_out_dims=expert_out_dims,
num_tasks=self.num_tasks,
num_task_experts=mtl_configs.num_task_experts,
num_shared_experts=mtl_configs.num_shared_experts,
self_exp_res_connect=mtl_configs.self_exp_res_connect,
activation=opts.activation_type,
)
# PLE
elif mtl_configs.mtl_model == "ple":
self.mtl_arch: nn.Module = PLE(
input_dim=input_dim,
expert_out_dims=expert_out_dims,
num_tasks=self.num_tasks,
num_task_experts=mtl_configs.num_task_experts,
num_shared_experts=mtl_configs.num_shared_experts,
activation=opts.activation_type,
)
# cross-stitch
elif mtl_configs.mtl_model == "cross_st":
self.mtl_arch: nn.Module = CrossStitch(
input_dim=input_dim,
expert_archs=mtl_configs.expert_out_dims,
num_tasks=self.num_tasks,
activation=opts.activation_type,
)
# multi-layer MMoE or MMoE
elif mtl_configs.mtl_model == "mmoe":
self.mtl_arch: nn.Module = MLMMoE(
input_dim=input_dim,
expert_archs=mtl_configs.expert_out_dims,
gate_archs=[[] for i in range(len(mtl_configs.expert_out_dims))],
num_tasks=self.num_tasks,
num_experts=mtl_configs.num_shared_experts,
activation=opts.activation_type,
)
# shared bottom
elif mtl_configs.mtl_model == "share_bottom":
self.mtl_arch: nn.Module = SharedBottom(
input_dim,
[dim for dims in expert_out_dims for dim in dims],
self.num_tasks,
opts.activation_type,
)
else:
raise ValueError("invalid model type")
task_modules_input_dim = expert_out_dims[-1][-1]
self.task_modules: nn.ModuleList = nn.ModuleList(
[
nn.Sequential(
MLP(
task_modules_input_dim, self.opts.task_mlp, opts.activation_type
),
torch.nn.Linear(self.opts.task_mlp[-1], 1),
)
for i in range(self.num_tasks)
]
)
def forward(
self,
task_arch_input: torch.Tensor,
) -> List[torch.Tensor]:
if self.mtl_model in ["att_sp", "cross_st"]:
task_arch_input = task_arch_input.unsqueeze(1).expand(
-1, self.num_tasks, -1
)
elif self.mtl_model in ["att_g", "ple"]:
task_arch_input = task_arch_input.unsqueeze(1).expand(
-1, self.num_tasks + 1, -1
)
task_specific_outputs = self.mtl_arch(task_arch_input)
task_arch_output = [
task_module(task_specific_outputs[:, i, :])
for i, task_module in enumerate(self.task_modules)
]
        return task_arch_output
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import pickle
from fig_1 import *
def evaluate_function_in_polytope():
# Generate projection matrix
np.random.seed(3)
B0 = np.random.randn(2, 100) # A REMBO projection
B = B0 / np.sqrt((B0 ** 2).sum(axis=0)) # A hypersphere projection
# Generate grid in low-d space
b = 60. # Something big to be sure we capture the whole range
density = 1000
grid_x = np.linspace(-b, b, density)
grid_y = np.linspace(-b, b, density)
grid2_x, grid2_y = np.meshgrid(grid_x, grid_y)
X = np.array([grid2_x.flatten(), grid2_y.flatten()]).transpose()
# Project up
Y = (np.linalg.pinv(B) @ X.transpose()).transpose()
z = ((Y<-1).any(axis=1) | (Y>1).any(axis=1)) # Points outside box bounds
Y[z, :] = 0. # Set them to 0 for now; we'll drop them later
fs = branin_centered(Y[:, :2])
# Drop points that violate polytope constraints in (1)
fs[z] = np.nan
fs = fs.reshape(grid2_x.shape)
# Same thing with B0 instead of B.
Y = (np.linalg.pinv(B0) @ X.transpose()).transpose()
z = ((Y<-1).any(axis=1) | (Y>1).any(axis=1)) # Points outside box bounds
Y[z, :] = 0. # Set them to 0 for now; we'll drop them later
fs_B0 = branin_centered(Y[:, :2])
fs_B0[z] = np.nan # Drop points outside the box bounds
fs_B0 = fs_B0.reshape(grid2_x.shape)
with open('data/figS4_sim_output.pckl', 'wb') as fout:
pickle.dump((grid_x, grid_y, fs, fs_B0), fout)
def make_fig_S4():
with open('data/figS4_sim_output.pckl', 'rb') as fin:
grid_x, grid_y, fs, fs_B0 = pickle.load(fin)
fig = plt.figure(figsize=(5.5, 2))
plt.set_cmap('RdBu_r')
ax = fig.add_subplot(121)
CS1 = ax.contourf(grid_x, grid_y, np.log(fs_B0), levels=np.linspace(-1, 6, 30))
ax.grid(False)
ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_xlim([-45, 45])
ax.set_ylim([-35, 35])
ax = fig.add_subplot(122)
CS1 = ax.contourf(grid_x, grid_y, np.log(fs), levels=np.linspace(-1, 6, 30))
ax.grid(False)
ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_xlim([-62, 62])
ax.set_ylim([-50, 50])
plt.subplots_adjust(right=0.99, top=0.99, left=0.1, bottom=0.17, wspace=0.45)
plt.savefig('pdfs/new_embedding.pdf', pad_inches=0)
if __name__ == '__main__':
#evaluate_function_in_polytope() # Takes 20s to run, creates data/figS4_sim_output.pckl
make_fig_S4()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import numpy as np
from fig_1 import *
def hesbo_branin(X, mode):
# In 2-D, HESBO has 3 possible embeddings:
# Full rank, span x1 and x2
# x1=x2
# x1=-x2
Y = X.copy()
if mode == 1:
pass # All is well here!
elif mode == 2:
Y[:, 0] = Y[:, 1]
elif mode == 3:
Y[:, 0] = -Y[:, 1]
return branin_centered(Y)
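# Minimal sanity-check sketch (a hypothetical helper, not part of the original
# figure code): the three modes agree only when the input already lies on the
# corresponding subspace.
def _demo_hesbo_modes():
    X = np.array([[0.3, -0.7]])
    # mode 1 evaluates (0.3, -0.7) unchanged; mode 2 collapses it onto the
    # diagonal, giving (-0.7, -0.7); mode 3 gives (0.7, -0.7).
    for mode in (1, 2, 3):
        print(mode, hesbo_branin(X, mode))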
def make_fig_S1():
# Evaluate the branin function on the grid under the three possible embeddings
grid_xhes, grid_yhes, fs_hesbo1 = eval_f_on_grid(hesbo_branin, [-1, 1], [-1, 1], {'mode': 1}, 2, density=1000)
grid_xhes, grid_yhes, fs_hesbo2 = eval_f_on_grid(hesbo_branin, [-1, 1], [-1, 1], {'mode': 3}, 2, density=1000)
grid_xhes, grid_yhes, fs_hesbo3 = eval_f_on_grid(hesbo_branin, [-1, 1], [-1, 1], {'mode': 2}, 2, density=1000)
fig = plt.figure(figsize=(5.5, 1.8), facecolor='w', edgecolor='w')
plt.set_cmap('RdBu_r')
for i, fs in enumerate([fs_hesbo1, fs_hesbo2, fs_hesbo3]):
ax = fig.add_subplot(1, 3, i + 1)
CS1 = ax.contourf(grid_xhes, grid_yhes, np.log(fs), levels=np.linspace(-1, 6, 30))
ax.grid(False)
ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_xlim([-1, 1])
ax.set_xticks([-1, -0.5, 0, 0.5, 1])
ax.set_yticks([-1, -0.5, 0, 0.5, 1])
if i == 0:
ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_ylim([-1, 1])
else:
ax.set_yticklabels([])
plt.subplots_adjust(right=0.98, top=0.975, left=0.1, bottom=0.195)
plt.savefig('pdfs/hesbo_embeddings.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_S1()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rc
import matplotlib
rc('font', family='serif', style='normal', variant='normal', weight='normal', stretch='normal', size=8)
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.labelsize'] = 7
matplotlib.rcParams['ytick.labelsize'] = 7
matplotlib.rcParams['axes.titlesize'] = 9
plot_method_names = [
'ALEBO (ours)',
'REMBO',
'HeSBO, $d_e$=$d$',
'HeSBO, $d_e$=$2d$',
    r'REMBO-$\phi k_{\Psi}$',
    r'REMBO-$\gamma k_{\Psi}$',
'EBO',
'Add-GP-UCB',
'SMAC',
'CMA-ES',
'TuRBO',
'Sobol',
'CoordinateLineBO',
'RandomLineBO',
'DescentLineBO',
]
plot_colors={
'ALEBO (ours)': plt.cm.tab20(0),
'REMBO': plt.cm.tab20(1),
'HeSBO, $d_e$=$d$': plt.cm.tab20(2),
'HeSBO, $d_e$=$2d$': plt.cm.tab20(3),
    r'REMBO-$\phi k_{\Psi}$': plt.cm.tab20(4),
    r'REMBO-$\gamma k_{\Psi}$': plt.cm.tab20(5),
'EBO': plt.cm.tab20(6),
'Add-GP-UCB': plt.cm.tab20(7),
'SMAC': plt.cm.tab20(8),
'CMA-ES': plt.cm.tab20(9),
'TuRBO': plt.cm.tab20(10),
'Sobol': plt.cm.tab20(14),
'CoordinateLineBO': plt.cm.tab20(12),
'RandomLineBO': plt.cm.tab20(16),
'DescentLineBO': plt.cm.tab20(18),
}
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import numpy as np
import pickle
from plot_config import *
def make_fig_S5():
with open('data/fig4_sim_output.pckl', 'rb') as fin:
res = pickle.load(fin)
nsamp = 1000
fig = plt.figure(figsize=(5.5, 2))
for i, d in enumerate([2, 6, 10]):
ax = fig.add_subplot(1, 3, i + 1)
x = [d_use for d_use in range(21) if d_use >= d]
y1 = np.array([res['rembo'][(100, d, d_use)] for d_use in x])
y2 = np.array([res['hesbo'][(100, d, d_use)] for d_use in x])
y3 = np.array([res['unitsphere'][(100, d, d_use)] for d_use in x])
y1err = 2 * np.sqrt(y1 * (1 - y1) / nsamp)
y2err = 2 * np.sqrt(y2 * (1 - y2) / nsamp)
y3err = 2 * np.sqrt(y3 * (1 - y3) / nsamp)
ax.errorbar(x, y1, yerr=y1err, color=plt.cm.tab10(0), marker='')
ax.errorbar(x, y2, yerr=y2err, color=plt.cm.tab10(1), marker='')
ax.errorbar(x, y3, yerr=y3err, color=plt.cm.tab10(2), marker='')
ax.set_title(r'$d={d}$'.format(d=d))
if i == 0:
ax.set_ylabel('Probability embedding\ncontains optimizer', fontsize=9)
ax.legend(['REMBO', 'HeSBO', r'Hypersphere'], loc='lower right', fontsize=7)
ax.set_xlabel(r'$d_e$', fontsize=9)
ax.set_xlim([0, 21])
ax.set_ylim([-0.02, 1.02])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
if i > 0:
ax.set_yticklabels([])
ax.grid(True, alpha=0.2)
plt.subplots_adjust(right=0.99, bottom=0.17, left=0.10, top=0.89, wspace=0.1)
plt.savefig('pdfs/lp_solns_ext.pdf', pad_inches=0)
if __name__ == '__main__':
# Assumes fig_4 has been run
make_fig_S5()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import pickle
import time
import torch
import numpy as np
import cvxpy as cp # 1.0.25
from plot_config import *
def gen_A_rembo(d: int, D: int) -> np.ndarray:
A = torch.randn(D, d, dtype=torch.double)
return A.numpy()
def gen_A_hesbo(d: int, D:int) -> np.ndarray:
A = torch.zeros((D, d), dtype=torch.double)
h = torch.randint(d, size=(D,))
s = 2 * torch.randint(2, size=(D,), dtype=torch.double) - 1
for i in range(D):
A[i, h[i]] = s[i]
return A.numpy()
def gen_A_unitsphere(d: int, D: int) -> np.ndarray:
A = np.random.randn(D, d) # A REMBO projection _up_
A = A / np.sqrt((A ** 2).sum(axis=1))[:, None]
return A
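# The three generators differ only in how A is scaled: gen_A_rembo draws raw
# Gaussian entries, gen_A_hesbo places a single +/-1 per row (a sparse
# count-sketch-style projection), and gen_A_unitsphere rescales each Gaussian
# row to unit length, so the rows of A are uniform on the unit sphere in R^d.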
def A_contains_xstar(xstar, A, perm):
d = len(xstar)
D = A.shape[0]
Acon = np.zeros((d, D))
Acon[:d, :d] = np.diag(np.ones(d))
# Shuffle columns, to place true embedding on columns perm
Acon = Acon[:, perm]
Q = A @ np.linalg.pinv(A) - np.eye(D)
A_eq = np.vstack((Acon, Q))
b_eq = np.hstack((xstar, np.zeros(D)))
c = np.zeros(D)
x = cp.Variable(D)
prob = cp.Problem(
cp.Minimize(c.T * x),
[
A_eq @ x == b_eq,
x >= -1,
x <= 1,
],
)
prob.solve(solver=cp.ECOS)
if prob.status == cp.OPTIMAL:
has_opt = True
elif prob.status == cp.INFEASIBLE:
has_opt = False
else:
raise ValueError(prob.status)
return has_opt, prob
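# The problem above is a pure feasibility LP (the objective c is zero): the
# embedding spanned by A contains the optimizer iff some x in [-1, 1]^D both
# matches xstar on the d coordinates of the true subspace (the Acon rows) and
# lies in the range of A, which (A @ pinv(A) - I) x = 0 enforces (the Q rows).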
def p_A_contains_optimizer(d, D, d_use, gen_A_fn, nsamp):
num_feas = 0.
for _ in range(nsamp):
# Sample location of optimizer uniformly on [-1, 1]^d
xstar = np.random.rand(d) * 2 - 1
        # Randomly choose which of the D ambient coordinates span the true subspace
perm = list(range(D))
np.random.shuffle(perm)
# Generate projection matrix
A = gen_A_fn(d_use, D)
has_opt, _ = A_contains_xstar(xstar, A, perm)
num_feas += float(has_opt)
return num_feas / nsamp
def run_simulation1():
t1 = time.time()
nsamp = 1000
res = {'rembo': {}, 'hesbo': {}, 'unitsphere': {}}
D = 100
for d in [2, 6, 10]:
for d_use in range(1, 21):
if d_use < d:
continue
res['rembo'][(D, d, d_use)] = p_A_contains_optimizer(
d=d, D=D, d_use=d_use, gen_A_fn=gen_A_rembo, nsamp=nsamp
)
res['hesbo'][(D, d, d_use)] = p_A_contains_optimizer(
d=d, D=D, d_use=d_use, gen_A_fn=gen_A_hesbo, nsamp=nsamp
)
res['unitsphere'][(D, d, d_use)] = p_A_contains_optimizer(
d=d, D=D, d_use=d_use, gen_A_fn=gen_A_unitsphere, nsamp=nsamp
)
print(time.time() - t1)
with open('data/fig4_sim_output.pckl', 'wb') as fout:
pickle.dump(res, fout)
def make_fig_4():
with open('data/fig4_sim_output.pckl', 'rb') as fin:
res = pickle.load(fin)
nsamp = 1000
fig = plt.figure(figsize=(2.63, 1.45))
for i, d in enumerate([2, 6]):
ax = fig.add_subplot(1, 2, i + 1)
x = [d_use for d_use in range(21) if d_use >= d]
y1 = np.array([res['rembo'][(100, d, d_use)] for d_use in x])
y2 = np.array([res['hesbo'][(100, d, d_use)] for d_use in x])
y3 = np.array([res['unitsphere'][(100, d, d_use)] for d_use in x])
y1err = 2 * np.sqrt(y1 * (1 - y1) / nsamp)
y2err = 2 * np.sqrt(y2 * (1 - y2) / nsamp)
y3err = 2 * np.sqrt(y3 * (1 - y3) / nsamp)
ax.errorbar(x, y1, yerr=y1err, color=plt.cm.tab10(0), marker='')
ax.errorbar(x, y2, yerr=y2err, color=plt.cm.tab10(1), marker='')
ax.errorbar(x, y3, yerr=y3err, color=plt.cm.tab10(2), marker='')
ax.set_title(r'$d={d}$'.format(d=d))
if i == 0:
ax.set_ylabel(r'$P_{\textrm{opt}}$', fontsize=9)
ax.legend(['REMBO', 'HeSBO', r'Hypersphere'], loc='lower right', fontsize=5)
ax.set_xlabel(r'$d_e$', fontsize=9)
ax.set_xlim([0, 21])
ax.set_ylim([-0.02, 1.02])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
if i > 0:
ax.set_yticklabels([])
ax.grid(True, alpha=0.2)
plt.subplots_adjust(right=0.99, bottom=0.23, left=0.17, top=0.87, wspace=0.1)
plt.savefig('pdfs/lp_solns.pdf', pad_inches=0)
if __name__ == '__main__':
#run_simulation1() # Will take about 30mins, produces data/fig4_sim_output.pckl
make_fig_4()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.storage.json_store.decoder import object_from_json
from plot_config_nr import *
def make_fig_5():
# Load in the benchmark results
res = {}
for fname in ['hartmann6_1000', 'branin_gramacy_100']:
with open(f'../benchmarks/results/{fname}_aggregated_results.json', 'r') as fin:
res.update(object_from_json(json.load(fin)))
# A map from method idx in plot_method_names to the name used in res
method_idx_to_res_name = {
0: 'ALEBO',
1: 'REMBO',
2: 'HeSBO, d=d',
3: 'HeSBO, d=2d',
4: 'rrembos_standard_kPsi',
5: 'rrembos_reverse_kPsi',
6: 'ebo',
7: 'addgpucb',
8: 'smac',
9: 'cmaes',
10: 'turbo',
11: 'Sobol',
12: 'coordinatelinebo',
13: 'randomlinebo',
14: 'descentlinebo',
}
# Make the figure
fig = plt.figure(figsize=(5.5, 3.7))
####### Branin, D=100
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(234)
res_h = res['Branin, D=100']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = res_h.objective_at_true_best[res_name]
f = Y.mean(axis=0)
x = np.arange(1, 51)
color = plot_colors[m]
ax1.plot(x, f, color=color, label=m)
parts = ax2.violinplot(positions=[idx], dataset=Y[:, 49], showmeans=True)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor(color)
for field in ['cmeans', 'cmaxes', 'cmins', 'cbars']:
parts[field].set_color(color)
ax1.set_xlim([0, 51])
ax1.set_ylabel('Best value found', fontsize=7)
ax1.set_xlabel('Function evaluations', fontsize=7)
ax1.axhline(y=0.397887, c='gray', ls='--')
ax1.grid(alpha=0.2, zorder=-10)
ax1.set_ylim([0, 7])
ax2.set_xticks(range(12))
ax2.set_xticklabels([])
ax2.set_ylabel('Final value', fontsize=7)
ax2.grid(alpha=0.2, zorder=-10)
ax2.set_xticklabels([plot_method_names[i] for i in range(12)], fontsize=6)
ax2.xaxis.set_tick_params(rotation=90)
# Make the legend
custom_lines = []
names = []
for i in range(12):
names.append(plot_method_names[i])
custom_lines.append(
Line2D([0], [0], color=plot_colors[plot_method_names[i]], lw=2)
)
order = range(12)
names = [names[o] for o in order]
custom_lines = [custom_lines[o] for o in order]
ax1.legend(custom_lines, names, ncol=6, fontsize=5.5, bbox_to_anchor=(3.52, -2.26))
ax1.set_title('Branin, $d$=2, $D$=100', fontsize=8)
####### Hartmann6, D=1000
ax1 = fig.add_subplot(232)
ax2 = fig.add_subplot(235)
res_h = res['Hartmann6, D=1000']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = res_h.objective_at_true_best[res_name]
f = Y.mean(axis=0)
x = np.arange(1, 201)
color = plot_colors[m]
ax1.plot(x, f, color=color, label=m)
parts = ax2.violinplot(positions=[idx], dataset=Y[:, 199], showmeans=True)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor(color)
for field in ['cmeans', 'cmaxes', 'cmins', 'cbars']:
parts[field].set_color(color)
ax1.set_xlim([0, 201])
#ax1.set_ylabel('Best value found', fontsize=9)
ax1.set_xlabel('Function evaluations', fontsize=7)
ax1.axhline(y=-3.32237, c='gray', ls='--')
ax1.grid(alpha=0.2, zorder=-10)
ax1.set_ylim([-3.5, -0.5])
ax2.set_xticks(range(12))
ax2.set_xticklabels([])
#ax2.set_ylabel('Final value', fontsize=9)
ax2.grid(alpha=0.2, zorder=-10)
ax2.set_xticklabels([plot_method_names[i] for i in range(12)], fontsize=6)
ax2.xaxis.set_tick_params(rotation=90)
ax1.set_title('Hartmann6, $d$=6, $D$=1000', fontsize=8)
####### Gramacy, D=100
ax1 = fig.add_subplot(233)
ax2 = fig.add_subplot(236)
res_h = res['Gramacy, D=100']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = res_h.objective_at_true_best[res_name]
f = Y.mean(axis=0)
x = np.arange(1, 51)
color = plot_colors[m]
ax1.plot(x, f, color=color, label=m)
parts = ax2.violinplot(positions=[idx], dataset=Y[:, 49], showmeans=True)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor(color)
for field in ['cmeans', 'cmaxes', 'cmins', 'cbars']:
parts[field].set_color(color)
ax1.set_xlim([0, 51])
#ax1.set_ylabel('Best value found', fontsize=9)
ax1.set_xlabel('Function evaluations', fontsize=7)
ax1.set_ylim([0.58, 1])
ax1.axhline(y=0.5998, c='gray', ls='--')
ax1.grid(alpha=0.2, zorder=-10)
ax2.set_xticks(range(12))
ax2.set_xticklabels([plot_method_names[i] for i in range(12)], fontsize=6)
ax2.xaxis.set_tick_params(rotation=90)
#ax2.set_ylabel('Final value', fontsize=9)
ax2.grid(alpha=0.2, zorder=-10)
ax1.set_title('Gramacy, $d$=2, $D$=100', fontsize=8)
plt.subplots_adjust(right=0.995, bottom=0.3, left=0.07, top=0.94, wspace=0.25, hspace=0.45)
plt.savefig('pdfs/benchmark_results_t.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_5()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.storage.json_store.decoder import object_from_json
def table_S1_data():
with open('../benchmarks/results/all_aggregated_results.json', 'r') as fin:
res = object_from_json(json.load(fin))
for D in [100, 1000]:
pname = f'Hartmann6, D={D}'
print('-----', pname)
for m, ts in res[pname].gen_times.items():
# Get average total time for fit and gen
t = np.mean(ts)
t += np.mean(res[pname].fit_times[m])
# Divide by 200 to be time per iteration
t /= 200.
print(f'{m}: {t}')
if __name__ == '__main__':
table_S1_data()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import numpy as np
from ax.utils.measurement.synthetic_functions import branin, hartmann6
from plot_config import *
def branin_centered(X):
# Evaluate branin problem, scaled to X \in [-1, 1]^2
# Map from [-1, 1]^2 to [[-5, 10], [0, 15]]
assert X.min() >= -1
assert X.max() <= 1
Xu = (X + 1) / 2.
Xu *= 15
Xu[:, 0] -= 5
return branin(Xu)
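# For example, X = (0, 0) maps to Xu = (2.5, 7.5), the center of the standard
# Branin domain [-5, 10] x [0, 15].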
def hartmann6_centered(X):
    # Evaluate hartmann6 problem, scaled to X \in [-1, 1]^6
# Map from [-1, 1]^6 to [0, 1]^6
assert X.min() >= -1
assert X.max() <= 1
Xu = (X + 1) / 2.
return hartmann6(Xu)
def rembo_branin(X, A):
# Map from low-d to high-D
Y = (A @ X.transpose()).transpose()
# Clip to [-1, 1]
Y = np.clip(Y, a_min=-1, a_max=1)
# Evaluate Branin on first two components
return branin_centered(Y[:, :2])
def rembo_hartmann6(X, A):
# Map from low-d to high-D
Y = (A @ X.transpose()).transpose()
# Clip to [-1, 1]
Y = np.clip(Y, a_min=-1, a_max=1)
# Evaluate Hartmann6 on first six components
return hartmann6_centered(Y[:, :6])
def eval_f_on_grid(f, bounds_x, bounds_y, f_kwargs, d, density=100):
# prepare the grid on which to evaluate the problem
grid_x = np.linspace(bounds_x[0], bounds_x[1], density)
grid_y = np.linspace(bounds_y[0], bounds_y[1], density)
grid2_x, grid2_y = np.meshgrid(grid_x, grid_y)
X = np.array([grid2_x.flatten(), grid2_y.flatten()]).transpose()
if d > 2:
# Add in the other components, just at 0
X = np.hstack((X, np.zeros((X.shape[0], d - 2))))
fs = f(X, **f_kwargs).reshape(grid2_x.shape)
return grid_x, grid_y, fs
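# eval_f_on_grid returns the two 1-D coordinate vectors (each of length
# `density`) together with a (density, density) array of function values,
# in the layout expected by contourf.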
def make_fig_1():
## Branin
# Evaluate the usual Branin problem, but scaled to [-1, 1]^2
grid_x1, grid_y1, fs_branin = eval_f_on_grid(branin_centered, [-1, 1], [-1, 1], {}, 2)
# Generate a REMBO projection matrix
D = 100
np.random.seed(1)
A_b = np.random.randn(D, 2)
# Evaluate the function across the low-d space
bounds = [-np.sqrt(2), np.sqrt(2)]
grid_x2, grid_y2, fs_rembo = eval_f_on_grid(rembo_branin, bounds, bounds, {'A': A_b}, 2)
## Hartmann6
# Evaluate the usual Hartmann6 problem, but scaled to [-1, 1]^6
grid_x1h, grid_y1h, fs_hartmann6 = eval_f_on_grid(hartmann6_centered, [-1, 1], [-1, 1], {}, 6)
# Generate a REMBO projection matrix
D = 100
A_h = np.random.randn(D, 6)
# Evaluate the function across the low-d space
bounds = [-np.sqrt(6), np.sqrt(6)]
grid_x2h, grid_y2h, fs_rembo_h = eval_f_on_grid(rembo_hartmann6, bounds, bounds, {'A': A_h}, 6)
# Make the figure
fig = plt.figure(figsize=(5.5, 1.2), facecolor='w', edgecolor='w')
plt.set_cmap('RdBu_r')
### Branin
ax = fig.add_subplot(141)
CS1 = ax.contourf(grid_x1, grid_y1, np.log(fs_branin), levels=np.linspace(-1, 6, 30))
ax.grid(False)
#ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_xlim([-1, 1])
#ax.set_xticks([-1, -0.5, 0, 0.5, 1])
ax.set_xticks([])
#ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_ylim([-1, 1])
ax.set_yticks([])
#ax.set_yticks([-1, -0.5, 0, 0.5, 1])
ax.set_title(r'Branin function, $d$=2')
ax = fig.add_subplot(142)
CS1 = ax.contourf(grid_x2, grid_y2, np.log(fs_rembo), levels=np.linspace(-1, 6, 30))
ax.grid(False)
#ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_xlim([-np.sqrt(2), np.sqrt(2)])
ax.set_xticks([])
#ax.set_xticks([-1.4, -1, -0.5, 0, 0.5, 1, 1.4])
#ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_ylim([-np.sqrt(2), np.sqrt(2)])
ax.set_yticks([])
#ax.set_yticks([-1.4, -1, -0.5, 0, 0.5, 1, 1.4])
ax.set_title('REMBO embedding,\n$D$=100, $d_e$=2')
### Hartmann6
ax = fig.add_subplot(143)
CS1f = ax.contourf(grid_x1h, grid_y1h, fs_hartmann6, levels=np.linspace(-1.2, 0., 20))
ax.grid(False)
#ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_xlim([-1, 1])
ax.set_xticks([])
#ax.set_xticks([-1, -0.5, 0, 0.5, 1])
#ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_ylim([-1, 1])
ax.set_yticks([])
#ax.set_yticks([-1, -0.5, 0, 0.5, 1])
ax.set_title(r'Hartmann6 function, $d$=6')
ax = fig.add_subplot(144)
CS1f = ax.contourf(grid_x2h, grid_y2h, fs_rembo_h, levels=np.linspace(-1.2, 0., 20))
ax.grid(False)
#ax.set_xlabel(r'$x_1$', fontsize=9)
ax.set_xlim([-np.sqrt(6), np.sqrt(6)])
ax.set_xticks([])
#ax.set_xticks([-2, -1, 0, 1, 2,])
#ax.set_ylabel(r'$x_2$', fontsize=9)
ax.set_ylim([-np.sqrt(6), np.sqrt(6)])
ax.set_yticks([])
#ax.set_yticks([-2, -1, 0, 1, 2,])
ax.set_title('REMBO embedding,\n$D$=100, $d_e$=6')
fig.subplots_adjust(wspace=0.13, top=0.74, bottom=0.05, right=0.99, left=0.01)
plt.savefig('pdfs/rembo_illustrations_w.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_1()
|
import json
import numpy as np
from ax.storage.json_store.decoder import object_from_json
from plot_config import *
def make_nasbench_figure():
with open('../benchmarks/results/nasbench_aggregated_results.json', 'r') as fin:
res = object_from_json(json.load(fin))
# A map from method idx in plot_method_names to the name used in res
method_idx_to_res_name = {
1: 'REMBO',
3: 'HeSBO',
9: 'cmaes',
10: 'turbo',
11: 'Sobol',
0: 'ALEBO',
}
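    # Only one HeSBO variant was run on NASBench, so relabel slot 3 of the
    # shared method list and give it its own color.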
plot_method_names[3] = 'HeSBO'
plot_colors['HeSBO'] = plt.cm.tab20(3)
fig = plt.figure(figsize=(2.96, 2.0))
ax1 = fig.add_subplot(111)
method_names_used = []
for i, m in method_idx_to_res_name.items():
f = np.nanmean(res[m], axis=0)
sem = np.nanstd(res[m], axis=0) / np.sqrt(res[m].shape[0])
x = np.arange(1, 51)
mname = plot_method_names[i]
color = plot_colors[mname]
ax1.plot(x, f, color=color, label=mname)
ax1.errorbar(x, f, yerr=2 * sem, color=color, alpha=0.5, ls='')
ax1.set_xlim([0, 51])
ax1.set_ylabel('Best feasible test accuracy', fontsize=9)
ax1.set_xlabel('Function evaluations', fontsize=9)
#ax1.legend(bbox_to_anchor=(1.0, 1.24), ncol=4, fontsize=6, columnspacing=1.65)
ax1.legend(ncol=2, loc='lower right', fontsize=7)
ax1.set_ylim([0.92, 0.936])
ax1.set_yticks([0.92, 0.925, 0.93, 0.935])
    ax1.set_yticklabels([r'92.0\%', r'92.5\%', r'93.0\%', r'93.5\%'])
ax1.grid(alpha=0.2, zorder=-10)
plt.subplots_adjust(right=0.98, bottom=0.17, left=0.19, top=0.98)
    plt.savefig('pdfs/nas.pdf', pad_inches=0)
#plt.show()
if __name__ == '__main__':
make_nasbench_figure()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.storage.json_store.decoder import object_from_json
from plot_config import *
def extract_sensitivity_results():
res = {}
for fname in [
'branin_gramacy_100',
'sensitivity',
]:
with open(f'../benchmarks/results/{fname}_aggregated_results.json', 'r') as fin:
res.update(object_from_json(json.load(fin)))
# Results for D=100
ys1 = {}
for d in [2, 3, 4, 5, 6, 7, 8]:
if d == 4:
ys1[d] = res['Branin, D=100'].objective_at_true_best['ALEBO']
else:
ys1[d] = res['Branin, D=100_sensitivity'].objective_at_true_best[f'ALEBO, d={d}']
# Results for d_e=4
ys2 = {}
for D in [50, 100, 200, 500, 1000]:
if D == 100:
ys2[D] = res['Branin, D=100'].objective_at_true_best['ALEBO']
else:
ys2[D] = res[f'Branin, D={D}_sensitivity'].objective_at_true_best['ALEBO']
return ys1, ys2
def make_fig_S8():
ys1, ys2 = extract_sensitivity_results()
fig = plt.figure(figsize=(5.5, 2.2))
ax = fig.add_subplot(121)
x = np.arange(1, 51)
for d_e in [2, 3, 4, 6, 8]:
ax.plot(x, ys1[d_e].mean(axis=0), label=f'$d_e={d_e}$')
ax.set_ylim([0, 7])
ax.set_yticks([0, 2, 4, 6])
ax.legend(fontsize=7)
ax.set_title(r'Branin, $D=100$')
ax.set_ylabel('Best value found', fontsize=9)
ax.set_xlabel('Function evaluations', fontsize=9)
ax.axhline(y=0.397887, c='gray', ls='--')
ax.grid(alpha=0.2, zorder=-10)
ax.set_xlim([0, 51])
ax = fig.add_subplot(122)
for D in [50, 100, 200, 500, 1000]:
ax.plot(x, ys2[D].mean(axis=0), label=f'$D={D}$')
ax.set_title(r'Branin, $d_e=4$')
ax.set_ylim([0, 7])
ax.legend(fontsize=7)
ax.set_xlabel('Function evaluations', fontsize=9)
ax.axhline(y=0.397887, c='gray', ls='--')
ax.grid(alpha=0.2, zorder=-10)
ax.set_yticks([0, 2, 4, 6])
ax.set_xlim([0, 51])
ax.set_yticklabels([])
plt.subplots_adjust(right=0.995, bottom=0.16, left=0.06, top=0.91, wspace=0.05)
plt.savefig('pdfs/branin_by_d_D_traces.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_S8()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import numpy as np
from plot_config import *
def make_fig_2():
# Run the simulation
np.random.seed(1)
Ds = [20, 100, 1000]
ds = list(range(1, 6))
nsamp = 1000
p_interior = {}
for D in Ds:
for d in ds:
p_interior[(D, d)] = 0.
for _ in range(nsamp):
# Generate a REMBO projection
A = np.random.randn(D, d)
# Sample a point in [-sqrt(d), sqrt(d)]^d
x = (np.random.rand(d) * 2 - 1) * np.sqrt(d)
# Project up
z = A @ x
# Check if satisfies box bounds
if z.min() >= -1 and z.max() <= 1:
p_interior[(D, d)] += 1
p_interior[(D, d)] /= nsamp
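    # The estimate illustrates REMBO's clipping problem: as d_e (and D) grows,
    # a projected point almost never satisfies the [-1, 1]^D box bounds.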
# Make the figure
fig = plt.figure(figsize=(2., 1.55))
ax = fig.add_subplot(111)
ax.grid(alpha=0.5)
for i, D in enumerate(Ds):
ax.plot(ds, [p_interior[(D, d)] for d in ds], 'x-', c=plt.cm.tab10(i))
ax.legend([r'$D=20$', r'$D=100$', r'$D=1000$'], fontsize=7)
ax.set_xlabel(r'Embedding dimension $d_e$', fontsize=9)
ax.set_ylabel('Probability projection\nsatisfies box bounds', fontsize=9)
plt.subplots_adjust(right=0.99, bottom=0.22, left=0.3, top=0.94)
plt.savefig('pdfs/rembo_p_interior.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_2()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import time
import numpy as np
import torch
import pickle
from ax.models.torch.alebo import ALEBO
from ax.models.random.alebo_initializer import ALEBOInitializer
from ax.models.torch.botorch import BotorchModel
from botorch.test_functions.synthetic import Hartmann
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from torch import Tensor
from gpytorch.means.constant_mean import ConstantMean
from gpytorch.models.exact_gp import ExactGP
from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood
from gpytorch.kernels.rbf_kernel import RBFKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from botorch.fit import fit_gpytorch_model
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from plot_config import *
def highDhartmann6(X):
# X \in [-1, 1]^D
h = Hartmann()
Xd = (X[:, :6] + 1) / 2.
return h.evaluate_true(Xd)
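# Only the first 6 of the D input coordinates are active, so the function has
# a 6-dimensional axis-aligned true subspace.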
def gen_train_test_sets(B, ntrain, ntest, seed_train=1000, seed_test=2000):
# Generate training points
m1 = ALEBOInitializer(B=B.numpy(), seed=seed_train)
train_X = torch.tensor(m1.gen(n=ntrain, bounds=[])[0], dtype=torch.double)
train_Y = highDhartmann6(train_X)
# Standardize train Y
mu = train_Y.mean()
sigma = train_Y.std()
train_Y = (train_Y - mu) / sigma
train_Y = train_Y.unsqueeze(1)
train_Yvar = 1e-7 * torch.ones(train_Y.shape)
# Generate test points
m2 = ALEBOInitializer(B=B.numpy(), seed=seed_test)
test_X = torch.tensor(m2.gen(n=ntest, bounds=[])[0], dtype=torch.double)
test_Y = highDhartmann6(test_X)
return train_X, train_Y, train_Yvar, test_X, test_Y, mu, sigma
def fit_and_predict_alebo(B, train_X, train_Y, train_Yvar, test_X, mu, sigma):
m = ALEBO(B=B)
m.fit([train_X], [train_Y], [train_Yvar], [], [], [], [], [])
f, var = m.predict(test_X)
# Return predictions, un-standardized
return f.squeeze() * sigma + mu, var.squeeze() * sigma ** 2
def fit_and_predict_map(B, train_X, train_Y, train_Yvar, test_X, mu, sigma):
m = ALEBO(B=B, laplace_nsamp=1) # laplace_nsamp=1 uses MAP estimate
m.fit([train_X], [train_Y], [train_Yvar], [], [], [], [], [])
f, var = m.predict(test_X)
# Return predictions, un-standardized
return f.squeeze() * sigma + mu, var.squeeze() * sigma ** 2
class ARDRBFGP(BatchedMultiOutputGPyTorchModel, ExactGP):
"""A GP with fixed observation noise and an ARD RBF kernel."""
def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None:
self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar)
self._set_dimensions(train_X=train_X, train_Y=train_Y)
train_X, train_Y, train_Yvar = self._transform_tensor_args(
X=train_X, Y=train_Y, Yvar=train_Yvar
)
likelihood = FixedNoiseGaussianLikelihood(
noise=train_Yvar, batch_shape=self._aug_batch_shape
)
ExactGP.__init__(
self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood
)
self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
self.covar_module = ScaleKernel(
base_kernel=RBFKernel(
ard_num_dims=train_X.shape[-1],
batch_shape=self._aug_batch_shape,
),
batch_shape=self._aug_batch_shape,
)
self.to(train_X)
def forward(self, x: Tensor) -> MultivariateNormal:
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
def get_and_fit_ARDRBF(
Xs, Ys, Yvars, task_features=None, fidelity_features=None, refit_model=None, state_dict=None,
fidelity_model_id=None, metric_names=None,
):
m = ARDRBFGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0])
mll = ExactMarginalLogLikelihood(m.likelihood, m)
mll = fit_gpytorch_model(mll)
return m
def fit_and_predict_ARDRBF(B, train_X, train_Y, train_Yvar, test_X, mu, sigma):
# Project training data down to the embedding
BX = train_X @ B.t()
m = BotorchModel(model_constructor=get_and_fit_ARDRBF)
# Fit ARD RBF model on data in embedding
m.fit([BX], [train_Y], [train_Yvar], [], [], [], [], [])
# Predict on test points in embedding
f, var = m.predict(test_X @ B.t())
# Return predictions, un-standardized
return f.squeeze() * sigma + mu, var.squeeze() * sigma ** 2
def run_simulation():
D = 100
d = 6
ntrain = 100
ntest = 50
# Get projection
torch.manual_seed(1000)
B0 = torch.randn(d, D, dtype=torch.double)
B = B0 / torch.sqrt((B0 ** 2).sum(dim=0))
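    # Columns of B are scaled to unit norm -- the hypersphere projection that
    # ALEBOStrategy.gen_projection uses.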
# Get fixed train/test data
train_X, train_Y, train_Yvar, test_X, test_Y, mu, sigma = gen_train_test_sets(B, ntrain, ntest)
# Predict with each model
f1, var1 = fit_and_predict_alebo(B, train_X, train_Y, train_Yvar, test_X, mu, sigma)
f2, var2 = fit_and_predict_map(B, train_X, train_Y, train_Yvar, test_X, mu, sigma)
f3, var3 = fit_and_predict_ARDRBF(B, train_X, train_Y, train_Yvar, test_X, mu, sigma)
# Save outcome
with open('data/fig3_sim_output.pckl', 'wb') as fout:
pickle.dump((test_Y, f1, var1, f2, var2, f3, var3), fout)
def make_fig_3():
# Load in simulation results
with open('data/fig3_sim_output.pckl', 'rb') as fin:
(test_Y, f1, var1, f2, var2, f3, var3) = pickle.load(fin)
fig = plt.figure(figsize=(2.63, 1.45))
ax = fig.add_subplot(121)
ax.errorbar(
x=test_Y.numpy(), y=f3.numpy(), yerr = 2 * np.sqrt(var3.numpy()),
c='gray', lw=1, ls='', marker='.', mfc='k', mec='k', ms=3
)
x0 = -2.5
x1 = 0.5
ax.plot([x0, x1], [x0, x1], '-', zorder=-5, alpha=0.5, c='steelblue', lw=2)
ax.set_xlim([x0, x1])
ax.set_ylim([x0, x1])
ax.set_yticks([0, -1, -2])
ax.set_xlabel('True value', fontsize=9)
ax.set_ylabel('Model prediction', fontsize=9)
ax.set_title('ARD RBF', fontsize=9)
ax.grid(True, alpha=0.2)
ax = fig.add_subplot(122)
ax.errorbar(
x=test_Y.numpy(), y=f1.numpy(), yerr = 2 * np.sqrt(var1.numpy()),
c='gray', lw=1, ls='', marker='.', mfc='k', mec='k', ms=3
)
x0 = -3
x1 = 1
ax.plot([x0, x1], [x0, x1], '-', zorder=-5, alpha=0.5, c='steelblue', lw=2)
ax.set_xlim([x0, x1])
ax.set_ylim([x0, x1])
ax.set_title('Mahalanobis', fontsize=9)
ax.set_xticks([-3, -2, -1, 0, 1])
ax.set_xlabel('True value', fontsize=9)
ax.grid(True, alpha=0.2)
plt.subplots_adjust(right=0.99, bottom=0.24, left=0.16, top=0.87, wspace=0.3)
plt.savefig('pdfs/ard_mahalanobis.pdf', pad_inches=0)
if __name__ == '__main__':
#run_simulation() # This will take ~20s, produces data/fig3_sim_output.pckl
make_fig_3()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
from fig_S8 import *
def make_fig_S9():
ys1, ys2 = extract_sensitivity_results()
d_es = [2, 3, 4, 5, 6, 7, 8]
mus_de = []
sems_de = []
for d_e in d_es:
Y = ys1[d_e][:, 49]
mus_de.append(Y.mean())
sems_de.append(Y.std() / np.sqrt(len(Y)))
Ds = [50, 100, 200, 500, 1000]
mus_D = []
sems_D = []
for D in Ds:
Y = ys2[D][:, 49]
mus_D.append(Y.mean())
sems_D.append(Y.std() / np.sqrt(len(Y)))
fig = plt.figure(figsize=(5.5, 1.8))
ax = fig.add_subplot(121)
ax.errorbar(d_es, mus_de, yerr=2*np.array(sems_de))
ax.set_ylim([0.3, 2.85])
ax.set_yticks([0.5, 1.0, 1.5, 2.0, 2.5])
ax.set_xlabel(r'Embedding dimension $d_e$', fontsize=9)
ax.set_title(r'Branin, $D=100$')
ax.set_ylabel('Best value found', fontsize=9)
ax.grid(alpha=0.2)
ax = fig.add_subplot(122)
ax.errorbar(Ds, mus_D, yerr=2*np.array(sems_D))
ax.set_ylim([0.3, 2.85])
ax.set_yticks([0.5, 1.0, 1.5, 2.0, 2.5])
ax.set_yticklabels([])
ax.set_xlabel(r'Ambient dimension $D$', fontsize=9)
ax.set_title('Branin, $d_e=4$')
ax.grid(alpha=0.2)
ax.set_xticks([50, 200, 500, 1000])
plt.subplots_adjust(right=0.93, bottom=0.19, left=0.12, top=0.89, wspace=0.1)
plt.savefig('pdfs/branin_by_D_d.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_S9()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.storage.json_store.decoder import object_from_json
from plot_config import *
def extract_ablation_results():
with open(f'../benchmarks/results/ablation_aggregated_results.json', 'r') as fin:
res = object_from_json(json.load(fin))
# Results
ys = {
'ALEBO': res['Branin, D=100_ablation'].objective_at_true_best['ALEBO, base'],
'Ablation: Matern kernel': res['Branin, D=100_ablation'].objective_at_true_best['ALEBO, kernel ablation'],
'Ablation: Normal projection': res['Branin, D=100_ablation'].objective_at_true_best['ALEBO, projection ablation'],
}
return ys
def make_fig_S_ablation():
ys = extract_ablation_results()
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(111)
x = np.arange(1, 51)
for k, y in ys.items():
f = y.mean(axis=0)
sem = y.std(axis=0) / np.sqrt(y.shape[0])
ax.errorbar(x, f, yerr=2 * sem, label=k)
ax.set_ylim([0, 7])
ax.set_yticks([0, 2, 4, 6])
ax.legend(fontsize=7, loc='lower left')
ax.set_title(r'Branin, $D=100$')
ax.set_ylabel('Best value found', fontsize=9)
ax.set_xlabel('Function evaluations', fontsize=9)
ax.axhline(y=0.397887, c='gray', ls='--')
ax.grid(alpha=0.2, zorder=-10)
ax.set_xlim([0, 51])
plt.subplots_adjust(right=0.995, bottom=0.16, left=0.1, top=0.91, wspace=0.05)
plt.savefig('pdfs/branin_ablation_traces.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_S_ablation()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import numpy as np
import pickle
from plot_config import *
def make_fig_S2():
# Load in simulation results
with open('data/fig3_sim_output.pckl', 'rb') as fin:
(test_Y, f1, var1, f2, var2, f3, var3) = pickle.load(fin)
fig = plt.figure(figsize=(5.5, 2))
ax = fig.add_subplot(131)
ax.errorbar(
x=test_Y.numpy(), y=f3.numpy(), yerr = 2 * np.sqrt(var3.numpy()),
c='gray', lw=1, ls='', marker='.', mfc='k', mec='k', ms=3
)
x0 = -2.5
x1 = 0.5
ax.plot([x0, x1], [x0, x1], '-', zorder=-5, alpha=0.5, c='steelblue', lw=2)
ax.set_xlim([x0, x1])
ax.set_ylim([x0, x1])
ax.set_xlabel('True value', fontsize=9)
ax.set_ylabel('Model prediction', fontsize=9)
ax.set_title('ARD RBF', fontsize=9)
ax = fig.add_subplot(132)
ax.errorbar(
x=test_Y.numpy(), y=f2.numpy(), yerr = 2 * np.sqrt(var2.numpy()),
c='gray', lw=1, ls='', marker='.', mfc='k', mec='k', ms=3
)
x0 = -2.5
x1 = 0.5
ax.plot([x0, x1], [x0, x1], '-', zorder=-5, alpha=0.5, c='steelblue', lw=2)
ax.set_xlim([x0, x1])
ax.set_ylim([x0, x1])
ax.set_xlabel('True value', fontsize=9)
ax.set_title('Mahalanobis\npoint estimate', fontsize=9)
ax = fig.add_subplot(133)
ax.errorbar(
x=test_Y.numpy(), y=f1.numpy(), yerr = 2 * np.sqrt(var1.numpy()),
c='gray', lw=1, ls='', marker='.', mfc='k', mec='k', ms=3
)
x0 = -3.
x1 = 1
ax.plot([x0, x1], [x0, x1], '-', zorder=-5, alpha=0.5, c='steelblue', lw=2)
ax.set_xlim([x0, x1])
ax.set_ylim([x0, x1])
ax.set_title('Mahalanobis\nposterior sampled', fontsize=9)
ax.set_xticks([-3, -2, -1, 0, 1])
ax.set_xlabel('True value', fontsize=9)
plt.subplots_adjust(right=0.99, bottom=0.17, left=0.1, top=0.84, wspace=0.3)
plt.savefig('pdfs/model_predictions.pdf', pad_inches=0)
if __name__ == '__main__':
# Assumes fig_3 has already been run
make_fig_S2()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
from fig_4 import *
def run_simulation():
t1 = time.time()
nsamp = 1000
res = {'unitsphere': {}}
for D in [50, 100, 200]:
for d in range(2, 19, 2):
for d_use in range(2, 21, 2):
if d_use < d:
continue
res['unitsphere'][(D, d, d_use)] = p_A_contains_optimizer(
d=d, D=D, d_use=d_use, gen_A_fn=gen_A_unitsphere, nsamp=nsamp
)
with open('data/figS6_sim_output.pckl', 'wb') as fout:
pickle.dump(res, fout)
print(time.time() - t1)
def make_fig_S6():
with open('data/figS6_sim_output.pckl', 'rb') as fin:
res = pickle.load(fin)
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(5.5, 2),
                                             gridspec_kw={"width_ratios": [1, 1, 1, 0.15]})
axes = [ax1, ax2, ax3]
for i, D in enumerate([50, 100, 200]):
ds = []
duses = []
ps = []
for d in range(2, 19, 2):
for d_use in range(2, 21, 2):
if d_use < d:
continue
ds.append(d)
duses.append(d_use)
ps.append(res['unitsphere'][(D, d, d_use)])
cntr = axes[i].tricontourf(duses, ds, ps, levels=np.linspace(0, 1.001, 21), cmap='viridis')
axes[i].set_title(f'$D={D}$')
if i == 0:
axes[i].set_yticks([2, 6, 10, 14, 18])
axes[i].set_ylabel(r'True subspace dimension $d$', fontsize=9)
else:
axes[i].set_yticks([2, 6, 10, 14, 18])
axes[i].set_yticklabels([])
axes[i].grid(alpha=0.2, zorder=-2)
axes[i].set_xlabel(r'Embedding $d_e$')
axes[i].set_xticks([2, 6, 10, 14, 18])
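    # ax4 is an empty placeholder axis that only hosts the shared colorbar.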
ax4.patch.set_visible(False)
ax4.set_yticks([])
ax4.set_xticks([])
fig.colorbar(cntr, ax=ax4, ticks=[0, 0.25, 0.5, 0.75, 1.], fraction=1)
plt.subplots_adjust(right=0.97, bottom=0.2, left=0.07, top=0.89, wspace=0.1)
plt.savefig('pdfs/lp_solns_D.pdf', pad_inches=0)
if __name__ == '__main__':
#run_simulation() # This takes about 3hrs and produces data/figS6_sim_output.pckl
make_fig_S6()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import json
import numpy as np
from ax.storage.json_store.decoder import object_from_json
from plot_config import *
def make_fig_S7():
# Load in the benchmark results
res = {}
for fname in [
'hartmann6_1000',
'branin_gramacy_100',
'hartmann6_100',
'hartmann6_random_subspace_1000',
]:
with open(f'../benchmarks/results/{fname}_aggregated_results.json', 'r') as fin:
res.update(object_from_json(json.load(fin)))
# A map from method idx in plot_method_names to the name used in res
method_idx_to_res_name = {
0: 'ALEBO',
1: 'REMBO',
2: 'HeSBO, d=d',
3: 'HeSBO, d=2d',
4: 'rrembos_standard_kPsi',
5: 'rrembos_reverse_kPsi',
6: 'ebo',
7: 'addgpucb',
8: 'smac',
9: 'cmaes',
10: 'turbo',
11: 'Sobol',
12: 'coordinatelinebo',
13: 'randomlinebo',
14: 'descentlinebo',
}
# Make the figure
fig = plt.figure(figsize=(5.5, 7.5))
####### Branin, D=100
ax1 = fig.add_subplot(511)
res_h = res['Branin, D=100']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = np.log(res_h.objective_at_true_best[res_name] - 0.397887)
f = Y.mean(axis=0)
sem = Y.std(axis=0) / np.sqrt(Y.shape[0])
x = np.arange(1, 51)
color = plot_colors[m]
ax1.plot(x, f, color=color, label=m)
ax1.errorbar(x[4::5], f[4::5], yerr=2 * sem[4::5], color=color, alpha=0.5, ls='')
ax1.set_xlim([0, 51])
ax1.set_ylim([-6, 2])
ax1.set_ylabel('Log regret', fontsize=9)
ax1.grid(alpha=0.2, zorder=-10)
ax1.set_title(r'Branin, $d$=2, $D$=100')
####### Hartmann6, D=1000
ax2 = fig.add_subplot(512)
res_h = res['Hartmann6, D=1000']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = np.log(res_h.objective_at_true_best[res_name] - (-3.32237))
f = Y.mean(axis=0)
sem = Y.std(axis=0) / np.sqrt(Y.shape[0])
x = np.arange(1, 201)
color = plot_colors[m]
ax2.plot(x, f, color=color, label=m)
ax2.errorbar(x[9::10], f[9::10], yerr=2 * sem[9::10], color=color, alpha=0.5, ls='')
ax2.set_xlim([0, 201])
ax2.set_ylim([-2.5, 1.7])
ax2.set_ylabel('Log regret', fontsize=9)
ax2.grid(alpha=0.2, zorder=-10)
ax2.set_title(r'Hartmann6, $d$=6, $D$=1000')
####### Gramacy, D=100
ax3 = fig.add_subplot(513)
res_h = res['Gramacy, D=100']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = np.log(res_h.objective_at_true_best[res_name] - 0.5998)
f = Y.mean(axis=0)
sem = Y.std(axis=0) / np.sqrt(Y.shape[0])
x = np.arange(1, 51)
color = plot_colors[m]
ax3.plot(x, f, color=color, label=m)
ax3.errorbar(x[4::5], f[4::5], yerr=2 * sem[4::5], color=color, alpha=0.5, ls='')
ax3.set_xlim([0, 51])
ax3.set_ylabel('Log regret', fontsize=9)
ax3.grid(alpha=0.2, zorder=-10)
ax3.set_title(r'Gramacy, $d$=2, $D=100$')
####### Hartmann6, D=100
ax4 = fig.add_subplot(514)
res_h = res['Hartmann6, D=100']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = np.log(res_h.objective_at_true_best[res_name] - (-3.32237))
f = Y.mean(axis=0)
sem = Y.std(axis=0) / np.sqrt(Y.shape[0])
x = np.arange(1, 201)
color = plot_colors[m]
ax4.plot(x, f, color=color, label=m)
ax4.errorbar(x[9::10], f[9::10], yerr=2 * sem[9::10], color=color, alpha=0.5, ls='')
ax4.set_xlim([0, 201])
ax4.set_ylim([-4, 1.7])
ax4.set_ylabel('Log regret', fontsize=9)
ax4.grid(alpha=0.2, zorder=-10)
ax4.set_title(r'Hartmann6, $d$=6, $D$=100')
# Add the legend
ax4.legend(bbox_to_anchor=(1.44, 5.405), fontsize=8)
####### Hartmann6 random subspace, D=1000
ax5 = fig.add_subplot(515)
res_h = res['Hartmann6 random subspace, D=1000']
for idx, m in enumerate(plot_method_names):
res_name = method_idx_to_res_name[idx]
if res_name not in res_h.objective_at_true_best:
continue # Not run on this problem
Y = np.log(res_h.objective_at_true_best[res_name] - (-3.32237))
f = Y.mean(axis=0)
sem = Y.std(axis=0) / np.sqrt(Y.shape[0])
x = np.arange(1, 201)
color = plot_colors[m]
ax5.plot(x, f, color=color, label=m)
ax5.errorbar(x[9::10], f[9::10], yerr=2 * sem[9::10], color=color, alpha=0.5, ls='')
ax5.set_xlim([0, 201])
ax5.set_ylim([-2.1, 1.7])
ax5.set_xlabel('Function evaluations', fontsize=9)
ax5.set_ylabel('Log regret', fontsize=9)
ax5.grid(alpha=0.2, zorder=-10)
ax5.set_title(r'Hartmann6, $d$=6 random subspace, $D$=1000')
plt.subplots_adjust(right=0.72, bottom=0.06, left=0.08, top=0.97, hspace=0.45)
plt.savefig('pdfs/log_regrets.pdf', pad_inches=0)
if __name__ == '__main__':
make_fig_S7()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
from math import pi
from fig_3 import *
def compute_ll(f, var, test_Y):
return -0.5 * (torch.log(2 * pi * var) + ((test_Y - f) ** 2) / var).sum().item()
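# compute_ll sums the Gaussian log predictive density over the test set:
#   sum_i -0.5 * ( log(2*pi*var_i) + (y_i - f_i)^2 / var_i )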
def run_simulation():
D = 100
d = 6
# Get projection
torch.manual_seed(10)
B0 = torch.randn(d, D, dtype=torch.double)
B = B0 / torch.sqrt((B0 ** 2).sum(dim=0))
# Get test data
_, _, _, test_X, test_Y, _, _ = gen_train_test_sets(B, ntrain=10, ntest=1000, seed_test=1000)
ns = np.array([40, 50, 75, 100, 125, 150, 175, 200])
nrep = 20
ll_alebo = np.zeros((nrep, len(ns)))
ll_ard = np.zeros((nrep, len(ns)))
for i in range(nrep):
for j, n in enumerate(ns):
# Generate training data
train_X, train_Y, train_Yvar, _, _, mu, sigma = gen_train_test_sets(
B, ntrain=n, ntest=10, seed_train=(i + 1) * len(ns) + j
)
# Predict with each model
f1, var1 = fit_and_predict_alebo(B, train_X, train_Y, train_Yvar, test_X, mu, sigma)
f3, var3 = fit_and_predict_ARDRBF(B, train_X, train_Y, train_Yvar, test_X, mu, sigma)
ll_alebo[i, j] = compute_ll(f1, var1, test_Y)
ll_ard[i, j] = compute_ll(f3, var3, test_Y)
# Save outcome
with open('data/figS3_sim_output.pckl', 'wb') as fout:
pickle.dump((ns, ll_alebo, ll_ard), fout)
def make_fig_S3():
with open('data/figS3_sim_output.pckl', 'rb') as fin:
(ns, ll_alebo, ll_ard) = pickle.load(fin)
ntest = 1000.
nrep = 20
fig = plt.figure(figsize=(3, 2))
ax = fig.add_subplot(111)
ax.errorbar(ns, ll_alebo.mean(axis=0) / ntest, yerr=2 * ll_alebo.std(axis=0)/ ntest / np.sqrt(nrep))
ax.errorbar(ns, ll_ard.mean(axis=0) / ntest, yerr=2 * ll_ard.std(axis=0)/ ntest / np.sqrt(nrep))
ax.grid(alpha=0.2)
ax.set_ylabel('Average test-set\nlog likelihood', fontsize=9)
ax.set_xlabel('Training set size', fontsize=9)
ax.legend(['Mahalanobis, sampled', 'ARD RBF'], fontsize=7, loc='lower right')
plt.subplots_adjust(right=0.995, bottom=0.19, left=0.21, top=0.98, wspace=0.3)
plt.savefig('pdfs/log_likelihood.pdf', pad_inches=0)
if __name__ == '__main__':
run_simulation() # Produces data/figS3_sim_output.pckl
make_fig_S3()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run NASBench benchmarks
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import json
import numpy as np
# This loads the nasbench dataset, which takes ~30s
from nasbench_evaluation import (
get_nasbench_ax_client,
evaluate_parameters,
NASBenchRunner,
)
from ax.modelbridge.registry import Models
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.alebo import ALEBOStrategy
from ax.modelbridge.strategies.rembo import HeSBOStrategy, REMBOStrategy
from ax.storage.json_store.encoder import object_to_json
from ax.core.data import Data
import turbo
import cma
def run_nasbench_benchmarks_ax(rep):
"""
Runs the Ax methods on the nasbench benchmark
(Sobol, ALEBO, HeSBO, REMBO)
"""
gs_list = [
GenerationStrategy(name="Sobol", steps=[GenerationStep(model=Models.SOBOL, num_trials=-1)]),
ALEBOStrategy(D=36, d=12, init_size=10),
HeSBOStrategy(D=36, d=12, init_per_proj=10),
REMBOStrategy(D=36, d=12, init_per_proj=4),
]
for gs in gs_list:
try:
axc = get_nasbench_ax_client(gs)
for i in range(50):
param_dict_i, trial_index = axc.get_next_trial()
raw_data = evaluate_parameters(param_dict_i)
axc.complete_trial(trial_index=trial_index, raw_data=raw_data)
with open(f'results/nasbench_{gs.name}_rep_{rep}.json', 'w') as fout:
json.dump(object_to_json(axc.experiment), fout)
        except Exception:
            pass  # Skip this strategy's replicate if it fails partway
return
def run_nasbench_benchmarks_turbo(rep):
r = NASBenchRunner(max_eval=50)
turbo1 = turbo.Turbo1(
f=r.f,
lb=np.zeros(36),
ub=np.ones(36),
n_init=10,
max_evals=50,
batch_size=1,
)
turbo1.optimize()
with open(f'results/nasbench_turbo_rep_{rep}.json', "w") as fout:
json.dump((r.fs, r.feas), fout)
def run_nasbench_benchmarks_cmaes(rep):
r = NASBenchRunner(max_eval=50)
try:
cma.fmin(
objective_function=r.f,
x0=[0.5] * 36,
sigma0=0.25,
options={
'bounds': [[0.0] * 36, [1.0] * 36],
'maxfevals': 50,
},
)
except ValueError:
pass # CMA-ES doesn't always terminate at exactly maxfevals
with open(f'results/nasbench_cmaes_rep_{rep}.json', "w") as fout:
json.dump((r.fs, r.feas), fout)
if __name__ == '__main__':
for rep in range(100):
run_nasbench_benchmarks_cmaes(rep)
run_nasbench_benchmarks_turbo(rep)
        run_nasbench_benchmarks_ax(rep)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, List, MutableMapping, Optional, Tuple, Union
from ax.models.torch.botorch_defaults import get_and_fit_model
from ax.modelbridge.strategies.alebo import ALEBOStrategy, get_ALEBOInitializer
import torch
from torch import Tensor
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.search_space import SearchSpace
from ax.modelbridge.factory import DEFAULT_TORCH_DEVICE
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.torch import TorchModelBridge
from ax.modelbridge.transforms.centered_unit_x import CenteredUnitX
from ax.modelbridge.transforms.standardize_y import StandardizeY
from botorch.models.gpytorch import GPyTorchModel
from ax.models.torch.alebo import ALEBO
class ALEBO_kernel_ablation(ALEBO):
def get_and_fit_model(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
state_dicts: Optional[List[MutableMapping[str, Tensor]]] = None,
) -> GPyTorchModel:
return get_and_fit_model(
Xs=Xs,
Ys=Ys,
Yvars=Yvars,
task_features=[],
fidelity_features=[],
metric_names=[],
state_dict=None,
)
def get_ALEBO_kernel_ablation(
experiment: Experiment,
search_space: SearchSpace,
data: Data,
B: torch.Tensor,
**model_kwargs: Any,
) -> TorchModelBridge:
if search_space is None:
search_space = experiment.search_space
return TorchModelBridge(
experiment=experiment,
search_space=search_space,
data=data,
model=ALEBO_kernel_ablation(B=B, **model_kwargs),
transforms=[CenteredUnitX, StandardizeY],
torch_dtype=B.dtype,
torch_device=B.device,
)
class ALEBOStrategy_kernel_ablation(GenerationStrategy):
def __init__(
self,
D: int,
d: int,
init_size: int,
name: str = "ALEBO",
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
random_kwargs: Optional[Dict[str, Any]] = None,
gp_kwargs: Optional[Dict[str, Any]] = None,
gp_gen_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
self.D = D
self.d = d
self.init_size = init_size
self.dtype = dtype
self.device = device
self.random_kwargs = random_kwargs if random_kwargs is not None else {}
self.gp_kwargs = gp_kwargs if gp_kwargs is not None else {}
self.gp_gen_kwargs = gp_gen_kwargs
B = self.gen_projection(d=d, D=D, device=device, dtype=dtype)
self.gp_kwargs.update({"B": B})
self.random_kwargs.update({"B": B.cpu().numpy()})
steps = [
GenerationStep(
model=get_ALEBOInitializer,
num_arms=init_size,
model_kwargs=self.random_kwargs,
),
GenerationStep(
model=get_ALEBO_kernel_ablation,
num_arms=-1,
model_kwargs=self.gp_kwargs,
model_gen_kwargs=gp_gen_kwargs,
),
]
super().__init__(steps=steps, name=name)
    def clone_reset(self) -> "ALEBOStrategy_kernel_ablation":
"""Copy without state."""
return self.__class__(
D=self.D,
d=self.d,
init_size=self.init_size,
name=self.name,
dtype=self.dtype,
device=self.device,
random_kwargs=self.random_kwargs,
gp_kwargs=self.gp_kwargs,
gp_gen_kwargs=self.gp_gen_kwargs,
)
def gen_projection(
self, d: int, D: int, dtype: torch.dtype, device: torch.device
) -> torch.Tensor:
"""Generate the projection matrix B as a (d x D) tensor
"""
B0 = torch.randn(d, D, dtype=dtype, device=device)
B = B0 / torch.sqrt((B0 ** 2).sum(dim=0))
return B
class ALEBOStrategy_projection_ablation(ALEBOStrategy):
def gen_projection(
self, d: int, D: int, dtype: torch.dtype, device: torch.device
) -> torch.Tensor:
B0 = torch.randn(d, D, dtype=dtype, device=device)
return B0
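# Unlike the base ALEBOStrategy, the projection ablation skips the unit-norm
# column scaling and uses the raw Gaussian (REMBO-style) projection B0.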
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for CMAES.
Requires installing cma from pip. The experiments here used version 2.7.0.
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import json
from benchmark_problems import (
branin_100,
hartmann6_100,
hartmann6_1000,
hartmann6_random_subspace_1000,
)
from ax.benchmark.benchmark import benchmark_minimize_callable
from ax.storage.json_store.encoder import object_to_json
import cma # cma==2.7.0
def run_hartmann6_benchmarks(D, rep, random_subspace=False):
if D == 100:
problem = hartmann6_100
elif D == 1000 and not random_subspace:
problem = hartmann6_1000
elif D == 1000 and random_subspace:
problem = hartmann6_random_subspace_1000
experiment, f = benchmark_minimize_callable(
problem=problem,
num_trials=200,
method_name='cmaes',
replication_index=rep,
)
try:
cma.fmin(
objective_function=f,
x0=[0.5] * D,
sigma0=0.25,
options={'bounds': [[0] * D, [1] * D], 'maxfevals': 200},
)
except ValueError:
pass # CMA-ES doesn't always terminate at exactly maxfevals
rs_str = 'random_subspace_' if random_subspace else ''
with open(f'results/hartmann6_{rs_str}{D}_cmaes_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
def run_branin_benchmarks(rep):
experiment, f = benchmark_minimize_callable(
problem=branin_100,
num_trials=50,
method_name='cmaes',
replication_index=rep,
)
try:
cma.fmin(
objective_function=f,
x0=[2.5] * 50 + [7.5] * 50,
sigma0=3.75,
options={
'bounds': [[-5] * 50 + [0] * 50, [10] * 50 + [15] * 50],
'maxfevals': 50,
},
)
except ValueError:
pass # CMA-ES doesn't always terminate at exactly maxfevals
with open(f'results/branin_100_cmaes_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
if __name__ == '__main__':
# Run all of the CMAES experiments.
# These can be distributed.
for i in range(50):
# Hartmann6, D=100: Each rep takes ~5 s
run_hartmann6_benchmarks(D=100, rep=i)
# Hartmann6, D=1000: Each rep takes ~10 s
run_hartmann6_benchmarks(D=1000, rep=i)
# Branin, D=100: Each rep takes ~1 s
run_branin_benchmarks(rep=i)
# Hartmann6 random subspace, D=1000
run_hartmann6_benchmarks(D=1000, rep=i, random_subspace=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for SMAC.
Requires installing smac (SMAC3) from pip.
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import json
import numpy as np
from benchmark_problems import (
branin_100,
hartmann6_100,
hartmann6_1000,
hartmann6_random_subspace_1000,
)
from ax.benchmark.benchmark import benchmark_minimize_callable
from ax.storage.json_store.encoder import object_to_json
from smac.facade.smac_hpo_facade import SMAC4HPO
from smac.scenario.scenario import Scenario
from smac.configspace import ConfigurationSpace
from smac.runhistory.runhistory import RunKey
from smac.initial_design.random_configuration_design import RandomConfigurations
from smac.tae.execute_func import ExecuteTAFuncArray
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
def fmin_smac_nopynisher(func, x0, bounds, maxfun, rng):
"""
Minimize a function using SMAC, but without pynisher, which doesn't work
well with benchmark_minimize_callable.
This function is based on SMAC's fmin_smac.
"""
cs = ConfigurationSpace()
tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'
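    # Zero-pad the parameter names (x01, x02, ...) so the lexicographic sort
    # in call_ta below recovers the original coordinate order.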
for idx, (lower_bound, upper_bound) in enumerate(bounds):
parameter = UniformFloatHyperparameter(
name=tmplt.format(idx + 1),
lower=lower_bound,
upper=upper_bound,
default_value=x0[idx],
)
cs.add_hyperparameter(parameter)
scenario_dict = {
"run_obj": "quality",
"cs": cs,
"deterministic": "true",
"initial_incumbent": "DEFAULT",
"runcount_limit": maxfun,
}
scenario = Scenario(scenario_dict)
def call_ta(config):
        x = np.array([val for _, val in sorted(config.get_dictionary().items())],
                     dtype=float)
return func(x)
smac = SMAC4HPO(
scenario=scenario,
tae_runner=ExecuteTAFuncArray,
tae_runner_kwargs={'ta': call_ta, 'use_pynisher': False},
rng=rng,
initial_design=RandomConfigurations,
)
smac.optimize()
return
def run_hartmann6_benchmarks(D, rep, random_subspace=False):
if D == 100:
problem = hartmann6_100
elif D == 1000 and not random_subspace:
problem = hartmann6_1000
elif D == 1000 and random_subspace:
problem = hartmann6_random_subspace_1000
experiment, f = benchmark_minimize_callable(
problem=problem,
num_trials=200,
method_name='smac',
replication_index=rep,
)
fmin_smac_nopynisher(
func=f,
x0=[0.5] * D,
bounds=[[0, 1]] * D,
maxfun=200,
rng=rep + 1,
)
rs_str = 'random_subspace_' if random_subspace else ''
with open(f'results/hartmann6_{rs_str}{D}_smac_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
def run_branin_benchmarks(rep):
experiment, f = benchmark_minimize_callable(
problem=branin_100,
num_trials=50,
method_name='smac',
replication_index=rep,
)
fmin_smac_nopynisher(
func=f,
x0=[2.5] * 50 + [7.5] * 50,
bounds=[[-5, 10]] * 50 + [[0, 15]] * 50,
maxfun=50,
rng=rep + 1,
)
with open(f'results/branin_100_smac_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
if __name__ == '__main__':
# Run all of the SMAC experiments.
# These can be distributed.
for i in range(50):
# Hartmann6, D=100: Each rep takes ~1.5 hours
run_hartmann6_benchmarks(D=100, rep=i)
# Branin, D=100: Each rep takes ~20 mins
run_branin_benchmarks(rep=i)
# Hartmann6, D=1000: Each rep takes ~36 hours
for i in range(10):
run_hartmann6_benchmarks(D=1000, rep=i)
# Hartmann6 random subspace, D=1000
run_hartmann6_benchmarks(D=1000, rep=i, random_subspace=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for TuRBO.
Requires installing turbo from https://github.com/uber-research/TuRBO.
The experiments here used version 0.0.1 (commit 8461f9c).
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import json
import numpy as np
from benchmark_problems import (
branin_100,
hartmann6_100,
hartmann6_1000,
hartmann6_random_subspace_1000,
)
from ax.benchmark.benchmark import benchmark_minimize_callable
from ax.storage.json_store.encoder import object_to_json
import turbo
def run_hartmann6_benchmarks(D, rep, random_subspace=False):
if D == 100:
problem = hartmann6_100
elif D == 1000 and not random_subspace:
problem = hartmann6_1000
elif D == 1000 and random_subspace:
problem = hartmann6_random_subspace_1000
experiment, f = benchmark_minimize_callable(
problem=problem,
num_trials=200,
method_name='turbo',
replication_index=rep,
)
turbo1 = turbo.Turbo1(
f=f,
lb=np.zeros(D),
ub=np.ones(D),
n_init=10,
max_evals=200,
)
turbo1.optimize()
rs_str = 'random_subspace_' if random_subspace else ''
with open(f'results/hartmann6_{rs_str}{D}_turbo_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
def run_branin_benchmarks(rep):
experiment, f = benchmark_minimize_callable(
problem=branin_100,
num_trials=50,
method_name='turbo',
replication_index=rep,
)
turbo1 = turbo.Turbo1(
f=f,
lb=np.hstack((-5 * np.ones(50), np.zeros(50))),
ub=np.hstack((10 * np.ones(50), 15 * np.ones(50))),
n_init=10,
max_evals=50,
)
turbo1.optimize()
with open(f'results/branin_100_turbo_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
if __name__ == '__main__':
# Run all of the TuRBO experiments.
# These can be distributed.
for i in range(50):
# Hartmann6, D=100: Each rep takes ~15 mins
run_hartmann6_benchmarks(D=100, rep=i)
# Hartmann6, D=1000: Each rep takes ~30 mins
run_hartmann6_benchmarks(D=1000, rep=i)
# Branin, D=100: Each rep takes ~5 mins
run_branin_benchmarks(rep=i)
# Hartmann6 random subspace, D=1000
run_hartmann6_benchmarks(D=1000, rep=i, random_subspace=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Compile all of the benchmark results from the different methods (potentially
run in a distributed fashion) into a single BenchmarkResult object.
All of the benchmark runs should be completed before running this.
"""
import gc
import json
import numpy as np
from benchmark_problems import (
branin_100,
branin_by_D,
gramacy_100,
hartmann6_100,
hartmann6_1000,
hartmann6_random_subspace_1000,
)
from ax.benchmark.benchmark_result import aggregate_problem_results
from ax.storage.json_store.encoder import object_to_json
from ax.storage.json_store.decoder import object_from_json
def merge_benchmark_results(res1, res2):
"""
Merges two benchmark results dictionaries in-place (res2 into res1)
"""
for problem_name in res2:
for method_name in res2[problem_name]:
for exp in res2[problem_name][method_name]:
res1 = add_exp(
res=res1,
exp=exp,
problem_name=problem_name,
method_name=method_name,
)
return res1
def add_exp(res, exp, problem_name, method_name):
"""
Add benchmark experiment exp to results dict res, under the specified
problem_name and method_name.
"""
if problem_name not in res:
res[problem_name] = {}
if method_name not in res[problem_name]:
res[problem_name][method_name] = []
res[problem_name][method_name].append(exp)
return res
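# Tiny sketch (illustration only) of the results-dict layout these helpers
# maintain: {problem_name: {method_name: [experiment, ...]}}. The 'exp0'
# placeholder stands in for a deserialized experiment object.
def _demo_add_exp():
    res = add_exp(res={}, exp='exp0', problem_name='p', method_name='m')
    assert res == {'p': {'m': ['exp0']}}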
def compile_hartmann6(D, random_subspace=False):
if D == 100:
problem = hartmann6_100
other_methods = ['addgpucb', 'cmaes', 'ebo', 'smac', 'turbo']
rls = ['rrembos_standard_kPsi', 'rrembos_reverse_kPsi', 'coordinatelinebo', 'descentlinebo', 'randomlinebo']
rs_str = ''
elif D == 1000 and not random_subspace:
problem = hartmann6_1000
other_methods = ['cmaes', 'smac', 'turbo']
rls = ['rrembos_standard_kPsi']
rs_str = ''
elif D == 1000 and random_subspace:
problem = hartmann6_random_subspace_1000
other_methods = ['cmaes', 'smac', 'turbo']
rls = []
rs_str = 'random_subspace_'
all_results = {}
for rep in range(50):
with open(f'results/hartmann6_{rs_str}{D}_alebo_rembo_hesbo_sobol_rep_{rep}.json', 'r') as fin:
res_i = object_from_json(json.load(fin))
all_results = merge_benchmark_results(all_results, res_i)
for method_name in other_methods:
if D==1000 and method_name == 'smac' and rep > 9:
# SMAC D=1000 only run for 10 reps
continue
with open(f'results/hartmann6_{rs_str}{D}_{method_name}_rep_{rep}.json', 'r') as fin:
exp_i = object_from_json(json.load(fin))
all_results = add_exp(res=all_results, exp=exp_i, problem_name=problem.name, method_name=method_name)
res = aggregate_problem_results(runs=all_results[problem.name], problem=problem)
    # Add in RRembo and LineBO results
for method in rls:
with open(f'results/hartmann6_{D}_{method}.json', 'r') as fin:
A = json.load(fin)
res.objective_at_true_best[method] = np.minimum.accumulate(np.array(A), axis=1)
# Save
with open(f'results/hartmann6_{rs_str}{D}_aggregated_results.json', "w") as fout:
json.dump(object_to_json({problem.name: res}), fout)
def compile_branin_gramacy_100():
all_results = {}
for rep in range(50):
with open(f'results/branin_gramacy_100_alebo_rembo_hesbo_sobol_rep_{rep}.json', 'r') as fin:
res_i = object_from_json(json.load(fin))
all_results = merge_benchmark_results(all_results, res_i)
for method_name in ['addgpucb', 'cmaes', 'ebo', 'smac', 'turbo']:
with open(f'results/branin_100_{method_name}_rep_{rep}.json', 'r') as fin:
exp_i = object_from_json(json.load(fin))
all_results = add_exp(
res=all_results,
exp=exp_i,
problem_name='Branin, D=100',
method_name=method_name,
)
res = {
p.name: aggregate_problem_results(runs=all_results[p.name], problem=p)
for p in [branin_100, gramacy_100]
}
# Add in RRembo results
for proj in ['standard', 'reverse']:
method = f'rrembos_{proj}_kPsi'
with open(f'results/branin_100_{method}.json', 'r') as fin:
A = json.load(fin)
res['Branin, D=100'].objective_at_true_best[method] = np.minimum.accumulate(np.array(A), axis=1)
# Save
with open(f'results/branin_gramacy_100_aggregated_results.json', "w") as fout:
json.dump(object_to_json(res), fout)
def compile_sensitivity_benchmarks():
all_results = {}
for rep in range(50):
## Sensitivity to D
with open(f'results/sensitivity_D_rep_{rep}.json', 'r') as fin:
results_dict = json.load(fin)
for D, obj in results_dict.items():
res_i = object_from_json(obj)
all_results = merge_benchmark_results(all_results, res_i)
## Sensitivity to d_e
with open(f'results/sensitivity_d_e_rep_{rep}.json', 'r') as fin:
res_i = object_from_json(json.load(fin))
all_results = merge_benchmark_results(all_results, res_i)
all_problems = (
[hartmann6_100, hartmann6_1000, branin_100, gramacy_100]
+ list(branin_by_D.values())
)
problems = [branin_100] + list(branin_by_D.values())
res = {
p.name+'_sensitivity': aggregate_problem_results(runs=all_results[p.name], problem=p)
for p in problems
}
# Save
with open(f'results/sensitivity_aggregated_results.json', "w") as fout:
json.dump(object_to_json(res), fout)
def compile_ablation_benchmarks():
all_results = {}
for rep in range(100):
with open(f'results/ablation_rep_{rep}.json', 'r') as fin:
res_i = object_from_json(json.load(fin))
all_results = merge_benchmark_results(all_results, res_i)
problems = [branin_100]
res = {
p.name+'_ablation': aggregate_problem_results(runs=all_results[p.name], problem=p)
for p in problems
}
# Save
with open(f'results/ablation_aggregated_results.json', "w") as fout:
json.dump(object_to_json(res), fout)
def compile_nasbench():
all_res = {}
# TuRBO and CMAES
for method in ['turbo', 'cmaes']:
all_res[method] = []
for rep in range(100):
with open(f'results/nasbench_{method}_rep_{rep}.json', 'r') as fin:
fs, feas = json.load(fin)
# Set infeasible points to nan
fs = np.array(fs)
fs[~np.array(feas)] = np.nan
all_res[method].append(fs)
# Ax methods
for method in ['Sobol', 'ALEBO', 'HeSBO', 'REMBO']:
all_res[method] = []
for rep in range(100):
with open(f'results/nasbench_{method}_rep_{rep}.json', 'r') as fin:
exp = object_from_json(json.load(fin))
# Pull out results and set infeasible points to nan
df = exp.fetch_data().df.sort_values(by='arm_name')
df_obj = df[df['metric_name'] == 'final_test_accuracy'].copy().reset_index(drop=True)
df_con = df[df['metric_name'] == 'final_training_time'].copy().reset_index(drop=True)
infeas = df_con['mean'] > 1800
df_obj.loc[infeas, 'mean'] = np.nan
all_res[method].append(df_obj['mean'].values)
for method, arr in all_res.items():
all_res[method] = np.fmax.accumulate(np.vstack(all_res[method]), axis=1)
with open(f'results/nasbench_aggregated_results.json', "w") as fout:
json.dump(object_to_json(all_res), fout)
if __name__ == '__main__':
compile_nasbench()
gc.collect()
compile_hartmann6(D=100)
gc.collect()
compile_hartmann6(D=1000)
gc.collect()
compile_branin_gramacy_100()
gc.collect()
compile_sensitivity_benchmarks()
gc.collect()
compile_hartmann6(D=1000, random_subspace=True)
gc.collect()
compile_ablation_benchmarks()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
import json
import numpy as np
from scipy.stats import special_ortho_group
from typing import List, Optional
from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.metrics.branin import BraninMetric
from ax.metrics.hartmann6 import Hartmann6Metric
from ax.metrics.noisy_function import NoisyFunctionMetric
from ax.storage.metric_registry import register_metric
from ax.utils.measurement.synthetic_functions import hartmann6
### Hartmann6 problem, D=100 and D=1000
# Relevant parameters were chosen randomly using
# x = np.arange(100)
# np.random.seed(10)
# np.random.shuffle(x)
# print(x[:6]) # [19 14 43 37 66 3]
hartmann6_100 = BenchmarkProblem(
name="Hartmann6, D=100",
optimal_value=-3.32237,
optimization_config=OptimizationConfig(
objective=Objective(
metric=Hartmann6Metric(
name="objective",
param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
noise_sd=0.0,
),
minimize=True,
)
),
search_space=SearchSpace(
parameters=[
RangeParameter(
name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
)
for i in range(100)
]
),
)
hartmann6_1000 = BenchmarkProblem(
name="Hartmann6, D=1000",
optimal_value=-3.32237,
optimization_config=OptimizationConfig(
objective=Objective(
metric=Hartmann6Metric(
name="objective",
param_names=["x190", "x140", "x430", "x370", "x660", "x30"],
noise_sd=0.0,
),
minimize=True,
)
),
search_space=SearchSpace(
parameters=[
RangeParameter(
name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
)
for i in range(1000)
]
),
)
### Branin problem, D=100 and sensitivity analysis
# Original x1 and x2 have different bounds, so we create blocks of 50 for each
# with each of the bounds and set the relevant parameters in those blocks.
branin_100 = BenchmarkProblem(
name="Branin, D=100",
optimal_value=0.397887,
optimization_config=OptimizationConfig(
objective=Objective(
metric=BraninMetric(
name="objective", param_names=["x19", "x64"], noise_sd=0.0
),
minimize=True,
)
),
search_space=SearchSpace(
parameters=[ # pyre-ignore
RangeParameter(
name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0
)
for i in range(50)
]
+ [
RangeParameter(
name=f"x{i + 50}",
parameter_type=ParameterType.FLOAT,
lower=0.0,
upper=15.0,
)
for i in range(50)
]
),
)
# Additional dimensionalities for the sensitivity analysis to D.
# Random embedding methods are invariant to the ordering of relevant/irrelevant
# parameters, and also to the bounds on the irrelevant parameters. So since
# these problems are being used only for ALEBO, we can simplify their
# definition and take x0 and x1 as the relevant.
base_branin_parameters = [
RangeParameter(
name="x0", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0
),
RangeParameter(
name="x1", parameter_type=ParameterType.FLOAT, lower=0.0, upper=15.0
),
]
branin_by_D = {
D: BenchmarkProblem(
name="Branin, D=" + str(D),
optimal_value=0.397887,
optimization_config=OptimizationConfig(
objective=Objective(
metric=BraninMetric(
name="objective", param_names=["x0", "x1"], noise_sd=0.0
),
minimize=True,
)
),
search_space=SearchSpace(
parameters=base_branin_parameters # pyre-ignore
+ [
RangeParameter(
name=f"x{i}",
parameter_type=ParameterType.FLOAT,
lower=0.0,
upper=1.0,
)
for i in range(2, D)
]
),
)
for D in [50, 200, 500, 1000]
}
### Gramacy problem, D=100
class GramacyObjective(NoisyFunctionMetric):
def f(self, x: np.ndarray) -> float:
return x.sum()
class GramacyConstraint1(NoisyFunctionMetric):
def f(self, x: np.ndarray) -> float:
return 1.5 - x[0] - 2 * x[1] - 0.5 * np.sin(2 * np.pi * (x[0] ** 2 - 2 * x[1]))
class GramacyConstraint2(NoisyFunctionMetric):
def f(self, x: np.ndarray) -> float:
return x[0] ** 2 + x[1] ** 2 - 1.5
# Register these metrics so they can be serialized to json
register_metric(metric_cls=GramacyObjective, val=101)
register_metric(metric_cls=GramacyConstraint1, val=102)
register_metric(metric_cls=GramacyConstraint2, val=103)
gramacy_100 = BenchmarkProblem(
name="Gramacy, D=100",
optimal_value=0.5998,
optimization_config=OptimizationConfig(
objective=Objective(
metric=GramacyObjective(
name="objective", param_names=["x19", "x64"], noise_sd=0.0
),
minimize=True,
),
outcome_constraints=[
OutcomeConstraint(
metric=GramacyConstraint1(
name="constraint1", param_names=["x19", "x64"], noise_sd=0.0
),
op=ComparisonOp.LEQ,
bound=0.0,
relative=False,
),
OutcomeConstraint(
metric=GramacyConstraint2(
name="constraint2", param_names=["x19", "x64"], noise_sd=0.0
),
op=ComparisonOp.LEQ,
bound=0.0,
relative=False,
),
],
),
search_space=SearchSpace(
parameters=[
RangeParameter(
name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
)
for i in range(100)
]
),
)
### Hartmann6 D=1000 with random subspace
class Hartmann6RandomSubspace1000Metric(NoisyFunctionMetric):
def __init__(
self,
name: str,
param_names: List[str],
noise_sd: float = 0.0,
lower_is_better: Optional[bool] = None,
) -> None:
super().__init__(
name=name,
param_names=param_names,
noise_sd=noise_sd,
lower_is_better=lower_is_better,
)
# Set the random basis
try:
with open('data/random_subspace_1000x6.json', 'r') as fin:
self.random_basis = np.array(json.load(fin))
except IOError:
np.random.seed(1000)
self.random_basis = special_ortho_group.rvs(1000)[:6, :]
with open('data/random_subspace_1000x6.json', 'w') as fout:
json.dump(self.random_basis.tolist(), fout)
def f(self, x: np.ndarray) -> float:
# Project down to the true subspace
z = self.random_basis @ x
# x \in [-1, 1], so adjust z to be closer to [0, 1], and evaluate
return hartmann6((z + 1) / 2.)
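# Hedged sketch of the projection above, at a smaller hypothetical D so it is
# cheap to run: rows of a special_ortho_group sample are orthonormal, so
# z = B @ x projects the D-dim point onto a 6-dim subspace.
def _demo_random_subspace_projection():
    np.random.seed(0)
    B = special_ortho_group.rvs(20)[:6, :]
    x = np.random.uniform(-1, 1, 20)
    z = B @ x
    assert z.shape == (6,)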
register_metric(metric_cls=Hartmann6RandomSubspace1000Metric, val=104)
hartmann6_random_subspace_1000 = BenchmarkProblem(
name="Hartmann6 random subspace, D=1000",
optimal_value=-3.32237,
optimization_config=OptimizationConfig(
objective=Objective(
metric=Hartmann6RandomSubspace1000Metric(
name="objective",
param_names=[f"x{i}" for i in range(1000)],
noise_sd=0.0,
),
minimize=True,
)
),
search_space=SearchSpace(
parameters=[
RangeParameter(
name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=-1.0, upper=1.0
)
for i in range(1000)
]
),
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for Add-GP-UCB.
Requires installing dragonfly-opt from pip. The experiments here used version
0.1.4.
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
from argparse import Namespace
import json
from benchmark_problems import (
branin_100,
hartmann6_100,
hartmann6_1000,
)
from ax.benchmark.benchmark import benchmark_minimize_callable
from ax.storage.json_store.encoder import object_to_json
from dragonfly import minimise_function # dragonfly-opt==0.1.4
def run_hartmann6_benchmarks(D, rep):
if D == 100:
problem = hartmann6_100
elif D == 1000:
problem = hartmann6_1000
experiment, f = benchmark_minimize_callable(
problem=problem,
num_trials=200,
method_name='add_gp_ucb',
replication_index=rep,
)
options = Namespace(acq="add_ucb")
res = minimise_function(f, domain=[[0, 1]] * D, max_capital=199, options=options)
with open(f'results/hartmann6_{D}_addgpucb_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
def run_branin_benchmarks(rep):
experiment, f = benchmark_minimize_callable(
problem=branin_100,
num_trials=50,
method_name='add_gp_ucb',
replication_index=rep,
)
options = Namespace(acq="add_ucb")
res = minimise_function(
f,
domain=[[-5, 10]] * 50 + [[0, 15]] * 50,
max_capital=49,
options=options,
)
with open(f'results/branin_100_addgpucb_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
if __name__ == '__main__':
# Run all of the Add-GP-UCB benchmarks using Dragonfly.
# These can be distributed.
for i in range(50):
run_hartmann6_benchmarks(D=100, rep=i)
## Hartmann6, D=1000: Too slow, not run
#run_hartmann6_benchmarks(D=1000, rep=i)
# Branin, D=100: Each rep takes ~3 hours
run_branin_benchmarks(rep=i)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for LineBO on the Hartmann6 100 problem.
These benchmarks are run using the benchmarking suite contained in the LineBO
codebase. The code here loads in those results and extracts just the function
evaluations, which are then stored in json. There are several steps that must
be done before this script can be run.
1) Clone and install the LineBO software from https://github.com/jkirschner42/LineBO
2) copy data/hartmann6_100.yaml from here into the LineBO/config/ directory.
This problem configuration is based on LineBO/config/hartmann6_sub14.yaml.
3) Run the experiments by executing:
febo create hartmann6_100 --config config/hartmann6_100.yaml
febo run hartmann6_100
4) Copy LineBO/runs/hartmann6_100/data/evaluations.hdf5 into results/
"""
import numpy as np
import h5py
import json
f = h5py.File('results/evaluations.hdf5', 'r')
methods = ['RandomLineBO', 'CoordinateLineBO', 'DescentLineBO', 'Random', 'CMA-ES']
ys = {}
for i, m in enumerate(methods):
ys[m] = np.zeros((50, 200))
for rep in range(50):
ys[m][rep, :] = f[str(i)][str(rep)]['y_exact'] * -3.32237
with open('results/hartmann6_100_randomlinebo.json', 'w') as fout:
json.dump(ys['RandomLineBO'].tolist(), fout)
with open('results/hartmann6_100_coordinatelinebo.json', 'w') as fout:
json.dump(ys['CoordinateLineBO'].tolist(), fout)
with open('results/hartmann6_100_descentlinebo.json', 'w') as fout:
json.dump(ys['DescentLineBO'].tolist(), fout)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for: ALEBO, REMBO, HeSBO, and Sobol.
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import json
from benchmark_problems import (
branin_100,
branin_by_D,
gramacy_100,
hartmann6_100,
hartmann6_1000,
hartmann6_random_subspace_1000,
)
from ablation_models import (
ALEBOStrategy_projection_ablation,
ALEBOStrategy_kernel_ablation,
)
from ax.benchmark.benchmark import full_benchmark_run
from ax.benchmark.benchmark_result import aggregate_problem_results
from ax.modelbridge.registry import Models
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.strategies.alebo import ALEBOStrategy
from ax.modelbridge.strategies.rembo import HeSBOStrategy, REMBOStrategy
from ax.storage.json_store.encoder import object_to_json
def run_hartmann6_benchmarks(D, rep, random_subspace=False):
if D == 100:
problem = hartmann6_100
elif D == 1000 and not random_subspace:
problem = hartmann6_1000
elif D == 1000 and random_subspace:
problem = hartmann6_random_subspace_1000
strategy0 = GenerationStrategy(
name="Sobol",
steps=[
GenerationStep(
model=Models.SOBOL, num_arms=-1, model_kwargs={'seed': rep + 1}
)
],
)
strategy1 = ALEBOStrategy(D=D, d=12, init_size=10)
strategy2 = REMBOStrategy(D=D, d=6, init_per_proj=2)
    strategy3 = HeSBOStrategy(D=D, d=6, init_per_proj=10, name="HeSBO, d=d")
    strategy4 = HeSBOStrategy(D=D, d=12, init_per_proj=10, name="HeSBO, d=2d")
all_benchmarks = full_benchmark_run(
num_replications=1, # Running them 1 at a time for distributed
num_trials=200,
batch_size=1,
methods=[strategy0, strategy1, strategy2, strategy3, strategy4],
problems=[problem],
)
rs_str = 'random_subspace_' if random_subspace else ''
with open(
f'results/hartmann6_{rs_str}{D}_alebo_rembo_hesbo_sobol_rep_{rep}.json', "w"
) as fout:
json.dump(object_to_json(all_benchmarks), fout)
def run_branin_and_gramacy_100_benchmarks(rep):
strategy0 = GenerationStrategy(
name="Sobol",
steps=[
GenerationStep(
model=Models.SOBOL, num_arms=-1, model_kwargs={'seed': rep + 1}
)
],
)
strategy1 = ALEBOStrategy(D=100, d=4, init_size=10)
strategy2 = REMBOStrategy(D=100, d=2, init_per_proj=2)
    strategy3 = HeSBOStrategy(D=100, d=4, init_per_proj=10, name="HeSBO, d=2d")
all_benchmarks = full_benchmark_run(
num_replications=1,
num_trials=50,
batch_size=1,
methods=[strategy0, strategy1, strategy2, strategy3],
problems=[branin_100, gramacy_100],
)
with open(
f'results/branin_gramacy_100_alebo_rembo_hesbo_sobol_rep_{rep}.json', "w"
) as fout:
json.dump(object_to_json(all_benchmarks), fout)
def run_sensitivity_D_benchmarks(rep):
results_dict = {}
for D, problem in branin_by_D.items():
strategy1 = ALEBOStrategy(D=D, d=4, init_size=10)
all_benchmarks = full_benchmark_run(
num_replications=1,
num_trials=50,
batch_size=1,
methods=[strategy1],
problems=[problem],
)
results_dict[D] = object_to_json(all_benchmarks)
with open(f'results/sensitivity_D_rep_{rep}.json', "w") as fout:
json.dump(results_dict, fout)
def run_sensitivity_d_e_benchmarks(rep):
strategies = [
ALEBOStrategy(D=100, d=d_e, init_size=10, name=f'ALEBO, d={d_e}')
for d_e in [2, 3, 5, 6, 7, 8]
]
all_benchmarks = full_benchmark_run(
num_replications=1,
num_trials=50,
batch_size=1,
methods=strategies,
problems=[branin_100],
)
with open(f'results/sensitivity_d_e_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(all_benchmarks), fout)
def run_ablation_benchmarks(rep):
strategies = [
ALEBOStrategy_projection_ablation(D=100, d=4, init_size=10, name='ALEBO, projection ablation'),
ALEBOStrategy_kernel_ablation(D=100, d=4, init_size=10, name='ALEBO, kernel ablation'),
ALEBOStrategy(D=100, d=4, init_size=10, name='ALEBO, base'),
]
all_benchmarks = full_benchmark_run(
num_replications=1,
num_trials=50,
batch_size=1,
methods=strategies,
problems=[branin_100],
)
with open(f'results/ablation_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(all_benchmarks), fout)
if __name__ == '__main__':
# Run all of the benchmark replicates.
    # They are set up here to run as individual replicates because they can
    # be distributed.
for i in range(50):
# Hartmann6, D=100: Each rep takes ~2 hrs
run_hartmann6_benchmarks(D=100, rep=i)
# Hartmann6, D=1000: Each rep takes ~2.5 hrs
run_hartmann6_benchmarks(D=1000, rep=i)
# Hartmann6, D=1000: Each rep takes ~2.5 hrs
run_hartmann6_benchmarks(D=1000, rep=i, random_subspace=True)
# Branin and Gramacy, D=100: Each rep takes ~20 mins
run_branin_and_gramacy_100_benchmarks(rep=i)
# Sensitivity benchmarks: Each rep takes ~2 hrs
run_sensitivity_D_benchmarks(rep=i)
run_sensitivity_d_e_benchmarks(rep=i)
for i in range(100):
# Ablation benchmarks
run_ablation_benchmarks(rep=i)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Run benchmarks for Ensemble BO.
A few steps are required to use EBO:
(1) git clone https://github.com/zi-w/Ensemble-Bayesian-Optimization
in this directory. These experiments used commit
4e6f9ed04833cc2e21b5906b1181bc067298f914.
(2) fix a python3 issue by editing ebo_core/helper.py to insert
shape = int(shape)
in line 7.
"""
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import sys
sys.path.insert(1, os.path.join(os.getcwd(), 'Ensemble-Bayesian-Optimization'))
from ebo_core.ebo import ebo
import json
import numpy as np
from benchmark_problems import (
branin_100,
hartmann6_100,
hartmann6_1000,
)
from ax.benchmark.benchmark import benchmark_minimize_callable
from ax.storage.json_store.encoder import object_to_json
# These options are taken as package defaults from test_ebo.py
core_options = {
#'x_range':x_range, # input domain
#'dx':x_range.shape[1], # input dimension
#'max_value':f.f_max + sigma*5, # target value
#'T':10, # number of iterations
'B':10, # number of candidates to be evaluated
'dim_limit':3, # max dimension of the input for each additive function component
'isplot':0, # 1 if plotting the result; otherwise 0.
'z':None, 'k':None, # group assignment and number of cuts in the Gibbs sampling subroutine
'alpha':1., # hyperparameter of the Gibbs sampling subroutine
'beta':np.array([5.,2.]),
'opt_n':1000, # points randomly sampled to start continuous optimization of acfun
'pid':'test3', # process ID for Azure
'datadir':'tmp_data/', # temporary data directory for Azure
'gibbs_iter':10, # number of iterations for the Gibbs sampling subroutine
'useAzure':False, # set to True if use Azure for batch evaluation
'func_cheap':True, # if func cheap, we do not use Azure to test functions
'n_add':None, # this should always be None. it makes dim_limit complicated if not None.
'nlayers': 100, # number of the layers of tiles
    'gp_type':'l1', # choices are l1, sk, sf, dk, df
#'gp_sigma':0.1, # noise standard deviation
'n_bo':10, # min number of points selected for each partition
'n_bo_top_percent': 0.5, # percentage of top in bo selections
'n_top':10, # how many points to look ahead when doing choose Xnew
'min_leaf_size':10, # min number of samples in each leaf
'max_n_leaves':10, # max number of leaves
'thresAzure':1, # if batch size > thresAzure, we use Azure
'save_file_name': 'tmp/tmp.pk',
}
def run_hartmann6_benchmarks(D, rep):
if D == 100:
problem = hartmann6_100
elif D == 1000:
problem = hartmann6_1000
experiment, f = benchmark_minimize_callable(
problem=problem,
num_trials=200,
method_name='ebo',
replication_index=rep,
)
options = {
'x_range': np.vstack((np.zeros(D), np.ones(D))),
'dx': D,
'max_value': 3.32237, # Let it cheat and know the true max value
'T': 200,
'gp_sigma': 1e-7,
}
options.update(core_options)
f_max = lambda x: -f(x) # since EBO maximizes
e = ebo(f_max, options)
try:
e.run()
except ValueError:
pass # EBO can ask for more than T function evaluations
with open(f'results/hartmann6_{D}_ebo_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
def run_branin_benchmarks(rep):
experiment, f = benchmark_minimize_callable(
problem=branin_100,
num_trials=50,
method_name='ebo',
replication_index=rep,
)
options = {
'x_range': np.vstack((
np.hstack((-5 * np.ones(50), np.zeros(50))),
np.hstack((10 * np.ones(50), 15 * np.ones(50))),
)),
'dx': 100,
'max_value': -0.397887, # Let it cheat and know the true max value
'T': 50,
'gp_sigma': 1e-7,
}
options.update(core_options)
f_max = lambda x: -f(x) # since EBO maximizes
e = ebo(f_max, options)
try:
e.run()
except ValueError:
pass # EBO can ask for more than T function evaluations
with open(f'results/branin_100_ebo_rep_{rep}.json', "w") as fout:
json.dump(object_to_json(experiment), fout)
if __name__ == '__main__':
# Run all of the EBO benchmarks.
# These can be distributed.
for i in range(50):
# Hartmann6, D=100: Each rep takes ~2 hours
run_hartmann6_benchmarks(D=100, rep=i)
## Hartmann6, D=1000: Too slow, not run
#run_hartmann6_benchmarks(D=1000, rep=i)
# Branin, D=100: Each rep takes ~20 mins
run_branin_benchmarks(rep=i)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.
"""
Requires nasbench==1.0 from https://github.com/google-research/nasbench
Also requires dataset nasbench_only108.tfrecord to be downloaded here.
Creates an evaluation function for neural architecture search.
"""
import numpy as np
from ax.service.ax_client import AxClient
from nasbench.lib.model_spec import ModelSpec
from nasbench import api
nasbench = api.NASBench('nasbench_only108.tfrecord')
def get_spec(adj_indxs, op_indxs):
"""
Construct a NASBench spec from adjacency matrix and op indicators
"""
op_names = ['conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3']
ops = ['input']
ops.extend([op_names[i] for i in op_indxs])
ops.append('output')
iu = np.triu_indices(7, k=1)
adj_matrix = np.zeros((7, 7), dtype=np.int32)
adj_matrix[(iu[0][adj_indxs], iu[1][adj_indxs])] = 1
spec = ModelSpec(adj_matrix, ops)
return spec
def evaluate_x(x):
"""
Evaluate NASBench on the model defined by x.
x is a 36-d array.
The first 21 are for the adjacency matrix. Largest entries will have the
corresponding element in the adjacency matrix set to 1, with as many 1s as
possible within the NASBench model space.
The last 15 are for the ops in each of the five NASBench model components.
One-hot encoded for each of the 5 components, 3 options.
"""
assert len(x) == 36
x_adj = x[:21]
x_op = x[-15:]
x_ord = x_adj.argsort()[::-1]
op_indxs = x_op.reshape(3, 5).argmax(axis=0).tolist()
last_good = None
for i in range(1, 22):
model_spec = get_spec(x_ord[:i], op_indxs)
if model_spec.matrix is not None:
# We have a connected graph
# See if it has too many edges
if model_spec.matrix.sum() > 9:
break
last_good = model_spec
if last_good is None:
# Could not get a valid spec from this x. Return bad metric values.
        return 0.80, 50 * 60  # scalars, matching the feasible branch below
fixed_metrics, computed_metrics = nasbench.get_metrics_from_spec(last_good)
test_acc = [r['final_test_accuracy'] for r in computed_metrics[108]]
train_time = [r['final_training_time'] for r in computed_metrics[108]]
return np.mean(test_acc), np.mean(train_time)
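# Minimal usage sketch (assumes nasbench_only108.tfrecord is present, since
# this module loads it at import time). Nothing below calls it.
def _demo_evaluate_x():
    rng = np.random.RandomState(0)
    x = rng.rand(36)  # first 21: adjacency scores, last 15: op scores
    test_acc, train_time = evaluate_x(x)
    print(test_acc, train_time)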
def evaluate_parameters(parameters):
x = np.array([parameters[f'x{i}'] for i in range(36)])
test_acc, train_time = evaluate_x(x)
return {
'final_test_accuracy': (test_acc, 0.0),
'final_training_time': (train_time, 0.0),
}
def get_nasbench_ax_client(generation_strategy):
# Get parameters
parameters = [
{
"name": f"x{i}",
"type": "range",
"bounds": [0, 1],
"value_type": "float",
"log_scale": False,
} for i in range(36)
]
axc = AxClient(generation_strategy=generation_strategy, verbose_logging=False)
axc.create_experiment(
name="nasbench",
parameters=parameters,
objective_name="final_test_accuracy",
minimize=False,
outcome_constraints=["final_training_time <= 1800"],
)
return axc
class NASBenchRunner:
"""
A runner for non-Ax methods.
Assumes method MINIMIZES.
"""
def __init__(self, max_eval):
# For tracking iterations
self.fs = []
self.feas = []
self.n_eval = 0
self.max_eval = max_eval
def f(self, x):
if self.n_eval >= self.max_eval:
            raise ValueError("Evaluation budget exhausted")
test_acc, train_time = evaluate_x(x)
feas = bool(train_time <= 1800)
if not feas:
val = 0.80 # bad value for infeasible
else:
val = test_acc
self.n_eval += 1
self.fs.append(test_acc) # Store the true, not-negated value
self.feas.append(feas)
return -val # ASSUMES METHOD MINIMIZES
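# Hedged usage sketch: a minimizing optimizer would call runner.f directly
# with 36-d points; the random point here is hypothetical and nothing in
# this file runs it.
def _demo_nasbench_runner():
    runner = NASBenchRunner(max_eval=1)
    x = np.random.rand(36)
    neg_val = runner.f(x)  # negated accuracy, since the method minimizes
    print(neg_val, runner.fs, runner.feas)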
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from crlapi.benchmark import StreamTrainer
import hydra
from omegaconf import DictConfig, OmegaConf
def to_dict(cfg):
r = {}
for k, v in cfg.items():
if isinstance(v, DictConfig):
td = to_dict(v)
for kk in td:
r[k + "/" + kk] = td[kk]
else:
r[k] = v
return r
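# Minimal sketch of what to_dict produces on a nested config; the keys are
# hypothetical and serve only as an illustration.
def _demo_to_dict():
    cfg = OmegaConf.create({"model": {"lr": 0.1}, "seed": 0})
    assert to_dict(cfg) == {"model/lr": 0.1, "seed": 0}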
@hydra.main(config_path=".", config_name="test_finetune_mlp.yaml")
def main(cfg):
import torch.multiprocessing as mp
mp.set_start_method("spawn")
import time
stream_trainer = StreamTrainer()
stream_trainer.run(cfg)
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi import instantiate_class,get_class,get_arguments
class StreamTrainer:
def create_logger(self, logger_args,all_args):
self.logger=instantiate_class(logger_args)
self.logger.save_hps(all_args)
def create_stream(self, stream_args):
return instantiate_class(stream_args)
def create_clmodel(self, cl_model_args):
from importlib import import_module
d = dict(cl_model_args)
if "classname" in d:
classname = d["classname"]
else:
classname = d["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
self.clmodel=c(self.train_stream,cl_model_args)
def run(self, args):
self.create_logger(args.logger,args)
stream_args = args.stream.train
self.train_stream=self.create_stream(stream_args)
stream_args = args.stream.evaluation
self.evaluation_stream=self.create_stream(stream_args)
clmodel_args = args.clmodel
self.create_clmodel(clmodel_args)
evaluation_args = args.evaluation
#args=_prefix(args,"benchmark/")
evaluation_mode=evaluation_args.mode
assert evaluation_mode=="all_tasks" or evaluation_mode=="previous_tasks"
for n_stage, task in enumerate(self.train_stream):
self.logger.message("Training at stage "+str(n_stage))
training_logger = self.logger.get_logger(f"train_stage_{n_stage}/")
self.clmodel = self.clmodel.update(task, training_logger)
evaluation_logger = self.logger.get_logger(f"evaluation_stage_{n_stage}/")
self.logger.message("Evaluation at stage "+str(n_stage))
for k,evaluation_task in enumerate(self.evaluation_stream):
                if evaluation_mode=="previous_tasks" and k>n_stage:
                    continue
                self.logger.message("\tEvaluation on task "+str(k))
                evaluation=self.clmodel.evaluate(evaluation_task,evaluation_logger,evaluation_args)
                self.logger.message("\t == "+str(evaluation))
                for kk,vv in evaluation.items():
                    evaluation_logger.add_scalar(kk,vv,k)
self.logger.close()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def instantiate_class(arguments):
from importlib import import_module
d = dict(arguments)
if "classname" in d:
classname = d["classname"]
del d["classname"]
else:
classname = d["class_name"]
del d["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
return c(**d)
def get_class(arguments):
from importlib import import_module
d = dict(arguments)
if "classname" in d:
classname = d["classname"]
del d["classname"]
else:
classname = d["class_name"]
del d["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
return c
def get_arguments(arguments):
from importlib import import_module
d = dict(arguments)
if "classname" in d:
classname = d["classname"]
del d["classname"]
else:
classname = d["class_name"]
del d["class_name"]
return d
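# Minimal sketch of the classname convention, using a standard-library class
# as the target; the arguments dict here is hypothetical.
def _demo_instantiate_class():
    args = {"classname": "datetime.timedelta", "days": 1}
    td = instantiate_class(args)  # datetime.timedelta(days=1)
    assert get_class(args).__name__ == "timedelta"
    assert get_arguments(args) == {"days": 1}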
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
class TaskResources:
""" Describe resources for a task (e.g a dataset, and environments, etc...)
"""
def make(self):
raise NotImplementedError
class Task:
""" Describe a task to solve with a task descriptor, and associated ressources
"""
def task_descriptor(self):
raise NotImplementedError
def task_resources(self):
raise NotImplementedError
class CLModel:
""" A continual learning model that is updated on different tasks. Such a model can evaluate itself on a particular task
"""
def __init__(self, config):
self.config = config
def update(self, task, logger):
# return a clmodel
raise NotImplementedError
def evaluate(self, task,logger,**evaluation_args):
raise NotImplementedError
class Stream:
""" A stream of tasks
"""
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __getitem__(self,k):
raise NotImplementedError
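# Hedged sketch of the Stream contract: a toy in-memory stream, illustration
# only and not part of the library API.
class _ListStream(Stream):
    def __init__(self, tasks):
        self.tasks = list(tasks)
    def __len__(self):
        return len(self.tasks)
    def __iter__(self):
        return iter(self.tasks)
    def __getitem__(self, k):
        return self.tasks[k]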
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from torch.utils.tensorboard import SummaryWriter
import sqlite3
import os
import os.path
import csv
import copy
from datetime import datetime
import torch
import numpy as np
import time
import pickle
import bz2
import sys
import pandas as pd
from omegaconf import DictConfig, OmegaConf
class TFPrefixLogger:
def __init__(self,prefix,logger):
self.logger=logger
self.prefix=prefix
def add_images(self, name, value, iteration):
self.logger.add_images(self.prefix+name,value,iteration)
def add_scalar(self, name, value, iteration):
self.logger.add_scalar(self.prefix+name,value,iteration)
def add_video(self, name, value, iteration, fps=10):
self.logger.add_video(self.prefix+name,value,iteration,fps)
def message(self,msg,from_name=""):
self.logger.message(msg,from_name=self.prefix+from_name)
def debug(self,msg,from_name=""):
self.logger.debug(msg,from_name=self.prefix+from_name)
class TFLogger(SummaryWriter):
"""A logger that stores informations both in tensorboard and CSV formats"""
def __init__(
self, log_dir=None, cache_size=10000, modulo=1,verbose=False, use_zip=True
):
SummaryWriter.__init__(self, log_dir=log_dir)
self.use_zip = use_zip
self.save_every = cache_size
self.modulo=modulo
self.written_values={}
self.log_dir = log_dir
self.verbose = verbose
self.picklename = log_dir + "/db.pickle.bzip2"
if not self.use_zip:
self.picklename = log_dir + "/db.pickle"
self.to_pickle = []
def _omegaconf_to_dict(self,hps):
d={}
for k,v in hps.items():
if isinstance(v,DictConfig):
d[k]=self._omegaconf_to_dict(v)
else:
d[k]=v
return d
def save_hps(self, hps):
hps=self._omegaconf_to_dict(hps)
print(hps)
f = open(self.log_dir + "/params.json", "wt")
f.write(str(hps) + "\n")
f.close()
outfile = open(self.log_dir + "/params.pickle", "wb")
pickle.dump(hps, outfile)
outfile.close()
self.add_text("Hyperparameters", str(hps))
def get_logger(self,prefix):
return TFPrefixLogger(prefix,self)
def message(self,msg,from_name=""):
print("[",from_name,"]: ",msg)
def debug(self,msg,from_name=""):
print("[DEBUG] [",from_name,"]: ",msg)
def _to_pickle(self, name, value, iteration):
self.to_pickle.append((name, iteration, value))
if len(self.to_pickle) > self.save_every:
if self.use_zip:
f = bz2.BZ2File(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
else:
f = open(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
self.to_pickle = []
def add_images(self, name, value, iteration):
iteration=int(iteration/self.modulo)*self.modulo
if (name,iteration) in self.written_values:
return
else:
self.written_values[(name,iteration)]=True
self._to_pickle(name, value, iteration)
SummaryWriter.add_images(self, name, value, iteration)
def add_scalar(self, name, value, iteration):
iteration=int(iteration/self.modulo)*self.modulo
if (name,iteration) in self.written_values:
return
else:
self.written_values[(name,iteration)]=True
self._to_pickle(name, value, iteration)
if self.verbose:
print("['" + name + "' at " + str(iteration) + "] = " + str(value))
if isinstance(value, int) or isinstance(value, float):
SummaryWriter.add_scalar(self, name, value, iteration)
def add_video(self, name, value, iteration, fps=10):
iteration=int(iteration/self.modulo)*self.modulo
if (name,iteration) in self.written_values:
return
else:
self.written_values[(name,iteration)]=True
self._to_pickle(name, value.numpy(), iteration)
SummaryWriter.add_video(self, name, value, iteration, fps=fps)
def close(self):
if len(self.to_pickle) > 0:
if self.use_zip:
f = bz2.BZ2File(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
else:
f = open(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
self.to_pickle = []
SummaryWriter.close(self)
f = open(self.log_dir + "/done", "wt")
f.write("Done\n")
f.close()
class Log:
def __init__(self, hps, values):
self.hps = hps
self.values = values
        max_length = max([len(v) for v in self.values.values()])
for k in values:
while len(values[k]) < max_length:
values[k].append(None)
self.length = max_length
def to_xy(self, name):
assert name in self.values
x, y = [], []
for k, v in enumerate(self.values[name]):
if not v is None:
x.append(k)
y.append(v)
return x, y
def to_dataframe(self, with_hps=False):
        max_len = max([len(v) for v in self.values.values()])
nv = {}
for k, v in self.values.items():
while len(v) < max_len:
v.append(None)
nv[k] = v
self.values = nv
it = [k for k in range(max_len)]
d = {**self.values, **{"iteration": it}}
if with_hps:
for k in self.hps:
dd = [self.hps[k] for i in range(max_len)]
d = {**d, **{"_hp/" + k: dd}}
return pd.DataFrame(d)
def get_at(self, name, iteration):
return self.values[name][iteration]
def get(self, name, keep_none=False):
v = self.values[name]
if not keep_none:
return [k for k in v if not k is None]
else:
return v
def replace_None_(self, name):
v = self.values[name]
last_v = None
first_v = None
r = []
for k in range(len(v)):
if v[k] is None:
r.append(last_v)
else:
r.append(v[k])
if last_v is None:
first_v = v[k]
last_v = v[k]
p = 0
while r[p] is None:
r[p] = first_v
p += 1
self.values[name] = r
def max(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
return np.max(vv)
def min(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
return np.min(vv)
def argmin(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
        _max = np.max(vv)
        vv = []  # rebuild with padding so the returned index matches self.values[name]
        for k in range(len(v)):
if v[k] is None:
vv.append(_max + 1.0)
else:
vv.append(v[k])
return np.argmin(vv)
def argmax(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
_min = np.min(vv)
vv = []
for k in range(len(v)):
if v[k] is None:
vv.append(_min - 1.0)
else:
vv.append(v[k])
return np.argmax(vv)
class Logs:
def __init__(self):
self.logs = []
self.hp_names = None
self.filenames = []
def _add(self, log):
self.hp_names = {k: True for k in log.hps}
for l in self.logs:
for k in log.hps:
if not k in l.hps:
l.hps[k] = "none"
self.logs.append(log)
def add(self, logs):
if isinstance(logs, Log):
self._add(logs)
else:
for l in logs:
self._add(l)
def max(self, function):
alls = [function(l) for l in self.logs]
idx = np.argmax(alls)
return self.logs[idx]
def hps(self):
return list(self.hp_names)
def size(self):
return len(self.logs)
def filter(self, hp_name, test_fn):
logs = Logs()
if not callable(test_fn):
for l in self.logs:
h = l.hps[hp_name]
if h == test_fn:
logs.add(l)
else:
for l in self.logs:
if test_fn(l.hps[hp_name]):
logs.add(l)
return logs
def unique_hps(self, name):
r = {}
for l in self.logs:
v = l.hps[name]
r[v] = 1
return list(r.keys())
def __len__(self):
return len(self.logs)
def to_dataframe(self):
rdf = None
for log in self.logs:
df = log.to_dataframe(with_hps=True)
if rdf is None:
rdf = df
else:
rdf = pd.concat([rdf, df])
return rdf
# def plot(self, y, x, hue=None, style=None, row=None, col=None, kind="line"):
def flattify(d):
d=dict(d)
r = {}
for k, v in d.items():
if isinstance(v, dict):
rr = flattify(v)
rrr = {k + "/" + kk: rr[kk] for kk in rr}
r = {**r, **rrr}
elif isinstance(v, list):
r[k] = str(v)
else:
r[k] = v
return r
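# Minimal sketch of flattify's behavior (illustration only): nested dicts are
# joined with '/', and lists are stringified.
def _demo_flattify():
    d = {"a": {"b": 1}, "c": [1, 2]}
    assert flattify(d) == {"a/b": 1, "c": "[1, 2]"}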
def read_log(directory, use_bz2=True, debug=False):
print("== Read ", directory)
# if os.path.exists(directory+"/fast.pickle"):
# f=open(directory+"/fast.pickle","rb")
# log=pickle.load(f)
# f.close()
# return log
f = None
if use_bz2:
picklename = directory + "/db.pickle.bzip2"
f = bz2.BZ2File(picklename, "rb")
else:
picklename = directory + "/db.pickle"
f = open(picklename, "rb")
values = {}
try:
while True:
a = pickle.load(f)
if not a is None:
for name, iteration, value in a:
# print(name,iteration,value)
if debug:
print(name, value, type(value))
if isinstance(value, np.int64):
value = int(value)
if (
isinstance(value, int)
or isinstance(value, float)
or isinstance(value, str)
):
if not name in values:
values[name] = []
while len(values[name]) < iteration + 1:
values[name].append(None)
values[name][iteration] = value
    except EOFError:
f.close()
f = open(directory + "/params.pickle", "rb")
params = pickle.load(f)
params = flattify(params)
f.close()
log = Log(params, values)
log.from_directory = directory
# f=open(directory+"/fast.pickle","wb")
# pickle.dump(log,f)
# f.close()
return log
def read_directory(directory, use_bz2=True):
import os
import os.path
l = Logs()
name = "db.pickle"
if use_bz2:
name = "db.pickle.bzip2"
for dirpath, dirnames, filenames in os.walk(directory):
if name in filenames:
log = read_log(dirpath, use_bz2)
l.add(log)
print("Found %d logs" % l.size())
return l
def _create_col(df, hps, _name):
vs = []
for k, v in df.groupby(hps):
n = {hps[i]: k[i] for i in range(len(hps))}
v = v.copy()
name = ",".join([str(k) + "=" + str(n[k]) for k in n])
print(name)
print(_name)
v[_name] = name
vs.append(v)
return pd.concat(vs)
def plot_dataframe(
df, y, x="iteration", hue=None, style=None, row=None, col=None, kind="line"
):
import seaborn as sns
cols = [y, x]
if isinstance(row, list):
cols += row
else:
cols += [row]
if isinstance(col, list):
cols += col
else:
cols += [col]
if isinstance(style, list):
cols += style
else:
cols += [style]
if isinstance(hue, list):
cols += hue
else:
cols += [hue]
cols = [c for c in cols if not c is None]
df = df[cols].dropna()
if isinstance(row, list):
df = _create_col(df, row, "__row")
row = "__row"
if isinstance(col, list):
df = _create_col(df, col, "__col")
col = "__col"
if isinstance(style, list):
df = _create_col(df, style, "__style")
style = "__style"
if isinstance(hue, list):
df = _create_col(df, hue, "__hue")
hue = "__hue"
# df = convert_iteration_to_steps(df)
sns.relplot(x=x, y=y, hue=hue, style=style, row=row, col=col, data=df, kind=kind)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi.core import TaskResources, Stream, Task
import torchvision.datasets
import torchvision.transforms
import numpy.random
import numpy
import torch.utils.data
import torch
class CachedCIFAR10(torchvision.datasets.CIFAR10):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data = torch.from_numpy(self.data).float().permute(0, 3, 1, 2)
self.targets = numpy.array(self.targets)
self.data = self.data / 255.
# normalize
mu = torch.Tensor([0.4914, 0.4822, 0.4465]).view(1, 3, 1, 1)
        std = torch.Tensor([0.2470, 0.2435, 0.2616]).view(1, 3, 1, 1)  # per-channel std, not variance
        self.data = (self.data - mu) / std
def __getitem__(self, index):
x, y = self.data[index], self.targets[index]
if self.transform is not None:
x = self.transform(x)
return x, y
class ClassificationResources(TaskResources):
def __init__(self):
pass
class CIFAR10Resources(ClassificationResources):
def __init__(self, idx_batch, n_total_batches, seed,train,directory):
self.idx_batch=idx_batch
self.n_total_batches=n_total_batches
self.train=train
self.seed=seed
self.directory=directory
def make(self):
dataset=CachedCIFAR10(self.directory, train=self.train, download=True)
if self.n_total_batches==1:
return dataset
numpy.random.seed(self.seed)
indices=numpy.arange(len(dataset))
indices=numpy.random.permutation(indices)
_indices=numpy.array_split(indices,self.n_total_batches)
indices=list(_indices[self.idx_batch])
_set=torch.utils.data.Subset(dataset,indices)
return _set
class CIFAR10Task(Task):
def __init__(self,task_descriptor,resources):
self._task_descriptor=task_descriptor
self._resources=resources
self.input_shape=(3, 32, 32)
self.n_classes=10
def task_descriptor(self):
return self._task_descriptor
def task_resources(self):
return self._resources
class CIFAR10EvaluationAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
evaluation_resources=CIFAR10Resources(0,1,seed,False,directory)
self.tasks.append(CIFAR10Task(None,evaluation_resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
class CIFAR10TrainAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
for k in range(n_megabatches):
resources=CIFAR10Resources(k,n_megabatches,seed,True,directory)
self.tasks.append(CIFAR10Task(k,resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi.core import TaskResources, Stream, Task
import torchvision.datasets
import torchvision.transforms
import numpy.random
import numpy
import torch.utils.data
import torch
# TODO: did not verify this dataset
class CachedEMNIST(torchvision.datasets.EMNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.targets = self.targets.numpy()
self.data = (self.data / 255. - .1307) / .3081
def __getitem__(self, index):
x, y = self.data[index], self.targets[index]
if self.transform is not None:
x = self.transform(x)
return x, y
class ClassificationResources(TaskResources):
def __init__(self):
pass
class EMNISTResources(ClassificationResources):
def __init__(self, idx_batch, n_total_batches, seed,train,split,directory):
self.idx_batch=idx_batch
self.split=split
self.n_total_batches=n_total_batches
self.train=train
self.seed=seed
self.directory=directory
def n_classes(self):
print("Compputing n classes...")
dataset=CachedEMNIST(self.directory, split=self.split,train=self.train, download=True)
n=len(dataset.classes_split_dict[self.split])
return n
def make(self):
dataset=torchvision.datasets.EMNIST(self.directory, split=self.split,train=self.train, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))
if self.n_total_batches==1:
return dataset
numpy.random.seed(self.seed)
indices=numpy.arange(len(dataset))
indices=numpy.random.permutation(indices)
_indices=numpy.array_split(indices,self.n_total_batches)
indices=list(_indices[self.idx_batch])
_set=torch.utils.data.Subset(dataset,indices)
return _set
class EMNISTTask(Task):
def __init__(self,task_descriptor,resources):
self._task_descriptor=task_descriptor
self._resources=resources
self.input_shape=(1,28,28)
self.n_classes=self._resources.n_classes()
def task_descriptor(self):
return self._task_descriptor
def task_resources(self):
return self._resources
class EMNISTEvaluationAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,split,directory):
self.tasks=[]
evaluation_resources=EMNISTResources(0,1,seed,False,split,directory)
self.tasks.append(EMNISTTask(None,evaluation_resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
class EMNISTTrainAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,split,directory):
self.tasks=[]
for k in range(n_megabatches):
resources=EMNISTResources(k,n_megabatches,seed,True,split,directory)
self.tasks.append(EMNISTTask(k,resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi.core import TaskResources, Stream, Task
import torchvision.datasets
import torchvision.transforms
import numpy.random
import numpy
import torch.utils.data
import torch
class CachedMNIST(torchvision.datasets.MNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.targets = self.targets.numpy()
self.data = (self.data / 255. - .1307) / .3081
def __getitem__(self, index):
x, y = self.data[index], self.targets[index]
if self.transform is not None:
x = self.transform(x)
return x, y
class ClassificationResources(TaskResources):
def __init__(self):
pass
class MNISTResources(ClassificationResources):
def __init__(self, idx_batch, n_total_batches, seed,train,directory):
self.idx_batch=idx_batch
self.n_total_batches=n_total_batches
self.train=train
self.seed=seed
self.directory=directory
def make(self):
dataset=CachedMNIST(self.directory, train=self.train, download=True)
if self.n_total_batches==1:
return dataset
numpy.random.seed(self.seed)
indices=numpy.arange(len(dataset))
indices=numpy.random.permutation(indices)
_indices=numpy.array_split(indices,self.n_total_batches)
indices=list(_indices[self.idx_batch])
_set=torch.utils.data.Subset(dataset,indices)
return _set
class MNISTTask(Task):
def __init__(self,task_descriptor,resources):
self._task_descriptor=task_descriptor
self._resources=resources
self.input_shape=(1,28,28)
self.n_classes=10
def task_descriptor(self):
return self._task_descriptor
def task_resources(self):
return self._resources
class MNISTEvaluationAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
evaluation_resources=MNISTResources(0,1,seed,False,directory)
self.tasks.append(MNISTTask(None,evaluation_resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
class MNISTTrainAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
for k in range(n_megabatches):
resources=MNISTResources(k,n_megabatches,seed,True,directory)
self.tasks.append(MNISTTask(k,resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy
from crlapi.sl.architectures.mixture_model import (
HardSoftMaxGateModule,
SoftMaxGateModule,
MixtureLayer,
MoE_RandomGrow,
MoE_UsageGrow,
Gate,
MoE,
)
# -- Gates
class SoftGate(Gate):
def __init__(self, input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape, n_experts)
gate_fn = nn.Linear(input_shape, n_experts)
if prepro_fn is not None:
self.prepro_fn = prepro_fn
gate_fn = nn.Sequential(prepro_fn, gate_fn)
self.module = SoftMaxGateModule(gate_fn)
def forward(self,x):
return self.module(x)
class HardGate(Gate):
def __init__(self, input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape, n_experts)
gate_fn = nn.Linear(input_shape, n_experts)
if prepro_fn is not None:
gate_fn = nn.Sequential(prepro_fn, gate_fn)
self.module = HardSoftMaxGateModule(gate_fn)
def forward(self,x):
return self.module(x)
# -- Layers
def _make_layers(array, in_channels):
layers = []
for x in array:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
return in_channels, nn.Sequential(*layers)
def VGG(task, n_channels):
vgg_parts = [ [64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 256, 'M'], [512, 512, 512, 512, 'M'], [512, 512, 512, 512, 'M'] ]
if n_channels > 0:
vgg_parts = [[n_channels if type(x) == int else x for x in block] for block in vgg_parts]
in_channels, block0 = _make_layers(vgg_parts[0], 3)
in_channels, block1 = _make_layers(vgg_parts[1], in_channels)
in_channels, block2 = _make_layers(vgg_parts[2], in_channels)
in_channels, block3 = _make_layers(vgg_parts[3], in_channels)
in_channels, block4 = _make_layers(vgg_parts[4], in_channels)
return nn.Sequential(
block0,
block1,
block2,
block3,
block4,
nn.Flatten(),
nn.Linear(in_channels, task.n_classes)
)
def MoE_VGG(task, n_channels, n_adaptivepooling, n_experts, is_hard):
vgg_parts = [ [64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 256, 'M'], [512, 512, 512, 512, 'M'], [512, 512, 512, 512, 'M'] ]
input_shape = task.input_shape
gate = HardGate if is_hard else SoftGate
if n_channels > 0:
vgg_parts = [[n_channels if type(x) == int else x for x in block] for block in vgg_parts]
in_channels, head = _make_layers(vgg_parts[0], 3)
in_channels, block1 = _make_layers(vgg_parts[1], in_channels)
in_channels, block2 = _make_layers(vgg_parts[2], in_channels)
in_channels, block3 = _make_layers(vgg_parts[3], in_channels)
in_channels, block4 = _make_layers(vgg_parts[4], in_channels)
blocks = [block1, block2, block3, block4]
dim_gates = []
x = torch.randn(1,3,32,32)
for layer in [head] + blocks:
x = layer(x)
dim_gates += [x.shape[1:]]
# Build Layers
layers = [head]
for i, (block, dim_gate) in enumerate(zip(blocks, dim_gates[:-1])):
# build adaptive pooling gate
input_size = dim_gate[0] * n_adaptivepooling ** 2
gate_fn = nn.Sequential(
nn.AdaptiveAvgPool2d(n_adaptivepooling),
nn.Flatten(),
)
experts = [deepcopy(block) for _ in range(n_experts)]
layers += [MixtureLayer(gate(input_size, n_experts, gate_fn), experts)]
linear = nn.Linear(np.prod(dim_gates[-1]), task.n_classes)
    layers += [nn.Flatten(), linear]
model = MoE(layers)
return model
def MoE_VGG_RandomGrow(task, n_channels, n_adaptivepooling, n_experts, is_hard, n_experts_to_split):
moe = MoE_VGG(task, n_channels, n_adaptivepooling, n_experts, is_hard)
return MoE_RandomGrow(moe.layers,n_experts_to_split)
def MoE_VGG_UsageGrow(task, n_channels, n_adaptivepooling, n_experts, is_hard, n_experts_to_split):
moe = MoE_VGG(task, n_channels, n_adaptivepooling, n_experts, is_hard)
return MoE_UsageGrow(moe.layers,n_experts_to_split)
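# Shape-check sketch (hypothetical helper with a stand-in task object; the
# real crlapi tasks expose the same `input_shape`/`n_classes` attributes):
def _demo_moe_vgg():
    class _ToyTask:
        input_shape = (3, 32, 32)
        n_classes = 10
    moe = MoE_VGG(_ToyTask(), n_channels=16, n_adaptivepooling=2, n_experts=2, is_hard=False)
    out = moe(torch.randn(4, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([4, 10])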
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import random
import numpy as np
class SoftMaxGateModule(nn.Module):
def __init__(self,module):
super().__init__()
self.module=module
def forward(self,x):
y=self.module(x)
return torch.softmax(y,dim=1)
class HardSoftMaxGateModule(nn.Module):
def __init__(self,module):
super().__init__()
self.module=module
    def forward(self,x):
        # softmax first so the Categorical below receives valid probabilities
        y=torch.softmax(self.module(x),dim=1)
        if self.training:
            dist=torch.distributions.Categorical(probs=y)
            sampled_y=dist.sample()
            oh=F.one_hot(sampled_y,num_classes=y.size()[1])
            # straight-through: hard one-hot forward, soft softmax gradient backward
            return oh+(y-y.detach())
        else:
            _max=y.max(1)[1]
            oh=F.one_hot(_max,num_classes=y.size()[1])
            return oh
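# The class above implements a straight-through estimator: the forward pass
# returns a hard one-hot routing decision, while `(y - y.detach())` carries
# the soft softmax gradient to the gate parameters in the backward pass.
# A minimal illustration (hypothetical helper, not used elsewhere):
def _demo_straight_through():
    gate = HardSoftMaxGateModule(nn.Linear(8, 3))
    gate.train()
    out = gate(torch.randn(4, 8))  # rows are one-hot expert selections
    out.sum().backward()           # gradients still reach the linear gate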
class Gate(nn.Module):
def __init__(self,input_shape,n_experts, prepro_fn=None):
self.input_shape=input_shape
super().__init__()
def _weight(output,score):
s=output.size()
    while len(score.size())<len(s):
score=score.unsqueeze(-1)
score=score.repeat(1,*s[1:])
return output*score
class MixtureLayer(nn.Module):
def __init__(self,gate_module,experts):
super().__init__()
assert isinstance(gate_module,Gate)
self.gate=gate_module
self.experts=nn.ModuleList(experts)
def forward(self,x):
out=0.0
scores=self.gate(x)
gate_scores=[]
for k,e in enumerate(self.experts):
score=scores[:,k]
if isinstance(e,MixtureLayer):
y,g=e(x)
for kk,vv in enumerate(g):
gate_scores.append(([k]+vv[0],vv[1]*score))
else:
y=e(x)
gate_scores.append(([k],score))
y=_weight(y,score)
out=out+y
return out,gate_scores
class MoE(nn.Module):
def __init__(self,layers):
super().__init__()
self.layers=nn.ModuleList(layers)
@property
def device(self):
return next(self.parameters()).device
def forward(self,x,with_gate_scores=False):
gate_scores=[]
for l in self.layers:
if isinstance(l,MixtureLayer):
x,g=l(x)
gate_scores.append(g)
else:
x=l(x)
if with_gate_scores:
return x,gate_scores
else:
return x
class MoE_RandomGrow(MoE):
def __init__(self,layers,n_experts_split):
super().__init__(layers)
self.n_experts_split=n_experts_split
def _list_experts(self,layer):
assert isinstance(layer,MixtureLayer)
experts_url=[]
for k,e in enumerate(layer.experts):
if not isinstance(e,MixtureLayer):
experts_url.append([k])
else:
le=self._list_experts(e)
for v in le:
experts_url.append([k]+v)
return experts_url
def _generate_splitting(self,layer,url_to_split):
idx_split=url_to_split[0]
gate=copy.deepcopy(layer.gate)
experts=[]
for k,e in enumerate(layer.experts):
if k!=idx_split:
experts.append(copy.deepcopy(e))
elif len(url_to_split)>1:
experts.append(self._generate_splitting(e,url_to_split[1:]))
else:
n_experts=[copy.deepcopy(e) for _ in range(self.n_experts_split)]
n_gate=layer.gate.__class__(layer.gate.input_shape, self.n_experts_split, getattr(layer.gate, 'prepro_fn', None))
experts.append(MixtureLayer(n_gate,n_experts))
return MixtureLayer(gate,experts)
def _grow_layer(self,layer):
assert isinstance(layer,MixtureLayer)
#First, we list all the experts
experts_urls=self._list_experts(layer)
print("\tList of experts: ",experts_urls)
#Choose one expert at random
expert_to_split=random.choice(experts_urls)
print("\t\tSplitting expert: "+str(expert_to_split))
new_module=self._generate_splitting(layer,expert_to_split)
experts_urls=self._list_experts(new_module)
print("\t\tNew list of experts = ",experts_urls)
return new_module
def grow(self,dataset_loader,**args):
if self.n_experts_split==0:
return self
self.zero_grad()
new_layers=[]
for l in self.layers:
if isinstance(l,MixtureLayer):
new_layers.append(self._grow_layer(l))
else:
new_layers.append(copy.deepcopy(l))
return MoE_RandomGrow(new_layers,self.n_experts_split)
class MoE_UsageGrow(MoE):
def __init__(self,layers,n_experts_split):
super().__init__(layers)
self.n_experts_split=n_experts_split
def _list_experts(self,layer):
assert isinstance(layer,MixtureLayer)
experts_url=[]
for k,e in enumerate(layer.experts):
if not isinstance(e,MixtureLayer):
experts_url.append([k])
else:
le=self._list_experts(e)
for v in le:
experts_url.append([k]+v)
return experts_url
def _generate_splitting(self,layer,url_to_split):
idx_split=url_to_split[0]
gate=copy.deepcopy(layer.gate)
experts=[]
for k,e in enumerate(layer.experts):
if k!=idx_split:
experts.append(copy.deepcopy(e))
elif len(url_to_split)>1:
experts.append(self._generate_splitting(e,url_to_split[1:]))
else:
n_experts=[copy.deepcopy(e) for _ in range(self.n_experts_split)]
n_gate=layer.gate.__class__(layer.gate.input_shape, self.n_experts_split, getattr(layer.gate, 'prepro_fn', None))
experts.append(MixtureLayer(n_gate,n_experts))
return MixtureLayer(gate,experts)
def _grow_layer(self,layer,to_split_expert):
assert isinstance(layer,MixtureLayer)
#First, we list all the experts
experts_urls=self._list_experts(layer)
print("\tList of experts: ",experts_urls)
print("\t To split: ",to_split_expert)
assert to_split_expert in experts_urls
new_module=self._generate_splitting(layer,to_split_expert)
experts_urls=self._list_experts(new_module)
print("\t\tNew list of experts = ",experts_urls)
return new_module
def grow(self,dataset_loader,**args):
if self.n_experts_split==0:
return self
with torch.no_grad():
usage=None
n=0
for x,y in dataset_loader:
x, y = x.to(self.device), y.to(self.device)
out,gate_scores=self(x,with_gate_scores=True)
loss=F.cross_entropy(out,y,reduction='none')
gate_scores=[[(gg[0],gg[1].sum(0)) for gg in g] for g in gate_scores]
n+=x.size()[0]
if usage is None:
usage=gate_scores
else:
for k,g in enumerate(gate_scores):
for kk,gg in enumerate(g):
assert gg[0]==usage[k][kk][0]
usage[k][kk]=(gg[0],gg[1]+usage[k][kk][1])
self.zero_grad()
new_layers=[]
p=0
for k,l in enumerate(self.layers):
if isinstance(l,MixtureLayer):
u=usage[p]
us=[uu[1].item() for uu in u]
idx=np.argmax(us)
print("Expert usage at layer ",k," is ",{str(uu[0]):uu[1].item() for uu in u})
max_expert=u[idx][0]
print("\tSplitting expert ",max_expert)
new_layers.append(self._grow_layer(l,max_expert))
p+=1
else:
new_layers.append(copy.deepcopy(l))
return MoE_UsageGrow(new_layers,self.n_experts_split)
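# Growth sketch (hypothetical helper; `grow` returns a new, wider model and
# leaves the original untouched). A toy gate is defined inline since `Gate`
# itself is abstract:
def _demo_random_grow():
    class _LinGate(Gate):
        def __init__(self, input_shape, n_experts, prepro_fn=None):
            super().__init__(input_shape, n_experts)
            self.module = SoftMaxGateModule(nn.Linear(input_shape[0], n_experts))
        def forward(self, x):
            return self.module(x)
    layer = MixtureLayer(_LinGate([8], 2), [nn.Linear(8, 8) for _ in range(2)])
    moe = MoE_RandomGrow([layer, nn.Linear(8, 4)], n_experts_split=2)
    grown = moe.grow(None)  # random growth does not need the data loader
    print(grown(torch.randn(3, 8)).shape)  # expected: torch.Size([3, 4])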
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from crlapi.sl.architectures.mixture_model import MixtureLayer,SoftMaxGateModule,HardSoftMaxGateModule,Gate,MoE,MoE_RandomGrow,MoE_UsageGrow
class MLP(nn.Module):
def __init__(self,task,**args):
super().__init__()
input_shape=task.input_shape
d=1
for k in input_shape:
d*=k
input_dim=d
output_dim=task.n_classes
sizes=[input_dim]+[args["size_layers"] for k in range(args["n_layers"])]+[output_dim]
print(sizes)
layers=[]
for k in range(len(sizes)-1):
layers.append(nn.Linear(sizes[k],sizes[k+1]))
if not k==len(sizes)-2:
layers.append(nn.ReLU())
self.model=nn.Sequential(*layers)
def forward(self,x):
x=torch.flatten(x,start_dim=1)
return self.model(x)
class LinearSoftGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=SoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
return self.module(x)
class LinearHardGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=HardSoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
return self.module(x)
def mlp_layers(task,size_layers,n_layers,n_experts,is_hard):
input_shape=task.input_shape
d=1
for k in input_shape:
d*=k
input_dim=d
output_dim=task.n_classes
sizes=[input_dim]+[size_layers for k in range(n_layers)]+[output_dim]
layers=[nn.Flatten(start_dim=1)]
for k in range(len(sizes)-2):
if is_hard:
gate=LinearHardGate([sizes[k]],n_experts)
else:
gate=LinearSoftGate([sizes[k]],n_experts)
experts=[nn.Sequential(nn.Linear(sizes[k],sizes[k+1]),nn.ReLU()) for _ in range(n_experts)]
layer=MixtureLayer(gate,experts)
layers.append(layer)
layers.append(nn.Linear(sizes[-2],sizes[-1]))
return layers
def MoE_MLP(task,size_layers,n_layers,n_experts,is_hard):
return MoE(mlp_layers(task,size_layers,n_layers,n_experts,is_hard))
def MoE_MLP_RandomGrow(task,size_layers,n_layers,n_experts,is_hard,n_experts_to_split):
return MoE_RandomGrow(mlp_layers(task,size_layers,n_layers,n_experts,is_hard),n_experts_to_split)
def MoE_MLP_UsageGrow(task,size_layers,n_layers,n_experts,is_hard,n_experts_to_split):
return MoE_UsageGrow(mlp_layers(task,size_layers,n_layers,n_experts,is_hard),n_experts_to_split)
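# Shape-check sketch (hypothetical helper with a stand-in task object):
def _demo_moe_mlp():
    class _ToyTask:
        input_shape = (1, 28, 28)
        n_classes = 10
    moe = MoE_MLP(_ToyTask(), size_layers=64, n_layers=2, n_experts=3, is_hard=False)
    print(moe(torch.randn(5, 1, 28, 28)).shape)  # expected: torch.Size([5, 10])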
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import logging
import numpy as np
import torch
from torch import nn
import torch.utils.data
from torch.nn import functional as F
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
self.activation = nn.ReLU()
def forward(self, x):
out = self.activation(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out = out + self.shortcut(x)
out = self.activation(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes, nf, input_size):
super(ResNet, self).__init__()
self.in_planes = nf
self.input_size = input_size
self.conv1 = conv3x3(input_size[0], nf * 1)
self.bn1 = nn.BatchNorm2d(nf * 1)
self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2)
# hardcoded for now
last_hid = nf * 8 * block.expansion
last_hid = last_hid * (self.input_size[-1] // 2 // 2 // 2 // 4) ** 2
self.linear = nn.Linear(last_hid, num_classes)
self.activation = nn.ReLU()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def return_hidden(self, x):
bsz = x.size(0)
assert x.ndim == 4
out = self.activation(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
# out = F.adaptive_avg_pool2d(out, 1)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def forward(self, x):
out = self.return_hidden(x)
out = self.linear(out)
return out
def ResNet18(nclasses, nf=20, input_size=(3, 32, 32), *args, **kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], nclasses, nf, input_size, *args, **kwargs)
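# Shape-check sketch (hypothetical helper; the 32x32 default matches the
# hardcoded `F.avg_pool2d(out, 4)` above):
def _demo_resnet18():
    net = ResNet18(nclasses=10)
    print(net(torch.randn(2, 3, 32, 32)).shape)  # expected: torch.Size([2, 10])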
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import copy
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
def percentile(t, qq):
    k = max(1, int(qq * t.numel()))  # kthvalue is 1-indexed; guard against k == 0
    return t.view(-1).kthvalue(k).values.item()
class GetSubnet(torch.autograd.Function):
@staticmethod
def forward(ctx, scores, zeros, ones, sparsity):
k_val = percentile(scores, sparsity)
return torch.where(scores < k_val, zeros.to(scores.device), ones.to(scores.device))
@staticmethod
def backward(ctx, g):
return g, None, None, None
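# A minimal check of the straight-through subnet selection (hypothetical
# helper): about a `sparsity` fraction of the scores is masked out in the
# forward pass, while `backward` passes the incoming gradient through as-is.
def _demo_get_subnet():
    scores = torch.rand(100, requires_grad=True)
    mask = GetSubnet.apply(scores, torch.zeros(100), torch.ones(100), 0.5)
    print(mask.sum())         # roughly 50 surviving weights
    mask.sum().backward()
    print(scores.grad.sum())  # straight-through: tensor(100.)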
# Not learning weights, finding subnet
class SubnetConv(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
self.register_buffer('ones', torch.ones_like(self.scores.data))
self.register_buffer('zeros', torch.zeros_like(self.scores.data))
# hardcoded for now
self.prune_rate = 0.5
def set_prune_rate(self, prune_rate):
self.prune_rate = prune_rate
@property
def clamped_scores(self):
return self.scores.abs()
def forward(self, x):
subnet = GetSubnet.apply(self.clamped_scores, self.zeros, self.ones, self.prune_rate)
w = self.weight * subnet
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
# -- Layers
def _make_layers(array, in_channels):
layers = []
for x in array:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [SubnetConv(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x, affine=False),
nn.ReLU(inplace=True)]
in_channels = x
return in_channels, layers
class SubnetVGG(nn.Module):
def __init__(self, task, n_channels, grow_n_units):
super().__init__()
self.grow_n_units = grow_n_units
vgg_parts = [ 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M' ]
if n_channels > 0:
vgg_parts = [n_channels if type(x) == int else x for x in vgg_parts]
out_channels, base = _make_layers(vgg_parts, 3)
self.net = nn.Sequential(
*base,
SubnetConv(out_channels, task.n_classes, kernel_size=1, padding=0),
nn.Flatten(),
)
def forward(self, x):
return self.net(x)
def grow(self, valid_loader, **args):
x = torch.FloatTensor(64, 3, 32, 32).normal_()
new_layers = []
for i, layer in enumerate(self.net):
if isinstance(layer, SubnetConv):
# input size
in_c = 3 if i == 0 else last_output_channels
# output size
out_c = layer.out_channels + (self.grow_n_units if i < len(self.net) - 2 else 0)
# what is the minimal score to be selected ?
max_val = percentile(layer.scores.abs(), layer.prune_rate)
min_val = layer.scores.abs().min().item()
# init new layer
new_layer = SubnetConv(in_c, out_c, kernel_size=layer.kernel_size, padding=layer.padding)
new_layer.scores.data.uniform_(min_val, max_val)
# adjust the prune rate so that the same amount of points get selected
new_layer.prune_rate = 1 - (1 - layer.prune_rate) * layer.weight.numel() / new_layer.weight.numel()
# copy the old params
a, b, c, d = layer.scores.size()
new_layer.weight[:a, :b, :c, :d].data.copy_(layer.weight.data)
new_layer.scores[:a, :b, :c, :d].data.copy_(layer.scores.data)
new_layer.bias.data.fill_(0)
new_layer.bias[:a].data.copy_(layer.bias)
last_output_channels = out_c
new_layers += [new_layer]
                # sanity check (disabled): the grown layer should match the
                # original layer on the first `a` output channels
                # assert torch.allclose(layer(x[:, :b]), new_layer(x)[:, :a])
elif isinstance(layer, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(last_output_channels, affine=False)
c = layer.running_mean.size(0)
new_bn.running_mean[:c].data.copy_(layer.running_mean.data)
new_bn.running_var[:c].data.copy_(layer.running_var.data)
new_layers += [new_bn]
new_bn.training = layer.training
# assert torch.allclose(layer(x[:, :c]), new_bn(x)[:, :c], atol=1e-7)
else:
new_layers += [copy.deepcopy(layer)]
x = new_layers[-1](x)
net = nn.Sequential(*new_layers)
copy_self = copy.deepcopy(self)
copy_self.net = net
print(net)
return copy_self
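# Growth sketch (hypothetical helper; `grow` widens every SubnetConv except
# the classifier head by `grow_n_units` channels and rescales the prune rate
# so the number of selected weights stays the same):
def _demo_subnet_grow():
    class _ToyTask:
        n_classes = 10
    net = SubnetVGG(_ToyTask(), n_channels=16, grow_n_units=8)
    bigger = net.grow(None)  # the validation loader is unused here
    print(sum(p.numel() for p in net.parameters()),
          sum(p.numel() for p in bigger.parameters()))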
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import partial
from einops.layers.torch import Rearrange, Reduce
import torch.nn as nn
from crlapi.sl.architectures.mixture_model import MixtureLayer,SoftMaxGateModule,HardSoftMaxGateModule,Gate,MoE,MoE_RandomGrow,MoE_UsageGrow
class LinearSoftGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=SoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
    def forward(self,x):
        return self.module(x)
class LinearHardGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=HardSoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
return self.module(x)
class PreNormResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
return self.fn(self.norm(x)) + x
def FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):
    return nn.Sequential(
        dense(dim, dim * expansion_factor),
        nn.GELU(),
        nn.Dropout(dropout),
        dense(dim * expansion_factor, dim),
        nn.Dropout(dropout)
    )
class PrintModule(nn.Module):
def __init__(self,msg=""):
super().__init__()
self.msg=msg
def forward(self,x):
print(self.msg," : ",x.size())
return x
def MLPMixer(task, patch_size, dim, depth, expansion_factor = 4, dropout = 0.):
image_size= task.input_shape[1]
assert image_size==task.input_shape[2]
channels=task.input_shape[0]
num_classes=task.n_classes
assert (image_size % patch_size) == 0, 'image must be divisible by patch size'
num_patches = (image_size // patch_size) ** 2
chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear
    return nn.Sequential(
        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
        nn.Linear((patch_size ** 2) * channels, dim),
        *[nn.Sequential(
            PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, chan_first)),
            PreNormResidual(dim, FeedForward(dim, expansion_factor, dropout, chan_last)),
        ) for _ in range(depth)],
        nn.LayerNorm(dim),
        Reduce('b n c -> b c', 'mean'),
        nn.Linear(dim, num_classes)
    )
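# Shape-check sketch (hypothetical helper with a stand-in task object):
def _demo_mlp_mixer():
    import torch
    class _ToyTask:
        input_shape = (3, 32, 32)
        n_classes = 10
    mixer = MLPMixer(_ToyTask(), patch_size=4, dim=64, depth=2)
    print(mixer(torch.randn(2, 3, 32, 32)).shape)  # expected: torch.Size([2, 10])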
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
import time
import itertools
from copy import deepcopy
from pydoc import locate
from fvcore.nn import FlopCountAnalysis as FCA
from torchvision.models import *
from . import sp
# ----------------------------------------------------------------
# Models
# ----------------------------------------------------------------
class module_list_wrapper(nn.Module):
def __init__(self, layers):
super().__init__()
self.layer = nn.Sequential(*layers)
def forward(self, x):
out = self.layer(x)
return out
def __getitem__(self, i):
return self.layer[i]
def __len__(self):
return len(self.layer)
def sp_vgg(model, n_classes=10, dimh=16, method='none'):
cfgs = {
'vgg11': [1, 'M', 2, 'M', 4, 4, 'M', 8, 8, 'M', 8, 8, 'M'],
'vgg14': [1, 1, 'M', 2, 2, 'M', 4, 4, 'M', 8, 8, 'M', 8, 8, 'M'],
'vgg16': [1, 1, 'M', 2, 2, 'M', 4, 4, 4, 'M', 8, 8, 8, 'M', 8, 8, 8, 'M'],
'vgg19': [1, 1, 'M', 2, 2, 'M', 4, 4, 4, 4, 'M', 8, 8, 8, 8, 'M', 8, 8, 8, 8, 'M'],
}
cfg = cfgs[model]
next_layers = {}
prev_idx = -1
in_channels = 3
net = []
n = len(cfg)
for i, x in enumerate(cfg):
if x == 'M':
net.append(nn.MaxPool2d(kernel_size=2, stride=2))
elif x == 'A':
net.append(nn.AvgPool2d(kernel_size=2, stride=2))
else:
if method == 'none':
net.append(sp.Conv2d(in_channels, 64*x, kernel_size=3, padding=1, actv_fn='relu', has_bn=True))
in_channels = 64*x
else:
net.append(sp.Conv2d(in_channels, dimh, kernel_size=3, padding=1, actv_fn='relu', has_bn=True))
in_channels = dimh
if prev_idx >= 0: next_layers[prev_idx] = [i]
prev_idx = i
net.append(sp.Conv2d(in_channels, n_classes, kernel_size=1, padding=0, actv_fn='none', can_split=False))
net.append(nn.Flatten())
    net = module_list_wrapper(net)
    next_layers[prev_idx] = [n]
layer2split = list(next_layers.keys())
return net, next_layers, layer2split
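# Usage sketch (hypothetical helper; relies on the relative `sp` import, so
# this module must be used from within the package):
def _demo_sp_vgg():
    net, next_layers, layer2split = sp_vgg('vgg11', n_classes=10, dimh=16, method='fireflyn')
    print(net(torch.randn(2, 3, 32, 32)).shape)  # expected: torch.Size([2, 10])
    print(layer2split)  # indices of the splittable conv layers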
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
from torch.optim import *
from crlapi.sl.architectures.firefly_vgg import sp
###############################################################################
#
# Split Network
#
###############################################################################
class SpNet(nn.Module):
def __init__(self):
super(SpNet, self).__init__()
self.net = None
self.next_layers = {}
self.previous_layers = {}
self.layers_to_split = []
self.verbose = True
self.n_elites = 0
self.num_group = 1
def create_optimizer(self):
pass
def forward(self, x):
pass
def clear(self):
for layer in self.net:
if isinstance(layer, sp.SpModule):
layer.clear()
def get_num_elites(self):
n = 0
for i in self.layers_to_split:
n += self.net[i].module.weight.shape[0]
self.n_elites = int(n * self.grow_ratio)
    def get_num_elites_group(self, group_num):
        if not hasattr(self, 'n_elites_group'):
            self.n_elites_group = {}
        for g in range(group_num):
            n = 0
            for i in self.layers_to_split_group[g]:
                n += self.net[i].module.weight.shape[0]
            self.n_elites_group[g] = int(n * self.grow_ratio)
def sp_threshold(self):
ws, wi = torch.sort(torch.cat([self.net[i].w for i in self.layers_to_split]).reshape(-1))
total= ws.shape[0]
threshold = ws[self.n_elites]
return threshold
def sp_threshold_group(self, group_num):
ws, wi = torch.sort(torch.cat([self.net[i].w for i in self.layers_to_split_group[group_num]]).reshape(-1))
total= ws.shape[0]
threshold = ws[self.n_elites_group[group_num]]
return threshold
def save(self, path='./tmp.pt'):
torch.save(self.state_dict(), path)
def load(self, path='./tmp.pt'):
self.load_state_dict(torch.load(path))
def get_num_params(self):
model_n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
return model_n_params
def spe(self, dataloader, n_batches):
pass
def spf(self, dataloader, n_batches):
pass
    def spff(self, dataloader, n_batches):
        pass
    def spffn(self, dataloader, n_batches):
        # stub: concrete subclasses provide the firefly-new splitting step,
        # which the dispatch table in split() below references
        pass
def split(self, split_method, dataset, n_batches=-1):
self.num_group = 1 if self.config.model != 'mobile' else 2
if split_method not in ['random', 'exact', 'fast', 'firefly', 'fireflyn']:
raise NotImplementedError
if self.verbose:
print('[INFO] start splitting ...')
start_time = time.time()
self.net.eval()
if self.num_group == 1:
self.get_num_elites()
else:
self.get_num_elites_group(self.num_group)
split_fn = {
'exact': self.spe,
'fast': self.spf,
'firefly': self.spff,
'fireflyn': self.spffn,
}
if split_method != 'random':
split_fn[split_method](dataset, n_batches)
n_neurons_added = {}
if split_method == 'random':
n_layers = len(self.layers_to_split)
n_total_neurons = 0
threshold = 0.
for l in self.layers_to_split:
n_total_neurons += self.net[l].get_n_neurons()
n_grow = int(n_total_neurons * self.grow_ratio)
n_new1 = np.random.choice(n_grow, n_layers, replace=False)
n_new1 = np.sort(n_new1)
n_news = []
for i in range(len(n_new1) - 1):
if i == 0:
n_news.append(n_new1[i])
n_news.append(n_new1[i + 1] - n_new1[i])
else:
n_news.append(n_new1[i + 1] - n_new1[i])
n_news[-1] += 1
for i, n_new_ in zip(reversed(self.layers_to_split), n_news):
if isinstance(self.net[i], sp.SpModule) and self.net[i].can_split:
n_new, idx = self.net[i].random_split(n_new_)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed splitted this layer
for j in self.next_layers[i]:
self.net[j].passive_split(idx)
elif split_method == 'fireflyn':
if self.num_group == 1:
threshold = self.sp_threshold()
for i in reversed(self.layers_to_split):
if isinstance(self.net[i], sp.SpModule) and self.net[i].can_split:
if self.num_group != 1:
group = self.total_group[i]
threshold = self.sp_threshold_group(group)
n_new, split_idx, new_idx = self.net[i].spffn_active_grow(threshold)
sp_new = split_idx.shape[0] if split_idx is not None else 0
n_neurons_added[i] = (sp_new, n_new-sp_new)
                    isfirst = (self.net[i].kh == 1)  # computed but currently unused
for j in self.next_layers[i]:
print('passive', self.net[j].module.weight.shape)
self.net[j].spffn_passive_grow(split_idx, new_idx)
else:
threshold= self.sp_threshold()
# actual splitting
for i in reversed(self.layers_to_split):
if isinstance(self.net[i], sp.SpModule) and self.net[i].can_split:
n_new, idx = self.net[i].active_split(threshold)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed splitted this layer
for j in self.next_layers[i]:
self.net[j].passive_split(idx)
self.net.train()
self.clear() # cleanup auxiliaries
self.create_optimizer() # re-initialize optimizer
end_time = time.time()
if self.verbose:
print('[INFO] splitting takes %10.4f sec. Threshold value is %10.9f' % (
end_time - start_time, threshold))
if split_method == 'fireflyn':
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows (sp %d | new %d)' % (x, y1, y2) for x, (y1, y2) in n_neurons_added.items()]))
else:
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows %d neurons' % (x, y) for x, y in n_neurons_added.items()]))
return n_neurons_added
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
from .conv import Conv2d
from .net import SpNet
from .module import SpModule
__all__ = [
'SpNet', 'SpModule',
'Conv2d',
]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
from torch.optim import *
###############################################################################
#
# Split Block Abstract Class
#
###############################################################################
class SpModule(nn.Module):
def __init__(self,
can_split=True,
actv_fn='relu',
has_bn=False,
has_bias=True,
rescale=1.0):
super(SpModule, self).__init__()
# properties
self.can_split = can_split
self.actv_fn = actv_fn
self.has_bn = has_bn
self.has_bias = has_bias
self.epsilon = 1e-2
self.K = 70
# modules
self.module = None
self.bn = None
# auxiliaries
self.w = None
self.v = None
self.y = None
self.S = []
self.leaky_alpha = 0.2
    def clear(self):
        del self.w
        del self.v
        del self.y
        del self.S
        for name in ('vni', 'vno'):
            if hasattr(self, name):
                delattr(self, name)
        self.w = self.v = self.y = None
        self.S = []
    def get_device(self):
        if self.module is None:
            raise Exception('[ERROR] no module initialized')
        return 'cuda' if self.module.weight.data.is_cuda else 'cpu'
def _d2_actv(self, x, beta=3.):
if self.actv_fn == 'relu':
# use 2nd order derivative of softplus for approximation
s = torch.sigmoid(x*beta)
return beta*s*(1.-s)
elif self.actv_fn == 'softplus':
s = torch.sigmoid(x)
return s*(1.-s)
elif self.actv_fn == 'rbf':
return (x.pow(2)-1)*(-x.pow(2)/2).exp()
elif self.actv_fn == 'leaky_relu':
s = torch.sigmoid(x*beta)
return beta*s*(1.-s)*(1.-self.leaky_alpha)
elif self.actv_fn == 'swish':
s = torch.sigmoid(x)
return s*(1.-s) + s + x*s*(1.-s) - (s.pow(2) + 2.*x*s.pow(2)*(1.-s))
elif self.actv_fn == 'sigmoid':
s = torch.sigmoid(x)
return (s-s.pow(2)) * (1.-s).pow(2)
elif self.actv_fn == 'tanh':
h = torch.tanh(x)
return -2.*h * (1-h.pow(2))
elif self.actv_fn == 'none':
return torch.ones_like(x)
else:
raise NotImplementedError
def _activate(self, x):
if self.actv_fn == 'relu':
return F.relu(x)
elif self.actv_fn == 'leaky_relu':
return F.leaky_relu(x, self.leaky_alpha)
elif self.actv_fn == 'swish':
return x * torch.sigmoid(x)
elif self.actv_fn == 'rbf':
return (-x.pow(2)/2).exp()
elif self.actv_fn == 'sigmoid':
return torch.sigmoid(x)
elif self.actv_fn == 'tanh':
return torch.tanh(x)
elif self.actv_fn == 'softplus':
return F.softplus(x)
elif self.actv_fn == 'none':
return x
else:
raise NotImplementedError
def forward(self, x):
x = self.module(x)
if self.has_bn:
x = self.bn(x)
return self._activate(x)
def active_split(self, threshold):
pass
def passive_split(self, idx):
pass
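# A quick finite-difference check of `_d2_actv` (hypothetical helper; 'tanh'
# has an exact closed form above, unlike the softplus-smoothed 'relu'):
def _demo_d2_check():
    m = SpModule(actv_fn='tanh')
    x = torch.linspace(-2., 2., 5, dtype=torch.float64)
    eps = 1e-4
    fd = (torch.tanh(x + eps) - 2 * torch.tanh(x) + torch.tanh(x - eps)) / eps ** 2
    print((m._d2_actv(x) - fd).abs().max())  # close to zero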
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# *** MODULES taken from original code https://github.com/klightz/Firefly
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, grad
from torch.distributions import Normal
from torch.optim import *
from .module import SpModule
from crlapi.sl.architectures.firefly_vgg import sp
###############################################################################
#
# Conv2d Split Layer
#
###############################################################################
class Conv2d(SpModule):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=1,
groups = 1,
can_split=True,
bias=True,
actv_fn='relu',
has_bn=False,
rescale=1.0):
super().__init__(can_split=can_split,
actv_fn=actv_fn,
has_bn=has_bn,
has_bias=bias,
rescale=rescale)
if has_bn:
self.bn = nn.BatchNorm2d(out_channels)
self.has_bias = False
if isinstance(kernel_size, int):
self.kh = self.kw = kernel_size
else:
assert len(kernel_size) == 2
self.kh, self.kw = kernel_size
if isinstance(padding, int):
self.ph = self.pw = padding
else:
assert len(padding) == 2
self.ph, self.pw = padding
if isinstance(stride, int):
self.dh = self.dw = stride
else:
assert len(stride) == 2
self.dh, self.dw = stride
self.groups = groups
self.module = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
groups = groups,
stride=stride,
padding=padding,
bias=self.has_bias)
def get_conv_patches(self, x):
x = F.pad(x, (self.pw, self.pw, self.ph, self.ph)) # pad (left, right, top, bottom)
# get all image windows of size (kh, kw) and stride (dh, dw)
patches = x.unfold(2, self.kh, self.dh).unfold(3, self.kw, self.dw)
patches = patches.permute(0, 2, 3, 1, 4, 5).contiguous() # [B, H, W, C_in, kh, kw]
return patches
###########################################################################
# fast split
###########################################################################
def spf_reset(self):
# y is a dummy variable for storing gradients of v
W = self.module.weight.data
self.y = nn.Parameter(torch.zeros_like(W))
self.y.retain_grad()
self.v = nn.Parameter(torch.zeros_like(W))
self.v.data.uniform_(-1e-1, 1e-1)
self.v.retain_grad()
self.w = 0.
def spf_update_v(self):
v = self.v
sv = self.y.grad
vv = v.pow(2).sum([1,2,3], keepdim=True)
vsv = (sv * v).sum([1,2,3], keepdim=True)
v_grad = 2. * (sv * vv - v * vsv) / vv.pow(2)
self.v.grad = v_grad
self.y.grad = None
def spf_update_w(self, n=1.):
v = self.v
sv = self.y.grad
vv = v.pow(2).sum([1,2,3])
vsv = (sv * v).sum([1,2,3])
self.w += (vsv / vv).data.clone() / n
def spf_forward(self, x):
out = self.module(x) # [B, C_out]
bn_coef = 1.
if self.has_bn:
self.bn.eval() # fix running mean/variance
out = self.bn(out)
# calculate bn_coef
bn_coef = 1. / torch.sqrt(self.bn.running_var + 1e-5) * self.bn.weight
bn_coef = bn_coef.view(1, -1, 1, 1) # [1, C_out, 1, 1]
# normalize v
v_norm = self.v.pow(2).sum([1,2,3], keepdim=True).sqrt().data
self.v.data = self.v.data / v_norm
patches = self.get_conv_patches(x)
B, H, W, C_in, kh, kw = patches.size()
x = patches.reshape(B*H*W, -1)
left = x.mm(self.y.view(-1, C_in*kh*kw).t()).view(B, H, W, -1).permute(0,3,1,2)
right = x.mm(self.v.view(-1, C_in*kh*kw).t()).view(B, H, W, -1).permute(0,3,1,2)
aux = self._d2_actv(out) * (bn_coef*left) * (bn_coef*right)
out = self._activate(out) + aux
return out
###########################################################################
# firefly split + new neurons
###########################################################################
def spffn_add_new(self, enlarge_out=True, enlarge_in=True):
self.eout = self.K if enlarge_out else 0
self.ein = self.K if enlarge_in else 0
if self.groups == 1:
C_out, C_in = self.module.weight.data.shape[:2]
else:
C_out, _ = self.module.weight.data.shape[:2]
C_in = C_out
device = self.get_device()
if self.has_bn and self.eout > 0:
new_bn = nn.BatchNorm2d(C_out+self.eout).to(device)
new_bn.weight.data[:C_out] = self.bn.weight.data.clone()
new_bn.bias.data[:C_out] = self.bn.bias.data.clone()
new_bn.running_mean.data[:C_out] = self.bn.running_mean.data.clone()
new_bn.running_var.data[:C_out] = self.bn.running_var.data.clone()
new_bn.weight.data[C_out:] = 1.
new_bn.bias.data[C_out:] = 0.
self.bn = new_bn
self.bn.eval()
if self.groups != 1:
self.groups += self.K
new_layer = nn.Conv2d(in_channels=C_in+self.ein,
out_channels=C_out+self.eout,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias, groups = self.groups).to(device)
new_layer.weight.data[:C_out, :C_in, :, :] = self.module.weight.data.clone()
if self.ein > 0:
new_layer.weight.data[:, C_in:, :, :] = 0.
if self.eout > 0:
new_layer.weight.data[C_out:, :, :, :] = 0.
self.module = new_layer
self.module.eval()
def spffn_penalty(self):
penalty = 0.
if self.can_split: penalty += self.v.pow(2).sum()
if self.eout > 0: penalty += 1e-2 * self.vno.pow(2).sum()
if penalty > 0: (penalty * 1e-2).backward()
def spffn_clip(self):
if self.ein > 0: # since output is just 1
self.vni.data.clamp_(-1e-2, 1e-2)
    def spffn_reset(self):
        C_out, C_in, kh, kw = self.module.weight.data.shape
        if self.groups != 1:
            C_in = C_out
device = self.get_device()
self.y = nn.Parameter(torch.zeros(1,C_out,1,1)).to(device)
self.y.retain_grad()
self.w = 0.
if self.can_split:
v = torch.zeros(C_out-self.eout, C_in-self.ein,kh,kw).to(device)
v.uniform_(-1e-1, 1e-1)
self.v = nn.Parameter(v)
if self.ein > 0:
vni = torch.zeros(C_out, self.ein, kh, kw).to(device)
vni.uniform_(-1e-2, 1e-2)
self.vni = nn.Parameter(vni)
if self.eout > 0:
vno = torch.zeros(self.eout, C_in-self.ein, kh, kw).to(device)
n = kh * kw * (C_in - self.ein)
stdv = 1. / math.sqrt(n)
#vno.uniform_(-stdv, stdv)
vno.normal_(0, 0.1)
self.vno = nn.Parameter(vno)
def spffn_update_w(self, d, output = False):
if not output:
self.w += (self.y.grad.data / d).view(-1)
self.y.grad = None
else:
y_grad = grad(self.output.mean(), self.y)
self.w += (self.y.grad.data / y_grad[0].data / d).view(-1)
self.y.grad = None
def spffn_forward(self, x, alpha=-1):
out = self.module(x) # [out+eout, in+ein, H, W]
patches = self.get_conv_patches(x)
B, H, W, C_in, kh, kw = patches.size()
C_out = out.shape[1]
cin, cout = C_in - self.ein, C_out - self.eout
x = patches.view(B*H*W, -1, kh*kw)
if self.ein > 0:
x1, x2 = x[:,:cin,:].view(B*H*W, -1), x[:,cin:,:].view(B*H*W,-1)
else:
x1 = x.view(B*H*W, -1)
if self.can_split:
noise_v = x1.mm(self.v.view(-1, cin*kh*kw).t()).view(B,H,W,-1).permute(0,3,1,2) # [B,cout,H,W]
if alpha >= 0.:
noise_v = (noise_v.detach() * self.y[:,:cout,:,:] + noise_v * alpha)
if self.eout > 0:
noise_vo = x1.mm(self.vno.view(-1, cin*kh*kw).t()).view(B,H,W,-1).permute(0,3,1,2)
if alpha >= 0.:
noise_vo = (noise_vo.detach() * self.y[:,cout:,:,:] + noise_vo * alpha)
if self.ein > 0:
noise_vi1 = x2.mm(self.vni.view(-1, self.ein*kh*kw).t())
if self.eout > 0:
noise_vi1, noise_vi2 = noise_vi1[:,:cout], noise_vi1[:,cout:] # [B*H*W, cout/eout]
noise_vi1 = noise_vi1.view(B,H,W,-1).permute(0,3,1,2)
noise_vi2 = noise_vi2.view(B,H,W,-1).permute(0,3,1,2)
else:
noise_vi1 = noise_vi1.view(B,H,W,-1).permute(0,3,1,2)
o1_plus = o1_minus = o2 = 0.
if self.can_split:
o1_plus = out[:,:cout,:,:] + noise_v # [B, cout, H, W]
o1_minus = out[:,:cout,:,:] - noise_v # [B, cout, H, W]
if self.eout > 0:
o2 = out[:,cout:,:,:] + noise_vo
if self.ein > 0:
o1_plus = o1_plus + noise_vi1
o1_minus = o1_minus + noise_vi1
if self.eout > 0:
o2 = o2 + noise_vi2
if self.eout > 0:
o1_plus = torch.cat((o1_plus, o2), 1)
o1_minus = torch.cat((o1_minus, o2), 1)
if self.has_bn:
o1_plus = self.bn(o1_plus)
o1_minus = self.bn(o1_minus)
o1_plus = self._activate(o1_plus)
o1_minus = self._activate(o1_minus)
output = (o1_plus + o1_minus) / 2.
else:
o1 = out[:,:cout,:,:]
if self.eout > 0:
o2 = out[:,cout:,:,:] + noise_vo
if self.ein > 0:
o2 = o2 + noise_vi2
if self.ein > 0:
o1 = o1 + noise_vi1
if self.eout > 0:
o1 = torch.cat((o1, o2), 1)
if self.has_bn:
o1 = self.bn(o1)
output = self._activate(o1)
self.output = output
return output
###########################################################################
# firefly split
###########################################################################
def spff_reset(self):
W = self.module.weight.data
device = self.get_device()
self.y = nn.Parameter(torch.zeros(1, W.shape[0], 1, 1)).to(device)
self.y.retain_grad()
self.v = nn.Parameter(torch.zeros_like(W))
self.v.data.uniform_(-1e-1, 1e-1)
self.w = 0.
def spff_update_w(self, d):
self.w += (self.y.grad.data/d).view(-1)
def spff_scale_v(self):
self.v.data = self.v.data * 1e2
def spff_forward(self, x, alpha=-1):
out = self.module(x)
patches = self.get_conv_patches(x)
B, H, W, C_in, kh, kw = patches.size()
x = patches.view(B*H*W, -1)
if alpha >= 0.:
noise_out = x.mm(self.v.view(-1, C_in*kh*kw).t())
noise_out = noise_out.view(B, H, W, -1).permute(0, 3, 1, 2)
noise_out = (self.y * noise_out.detach() + noise_out * alpha)
else:
noise_out = x.mm(self.v.view(-1, C_in*kh*kw).t())
noise_out = noise_out.view(B, H, W, -1).permute(0, 3, 1, 2)
out_plus = out + noise_out
out_minus = out - noise_out
if self.has_bn:
self.bn.eval()
out_plus = self.bn(out_plus)
out_minus = self.bn(out_minus)
out_plus = self._activate(out_plus)
out_minus = self._activate(out_minus)
return (out_plus + out_minus) / 2.
###########################################################################
# exact split
###########################################################################
def spe_forward(self, x):
out = self.module(x) # [B, C_out, H, W]
if self.has_bn:
self.bn.eval() # fix running mean/variance
out = self.bn(out)
# calculate bn_coff
bn_coff = 1. / torch.sqrt(self.bn.running_var + 1e-5) * self.bn.weight
bn_coff = bn_coff.view(1, -1, 1, 1) # [1, C_out, 1, 1]
first_run = (len(self.S) == 0)
# calculate 2nd order derivative of the activation
nabla2_out = self._d2_actv(out) # [B, C_out, H, W]
patches = self.get_conv_patches(x)
B, H, W, C_in, KH, KW = patches.size()
C_out = out.shape[1]
D = C_in * KH * KW
x = patches.view(B, H, W, D)
device = self.get_device()
auxs = [] # separate calculations for each neuron for space efficiency
for neuron_idx in range(C_out):
c = bn_coff[:, neuron_idx:neuron_idx+1, :, :] if self.has_bn else 1.
l = c * x
if first_run:
S = Variable(torch.zeros(D, D).to(device), requires_grad=True) # [H_in, H_in]
self.S.append(S)
else:
S = self.S[neuron_idx]
aux = l.view(-1, D).mm(S).unsqueeze(1).bmm(l.view(-1, D, 1)).squeeze(-1) # (Bx)S(Bx^T), [B*H*W,1]
aux = aux.view(B, 1, H, W)
auxs.append(aux)
auxs = torch.cat(auxs, 1) # [B, C_out, H, W]
auxs = auxs * nabla2_out # [B, C_out, H, W]
out = self._activate(out) + auxs
return out
def spe_eigen(self, avg_over=1.):
A = np.array([item.grad.data.cpu().numpy() for item in self.S]) # [C_out, D, D]
A /= avg_over
A = (A + np.transpose(A, [0, 2, 1])) / 2
w, v = np.linalg.eig(A) # [C_out, K], [C_out, D, K]
w = np.real(w)
v = np.real(v)
min_idx = np.argmin(w, axis=1)
w_min = np.min(w, axis=1) # [C_out,]
v_min = v[np.arange(w_min.shape[0]), :, min_idx] # [C_out, D]
device = self.get_device()
self.w = torch.FloatTensor(w_min).to(device)
self.v = torch.FloatTensor(v_min).to(device)
self.v = self.v.view(*self.module.weight.data.shape)
del A
## below are for copying weights and actual splitting
def get_n_neurons(self):
return self.module.weight.data.shape[0]
def random_split(self, C_new):
if C_new == 0:
return 0, None
C_out, C_in, kh, kw = self.module.weight.shape
idx = np.random.choice(C_out, C_new)
device = self.get_device()
delta1 = F.normalize(torch.randn(C_new, C_in, kh, kw).to(device), p=2, dim=-1)
delta2 = F.normalize(torch.randn(C_new, C_in, kh, kw).to(device), p=2, dim=-1)
delta1 = delta1 * 1e-2
delta2 = delta2 * 1e-2
idx = torch.LongTensor(idx).to(device)
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=C_out+C_new,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
# for current layer
new_layer.weight.data[:C_out, ...] = self.module.weight.data.clone()
new_layer.weight.data[C_out:, ...] = self.module.weight.data[idx, ...]
new_layer.weight.data[idx, ...] += delta1
new_layer.weight.data[C_out:, ...] -= delta2
if self.has_bias:
new_layer.bias.data[:C_out, ...] = self.module.bias.data.clone()
new_layer.bias.data[C_out:, ...] = self.module.bias.data[idx]
self.module = new_layer
# for batchnorm layer
if self.has_bn:
new_bn = nn.BatchNorm2d(C_out+C_new).to(device)
new_bn.weight.data[:C_out] = self.bn.weight.data.clone()
new_bn.weight.data[C_out:] = self.bn.weight.data[idx]
new_bn.bias.data[:C_out] = self.bn.bias.data.clone()
new_bn.bias.data[C_out:] = self.bn.bias.data[idx]
new_bn.running_mean.data[:C_out] = self.bn.running_mean.data.clone()
new_bn.running_mean.data[C_out:] = self.bn.running_mean.data[idx]
new_bn.running_var.data[:C_out] = self.bn.running_var.data.clone()
new_bn.running_var.data[C_out:] = self.bn.running_var.data[idx]
self.bn = new_bn
return C_new, idx
def rdinit_grow_output(self):
C_out, C_in, kh, kw = self.module.weight.shape
device = self.get_device()
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=C_out+1,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
new_layer.weight.data[:C_out, ...] = self.module.weight.data.clone()
self.module = new_layer
def rdinit_grow_input(self):
C_out, C_in, kh, kw = self.module.weight.shape
device = self.get_device()
new_layer = nn.Conv2d(in_channels=C_in+1,
out_channels=C_out,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
new_layer.weight.data[:,:C_in, ...] = self.module.weight.data.clone()
self.module = new_layer
def active_split(self, threshold):
idx = torch.nonzero((self.w <= threshold).float()).view(-1)
C_new = idx.shape[0]
if C_new == 0:
return 0, None
C_out, C_in, kh, kw = self.module.weight.shape
device = self.get_device()
delta = self.v[idx, ...] * 1e-2
delta = delta.view(C_new, C_in, kh, kw)
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=C_out+C_new,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
# for current layer
new_layer.weight.data[:C_out, ...] = self.module.weight.data.clone()
new_layer.weight.data[C_out:, ...] = self.module.weight.data[idx, ...]
new_layer.weight.data[idx, ...] += delta
new_layer.weight.data[C_out:, ...] -= delta
if self.has_bias:
new_layer.bias.data[:C_out, ...] = self.module.bias.data.clone()
new_layer.bias.data[C_out:, ...] = self.module.bias.data[idx]
self.module = new_layer
# for batchnorm layer
if self.has_bn:
new_bn = nn.BatchNorm2d(C_out+C_new).to(device)
new_bn.weight.data[:C_out] = self.bn.weight.data.clone()
new_bn.weight.data[C_out:] = self.bn.weight.data[idx]
new_bn.bias.data[:C_out] = self.bn.bias.data.clone()
new_bn.bias.data[C_out:] = self.bn.bias.data[idx]
new_bn.running_mean.data[:C_out] = self.bn.running_mean.data.clone()
new_bn.running_mean.data[C_out:] = self.bn.running_mean.data[idx]
new_bn.running_var.data[:C_out] = self.bn.running_var.data.clone()
new_bn.running_var.data[C_out:] = self.bn.running_var.data[idx]
self.bn = new_bn
return C_new, idx
def passive_split(self, idx):
C_new = idx.shape[0]
C_out, C_in, _, _ = self.module.weight.shape
device = self.get_device()
new_layer = nn.Conv2d(in_channels=C_in+C_new,
out_channels=C_out,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
new_layer.weight.data[:, :C_in, ...] = self.module.weight.data.clone()
new_layer.weight.data[:, C_in:, ...] = self.module.weight.data[:, idx, ...] / 2.
new_layer.weight.data[:, idx, ...] /= 2.
if self.has_bias:
new_layer.bias.data = self.module.bias.data.clone()
self.module = new_layer
def spffn_active_grow(self, threshold):
idx = torch.nonzero((self.w <= threshold).float()).view(-1)
C_out, C_in, kh, kw = self.module.weight.shape
c1 = C_out - self.eout
c3 = C_in - self.ein
split_idx, new_idx = idx[idx < c1], idx[idx >= c1]
n_split = split_idx.shape[0]
n_new = new_idx.shape[0]
c2 = c1 + n_split
device = self.get_device()
delta = self.v[split_idx, ...]
new_layer = nn.Conv2d(in_channels=C_in,
out_channels=c1+n_split+n_new,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias).to(device)
# for current layer [--original--c1--split_new--c2--add new--]
old_W = self.module.weight.data.clone()
        # copy the learned new-input / new-output directions into the
        # enlarged weight when they exist
        if hasattr(self, 'vni'):
            old_W[:, C_in - self.ein:, :, :] = self.vni.clone()
        if hasattr(self, 'vno'):
            old_W[C_out-self.eout:, :C_in-self.ein, :, :] = self.vno.clone()
new_layer.weight.data[:c1, ...] = old_W[:c1,...]
if n_split > 0:
new_layer.weight.data[c1:c2, ...] = old_W[split_idx, ...]
new_layer.weight.data[split_idx,:c3,...] += delta
            new_layer.weight.data[c1:c2,:c3,...] -= delta
if n_new > 0:
new_layer.weight.data[c2:, ...] = old_W[new_idx, ...]
if self.has_bias:
old_b = self.module.bias.data.clone()
new_layer.bias.data[:c1, ...] = old_b[:c1,...].clone()
if n_split > 0:
new_layer.bias.data[c1:c2, ...] = old_b[split_idx]
if n_new > 0:
new_layer.bias.data[c2:,...] = 0.
self.module = new_layer
# for batchnorm layer
if self.has_bn:
new_bn = nn.BatchNorm2d(c1+n_split+n_new).to(device)
new_bn.weight.data[:c1] = self.bn.weight.data[:c1].clone()
new_bn.bias.data[:c1] = self.bn.bias.data[:c1].clone()
new_bn.running_mean.data[:c1] = self.bn.running_mean.data[:c1].clone()
new_bn.running_var.data[:c1] = self.bn.running_var.data[:c1].clone()
if n_split > 0:
new_bn.weight.data[c1:c2] = self.bn.weight.data[split_idx]
new_bn.bias.data[c1:c2] = self.bn.bias.data[split_idx]
new_bn.running_mean.data[c1:c2] = self.bn.running_mean.data[split_idx]
new_bn.running_var.data[c1:c2] = self.bn.running_var.data[split_idx]
if n_new > 0:
new_bn.weight.data[c2:] = self.bn.weight.data[new_idx]
new_bn.bias.data[c2:] = self.bn.bias.data[new_idx]
new_bn.running_mean.data[c2:] = self.bn.running_mean.data[new_idx]
new_bn.running_var.data[c2:] = self.bn.running_var.data[new_idx]
self.bn = new_bn
return n_split+n_new, split_idx, new_idx
def spffn_passive_grow(self, split_idx, new_idx):
n_split = split_idx.shape[0] if split_idx is not None else 0
n_new = new_idx.shape[0] if new_idx is not None else 0
C_out, C_in, _, _ = self.module.weight.shape
if self.groups != 1:
C_in = C_out
device = self.get_device()
c1 = C_in-self.ein
if n_split == 0 and n_new == self.ein:
return
if self.groups != 1:
self.groups = c1 + n_split + n_new
C_out = self.groups
new_layer = nn.Conv2d(in_channels=c1+n_split+n_new,
out_channels=C_out,
kernel_size=(self.kh, self.kw),
stride=(self.dh, self.dw),
padding=(self.ph, self.pw),
bias=self.has_bias, groups = self.groups).to(device)
c2 = c1 + n_split
if self.has_bias:
new_layer.bias.data = self.module.bias.data.clone()
if self.groups != 1:
new_layer.weight.data[:c1,:,...] = self.module.weight.data[:c1,:,...].clone()
else:
new_layer.weight.data[:,:c1,...] = self.module.weight.data[:,:c1,...].clone()
if n_split > 0:
if self.groups == 1:
new_layer.weight.data[:,c1:c2,:,:] = self.module.weight.data[:,split_idx,:,:] / 2.
new_layer.weight.data[:,split_idx,...] /= 2.
else:
new_layer.weight.data[c1:c2, :,...] = self.module.weight.data[split_idx, :,...]
if self.groups != 1:
new_bn = nn.BatchNorm2d(C_out).to(device)
out = C_out - n_new - n_split
out1 = out + n_split
out2 = out1 + n_new
new_bn.weight.data[:out] = self.bn.weight.data.clone()[:out]
new_bn.bias.data[:out] = self.bn.bias.data.clone()[:out]
new_bn.running_mean.data[:out] = self.bn.running_mean.data.clone()[:out]
new_bn.running_var.data[:out] = self.bn.running_var.data.clone()[:out]
if n_split > 0:
out1 = out + n_split
new_bn.weight.data[out:out1] = self.bn.weight.data[split_idx]
new_bn.bias.data[out:out1] = self.bn.bias.data[split_idx]
new_bn.running_mean.data[out:out1] = self.bn.running_mean.data[split_idx]
new_bn.running_var.data[out:out1] = self.bn.running_var.data[split_idx]
if n_new > 0:
new_bn.weight.data[out1:out2] = self.bn.weight.data[new_idx]
new_bn.bias.data[out1:out2] = self.bn.bias.data[new_idx]
new_bn.running_mean.data[out1:out2] = self.bn.running_mean.data[new_idx]
new_bn.running_var.data[out1:out2] = self.bn.running_var.data[new_idx]
self.bn = new_bn
if n_new > 0:
if self.groups != 1:
new_layer.weight.data[c2:,:,...] = self.module.weight.data[new_idx, :,...]
else:
new_layer.weight.data[:,c2:,...] = self.module.weight.data[:,new_idx,...]
self.module = new_layer
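# Split bookkeeping sketch (hypothetical helper): growing a layer's output
# channels with `random_split` must be mirrored by `passive_split` on the
# next layer's input channels so the composition keeps its output shape.
def _demo_split_pair():
    a = Conv2d(3, 8, 3, actv_fn='relu')
    b = Conv2d(8, 4, 3, actv_fn='none')
    x = torch.randn(2, 3, 16, 16)
    before = b(a(x))
    n_new, idx = a.random_split(2)  # a: 8 -> 10 output channels
    b.passive_split(idx)            # b: 8 -> 10 input channels
    print(before.shape, b(a(x)).shape)  # same shape; values differ only
                                        # by the small split perturbation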
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import copy
import time
from pydoc import locate
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.core import SupervisedCLModel
from crlapi.sl.architectures.firefly_vgg import sp
from crlapi.sl.architectures.firefly_vgg.models import sp_vgg
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
# --- Firefly Implementation. Since we do not plan to extend this method,
# --- everything is self-contained here.
class Firefly(SupervisedCLModel):
def __init__(self, stream, clmodel_args):
super().__init__()
self.models = []
self.config = clmodel_args
self.verbose = True
def build_initial_net(self, task, **model_args):
# only support the custom VGG backbone for now
model, next_layers, layers_to_split = \
sp_vgg('vgg19',
n_classes=task.n_classes,
dimh=model_args['n_channels'],
method='fireflyn')
# Hacky AF
model.next_layers = next_layers
model.layers_to_split = layers_to_split
return model
def get_prediction_net(self,task):
if task.task_descriptor() is None:
model = self.models[-1]
else:
            model = self.models[task.task_descriptor()]
model.eval()
return model
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
if len(self.models)==0:
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
elif (task.task_descriptor() % self.config.grow_every) == 0:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
print('growing')
base_gr = self.config.model.grow_ratio
grow_ratio = (base_gr * task.task_descriptor() + 1) / (base_gr * (task.task_descriptor() - 1) + 1) - 1
n_pre = sum(np.prod(x.shape) for x in model.parameters())
added = self.split(model, training_loader, grow_ratio)
n_post = sum(np.prod(x.shape) for x in model.parameters())
assert n_post > n_pre
print(f'from {n_pre} to {n_post}')
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
flops_per_input = self.count_flops(task, model)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
        # Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config["device"]
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config["max_epochs"])+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config["max_epochs"]):
# Make sure model is ready for train
model.train()
#Training loop
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Loss {validation_loss:.4}")
if patience_count == patience:
break
self.models.append(best_model)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
return self
# --------------------------------------------
# Firefly specific methods (from Classifier)
# --------------------------------------------
def spffn_forward(self, net, x, alpha):
for layer in net:
if isinstance(layer, sp.SpModule):
x = layer.spffn_forward(x, alpha=alpha)
else:
x = layer(x)
return x.view(x.shape[0], -1)
def spffn_loss_fn(self, net, x, y, alpha=-1):
scores = self.spffn_forward(net, x, alpha=alpha)
loss = F.cross_entropy(scores, y)
return loss
## -- firefly new split -- ##
def spffn(self, net, loader, n_batches):
v_params = []
for i, layer in enumerate(net):
if isinstance(layer, sp.SpModule):
enlarge_in = (i > 0)
enlarge_out = (i < len(net)-1)
net[i].spffn_add_new(enlarge_in=enlarge_in, enlarge_out=enlarge_out)
net[i].spffn_reset()
if layer.can_split:
v_params += [net[i].v]
if enlarge_in:
v_params += [net[i].vni]
if enlarge_out:
v_params += [net[i].vno]
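# v holds candidate split directions for existing neurons; vni / vno are the
# candidate new input / output neurons (attribute names from the sp.SpModule API).
# Only these auxiliaries are stepped by opt_v below; the backbone weights stay fixed.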
opt_v = torch.optim.RMSprop(nn.ParameterList(v_params), lr=1e-3, momentum=0.1, alpha=0.9)
self.device = next(net.parameters()).device
torch.cuda.empty_cache()
n_batches = 0  # recount from the loader; the n_batches argument is effectively ignored here
for i, (x, y) in enumerate(loader):
n_batches += 1
x, y = x.to(self.device), y.to(self.device)
loss = self.spffn_loss_fn(net, x, y)
opt_v.zero_grad()
loss.backward()
for layer in net:
if isinstance(layer, sp.SpModule):
layer.spffn_penalty()
opt_v.step()
self.config.model.granularity = 1  # hard-coded: only a single intermediate alpha is evaluated
alphas = np.linspace(0, 1, self.config.model.granularity*2+1)
for alpha in alphas[1::2]:
for x, y in loader:
x, y = x.to(self.device), y.to(self.device)
loss = self.spffn_loss_fn(net, x, y, alpha=alpha)  # use the alpha from the outer grid
opt_v.zero_grad()
loss.backward()
# for i in self.layers_to_split:
for i in net.layers_to_split:
net[i].spffn_update_w(self.config.model.granularity * n_batches, output = False)
# --------------------------------------------
# Firefly specific methods (from SpNet)
# --------------------------------------------
def clear(self, net):
for layer in net:
if isinstance(layer, sp.SpModule):
layer.clear()
def get_num_elites(self, net, grow_ratio):
n = 0
# for i in self.layers_to_split:
for i in net.layers_to_split:
n += net[i].module.weight.shape[0]
self.n_elites = int(n * grow_ratio)
def get_num_elites_group(self, net, group_num, grow_ratio):
if not hasattr(self, 'n_elites_group'):
self.n_elites_group = {}
for g in range(group_num):
n = 0
for i in self.layers_to_split_group[g]:
n += net[i].module.weight.shape[0]
self.n_elites_group[g] = int(n * grow_ratio)
def sp_threshold(self, net):
ws, _ = torch.sort(torch.cat([net[i].w for i in net.layers_to_split]).reshape(-1))
return ws[self.n_elites]
def sp_threshold_group(self, net, group_num):
ws, _ = torch.sort(torch.cat([net[i].w for i in net.layers_to_split_group[group_num]]).reshape(-1))
return ws[self.n_elites_group[group_num]]
def split(self, net, loader, grow_ratio, n_batches=-1, split_method='fireflyn'):
self.num_group = 1  # would be 2 for a 'mobile' backbone
if split_method not in ['random', 'exact', 'fast', 'firefly', 'fireflyn']:
raise NotImplementedError
if self.verbose:
print('[INFO] start splitting ...')
start_time = time.time()
net.eval()
if self.num_group == 1:
self.get_num_elites(net, grow_ratio)
else:
self.get_num_elites_group(net, self.num_group, grow_ratio)
split_fn = {
#'exact': self.spe,
#'fast': self.spf,
#'firefly': self.spff,
'fireflyn': self.spffn,
}
if split_method != 'random':
split_fn[split_method](net, loader, n_batches)
n_neurons_added = {}
if split_method == 'random':
# n_layers = len(self.layers_to_split)
n_layers = len(net.layers_to_split)
n_total_neurons = 0
threshold = 0.
# for l in self.layers_to_split:
for l in net.layers_to_split:
n_total_neurons += net[l].get_n_neurons()
n_grow = int(n_total_neurons * grow_ratio)
n_new1 = np.random.choice(n_grow, n_layers, replace=False)
n_new1 = np.sort(n_new1)
n_news = []
for i in range(len(n_new1) - 1):
if i == 0:
n_news.append(n_new1[i])
n_news.append(n_new1[i + 1] - n_new1[i])
else:
n_news.append(n_new1[i + 1] - n_new1[i])
n_news[-1] += 1
# for i, n_new_ in zip(reversed(self.layers_to_split), n_news):
for i, n_new_ in zip(reversed(net.layers_to_split), n_news):
if isinstance(net[i], sp.SpModule) and net[i].can_split:
n_new, idx = net[i].random_split(n_new_)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed split this layer
# for j in self.next_layers[i]:
for j in net.next_layers[i]:
net[j].passive_split(idx)
elif split_method == 'fireflyn':
if self.num_group == 1:
threshold = self.sp_threshold(net)
# for i in reversed(self.layers_to_split):
for i in reversed(net.layers_to_split):
if isinstance(net[i], sp.SpModule) and net[i].can_split:
if self.num_group != 1:
group = self.total_group[i]
threshold = self.sp_threshold_group(net, group)
n_new, split_idx, new_idx = net[i].spffn_active_grow(threshold)
sp_new = split_idx.shape[0] if split_idx is not None else 0
n_neurons_added[i] = (sp_new, n_new-sp_new)
isfirst = (net[i].kh == 1)  # kernel height 1; currently unused
# for j in self.next_layers[i]:
for j in net.next_layers[i]:
if self.verbose:
print('passive', net[j].module.weight.shape)
net[j].spffn_passive_grow(split_idx, new_idx)
else:
threshold = self.sp_threshold(net)
# actual splitting
# for i in reversed(self.layers_to_split):
for i in reversed(net.layers_to_split):
if isinstance(net[i], sp.SpModule) and net[i].can_split:
n_new, idx = net[i].active_split(threshold)
n_neurons_added[i] = n_new
if n_new > 0: # we have indeed split this layer
# for j in self.next_layers[i]:
for j in net.next_layers[i]:
net[j].passive_split(idx)
net.train()
self.clear(net) # cleanup auxiliaries
end_time = time.time()
if self.verbose:
print('[INFO] splitting takes %10.4f sec. Threshold value is %10.9f' % (
end_time - start_time, threshold))
if split_method == 'fireflyn':
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows (sp %d | new %d)' % (x, y1, y2) for x, (y1, y2) in n_neurons_added.items()]))
else:
print('[INFO] number of added neurons: \n%s\n' % \
'\n'.join(['-- %d grows %d neurons' % (x, y) for x, y in n_neurons_added.items()]))
return n_neurons_added
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
class IndexDataset(torch.utils.data.Dataset):
def __init__(self, og_dataset):
self.og_dataset = og_dataset
def __getitem__(self, index):
data, target = self.og_dataset[index]
return data, target, index
def __len__(self):
return len(self.og_dataset)
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class AdaBoost(Finetune):
def get_prediction_net(self,task):
for i, model in enumerate(self.models):
model.eval()
self.models[i] = model.to(self.config.device)
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs) * self.model_weights.reshape((-1,) + (1,) * outs[0].ndim)
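# model_weights has shape (n_models,); the reshape to (n_models, 1, ..., 1) lets it
# broadcast against the stacked logits of shape (n_models, batch, n_classes).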
out = out.mean(0)
return out
def compute_errors(self, loader, model):
unshuffled_loader = torch.utils.data.DataLoader(
loader.dataset, batch_size=loader.batch_size, drop_last=False, shuffle=False)
device=self.config.device
model.to(device)
model.eval()
# --- Upweighting
err = []
# eval mode
with torch.no_grad():
for x, y in unshuffled_loader:
x, y = x.to(device), y.to(device)
err += [~model(x).argmax(1).eq(y)]
err = torch.cat(err).float() # (DS, )
return err
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
# Creating datasets and loaders
og_training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
training_loader = torch.utils.data.DataLoader(
IndexDataset(og_training_loader.dataset),
batch_size=og_training_loader.batch_size,
shuffle=True
)
to_print = []
# --- step 1 : Initialize the observation weights uniformly
ds_len = len(og_training_loader.dataset)
sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
for round_idx in range(self.config.n_rounds):
# --- 2. a) Fit new classifier on weighted data
# init model
model_args=self.config.model
model, best_model = [self.build_initial_net(task,**model_args) for _ in range(2)]
flops_per_input = self.count_flops(task, model)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
epoch = 0
while True: # Run until convergence
epoch += 1
# Make sure model is ready for train
model.train()
# Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y, idx) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
weight_x = sample_weights[idx]
n += y.size(0)
# apply transformations
x = train_aug(raw_x)
predicted = model(x)
loss = F.cross_entropy(predicted, y, reduction='none')
loss = (loss * weight_x).mean() * ds_len
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
# Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print(f"\t Round {around}. Found best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience or epoch == self.config.max_epochs:
break
del model
# --- Step 2 b) Compute the new classifier errors
all_errs = self.compute_errors(og_training_loader, best_model) # 1, ds_size
assert all_errs.shape == (ds_len, )
cls_err = (all_errs * sample_weights).sum() / sample_weights.sum()
# --- Step 2 c) Compute the new classifier weight
K = task.n_classes
cls_alpha = torch.log((K - 1) * (1 - cls_err) / cls_err)
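# SAMME multi-class AdaBoost weight (Zhu et al., 2009):
# alpha = log((1 - err) / err) + log(K - 1).
# E.g. with K = 10 classes and weighted error 0.3:
# alpha = log(0.7 / 0.3) + log(9) ~= 0.847 + 2.197 ~= 3.04.
# alpha stays positive as long as err < (K - 1) / K, i.e. better than random guessing.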
# --- Step 2 d) Update the sample weights
sample_weights = sample_weights * torch.exp(cls_alpha * all_errs)
sample_weights /= sample_weights.sum()
print(f'sample weights min {sample_weights.min():.6f}\t max {sample_weights.max():.6f} \t median {sample_weights.median():.6f}')
print(torch.multinomial(sample_weights, ds_len, replacement=True).bincount().bincount())
# store best model
self.models.append(best_model)
cls_alpha = cls_alpha.reshape(1)
# store classifier weights
if not hasattr(self, 'model_weights'):
self.model_weights = cls_alpha
else:
self.model_weights = torch.cat((self.model_weights, cls_alpha))
print(self.model_weights)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
to_print += [fill(accs) + '\t' + str(ensemble)]
for item in to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', len(self.models) * flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
from itertools import chain
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class AggEnsemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs).sum(0)
return out
def _validation_loop(self,nets,device,dataloader):
[net.eval() for net in nets]
[net.to(device) for net in nets]
with torch.no_grad():
loss_values=[]
nb_ok=0
nb_total=0
for x,y in dataloader:
x,y=x.to(device),y.to(device)
predicted=0
for net in nets:
predicted += net(x)
loss=F.cross_entropy(predicted,y)
loss_values.append(loss.item())
nb_ok+=predicted.max(1)[1].eq(y).float().sum().item()
nb_total+=x.size()[0]
loss=np.mean(loss_values)
accuracy=nb_ok/nb_total
[net.train() for net in nets]
return {"loss":loss,"accuracy":accuracy}
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
models = [self.build_initial_net(task,**model_args) for _ in range(self.config.k)]
n_params = sum(np.prod(x.shape) for x in models[0].parameters())
print(f'new model has {n_params} params')
else:
# get the last k models
models = [copy.deepcopy(model) for model in self.models]
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, models[0]) * self.config.k
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_models = [copy.deepcopy(model) for model in models]
best_loss, best_acc = 1e10, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
models = [model.to(device) for model in models]
optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))
#Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
[model.train() for model in models]
# Keep a single track of these for now
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
# apply transformations
x = train_aug(raw_x)
predicted = 0.
for model in models:
predicted += model(x)
loss = F.cross_entropy(predicted,y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
acc = nb_ok/x.size()[0]
accuracy = acc
loss_ = loss.item()
training_accuracy += accuracy
training_loss += loss_
n += x.size(0)
n_fwd_samples += x.size(0)
logger.add_scalar("train/loss",loss_,iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
#Validation
training_accuracy /= (i + 1)
training_loss /= (i + 1)
out=self._validation_loop(models,device,validation_loader)
validation_loss, validation_accuracy = out["loss"], out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
if best_acc is None or validation_accuracy > (best_acc):
best_acc = validation_accuracy
for model_idx in range(self.config.k):
best_models[model_idx].load_state_dict(_state_dict(models[model_idx],"cpu"))
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience:
break
# overwrite the best models
self.models = nn.ModuleList(best_models)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
from itertools import chain
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class BaggingSampler(torch.utils.data.Sampler):
""" Simulate a Dataset Sampled with Replacement """
def __init__(self, indices, real_ds_size):
self.size = real_ds_size
self.indices = indices
weights = torch.zeros(size=(self.size,)).float()
weights[self.indices] = 1
self.weights = weights
# do this here so that each epoch sees same sample dist
samples = torch.multinomial(weights, self.size, replacement=True)
self.samples = samples
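# counts[k] = number of dataset points drawn exactly k times by the bootstrap;
# the assert below checks that sum_k k * counts[k] equals the number of draws,
# i.e. every one of the `size` samples is accounted for.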
unique_samples = samples.unique()
counts = samples.bincount().bincount()
assert (counts * torch.arange(counts.size(0)))[1:].sum().item() == self.size
print(counts, unique_samples.size(0), self.indices.size(0))
# (every drawn index is by construction in unique_samples, so no per-element check is needed)
def __iter__(self):
# resample with replacement on every epoch (this overrides the fixed draw from __init__)
samples = torch.multinomial(self.weights, self.size, replacement=True)
for sample in samples:
yield sample.item()
class Bagging(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs).mean(0)
return out
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
models = [self.build_initial_net(task,**model_args) for _ in range(self.config.k)]
n_params = sum(np.prod(x.shape) for x in models[0].parameters())
print(f'new model has {n_params} params')
else:
# get the last k models
models = [copy.deepcopy(self.models[-i]) for i in range(self.config.k)]
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, models[0]) * self.config.k
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
ds_len = len(training_loader.dataset)
training_loaders = []
# build boosted loaders
for _ in range(self.config.k):
all_idx = torch.arange(ds_len)
idx = torch.multinomial(
torch.ones_like(all_idx).float(),
int(self.config.subsample_p * ds_len),
replacement=False
)
sampler = BaggingSampler(idx, ds_len)
loader = torch.utils.data.DataLoader(
training_loader.dataset,
batch_size=training_loader.batch_size,
sampler=sampler
)
training_loaders += [loader]
best_models = [copy.deepcopy(model) for model in models]
best_losses, best_accs = [1e10] * self.config.k, [0] * self.config.k
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
device=self.config.device
models = [model.to(device) for model in models]
optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))
#Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
[model.train() for model in models]
# Keep a single track of these for now
training_loss=0.0
training_accuracy=0.0
n=0
for i, items in enumerate(zip(*training_loaders)):
xs, ys = [], []
for item in items:
x, y = item
x, y = x.to(device), y.to(device)
xs += [train_aug(x)]
ys += [y]
xs = torch.stack(xs)
ys = torch.stack(ys)
loss, acc = 0, 0
for model_idx in range(self.config.k):
model, x, y = models[model_idx], xs[model_idx], ys[model_idx]
predicted = model(x)
loss += F.cross_entropy(predicted,y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
acc += nb_ok/x.size()[0]
accuracy = acc / self.config.k
loss_ = loss.item() / self.config.k
training_accuracy += accuracy
training_loss += loss_
n += xs.size(1)
n_fwd_samples += xs.size(1)
logger.add_scalar("train/loss",loss_,iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
#Validation
training_accuracy /= (i + 1)
training_loss /= (i + 1)
outs = [self._validation_loop(model,device,validation_loader) for model in models]
validation_losses = [x['loss'] for x in outs]
validation_accuracies = [x['accuracy'] for x in outs]
validation_loss, validation_accuracy = np.mean(validation_losses), np.mean(validation_accuracies)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
for model_idx in range(self.config.k):
if validation_accuracies[model_idx] > best_accs[model_idx]:
print("\tFound best model at epoch ",epoch, '\t', model_idx)
best_models[model_idx].load_state_dict(_state_dict(models[model_idx],"cpu"))
best_accs[model_idx] = validation_accuracies[model_idx]
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
for best_model in best_models:
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', np.mean(best_accs), 0)
return self
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import time
import copy
import numpy as np
from pydoc import locate
class IndexDataset(torch.utils.data.Dataset):
""" Wrapper that additionally returns the index for each sample """
def __init__(self, og_dataset):
self.og_dataset = og_dataset
def __getitem__(self, index):
data, target = self.og_dataset[index]
return data, target, index
def __len__(self):
return len(self.og_dataset)
class BoostingSampler(torch.utils.data.Sampler):
""" Upsample points based on sample weight """
def __init__(self, weights):
self.weights = weights
def __iter__(self):
assert -1e-5 < self.weights.sum().item() - 1 < 1e-5
samples = torch.multinomial(self.weights, self.weights.size(0), replacement=True)
if not hasattr(self, 'epoch'):
print('sampling with replacement counts', samples.bincount().bincount())
self.epoch = 0
else:
self.epoch += 1
for sample in samples:
yield sample.item()
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class AdaBoost(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
self.prog_pred_stats = []
def get_prediction_net(self,task):
for i, model in enumerate(self.models):
model.eval()
self.models[i] = model.to(self.config.device)
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs) * self.model_alphas.reshape((-1,) + (1,) * outs[0].ndim)
u_out = torch.stack(outs)
print('diff weighted / unw', (out.sum(0).argmax(-1) != u_out.sum(0).argmax(-1)).float().mean())
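# Prediction-depth diagnostic: walk the cumulative (weighted) ensemble prediction
# model-by-model and find, for each sample, the earliest prefix of models after
# which the prediction never changes again. `useful_steps` is the number of
# members actually needed; its bincount is accumulated for logging.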
prog_pred = out.cumsum(0).argmax(-1)
diff_from_last = prog_pred[-1, :] != prog_pred
diff_from_last = torch.flip(diff_from_last, dims=(0,)) # n_models, bs : with oldest model as idx = 0
last_conseq_steps_with_same_pred = (diff_from_last.int().cumsum(0) == 0).int().sum(0)
useful_steps = len(self.models) - last_conseq_steps_with_same_pred + 1
self.prog_pred_stats += [useful_steps.bincount(minlength=len(self.models) + 1)]
return out.sum(0)
"""
# --- actually let's pick the most confident model
out = u_out
max_prob = F.softmax(out,-1).max(-1)[0] # n_models, BS
model_idx = max_prob.argmax(0) #BS,
N_CLS = out.size(-1)
idx = torch.arange(model_idx.size(0)).cuda() * len(self.models) + model_idx
out = out.transpose(1,0) # BS, n_models, C
out = out.reshape(-1,N_CLS)[idx].reshape(-1,N_CLS)
return out
# ----
#return u_out.sum(0)
#return out.sum(0)
"""
def weighted_validation_loop(self,net,device, dataloader, weights):
""" weight loss and accuracy using sample specific weights """
net = net.eval()
# Return indices for the dataset
loader = torch.utils.data.DataLoader(
IndexDataset(dataloader.dataset),
batch_size=dataloader.batch_size,
shuffle=False
)
ds_len = len(dataloader.dataset)
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y,idx) in enumerate(loader):
x, y, idx = x.to(device),y.to(device), idx.to(device)
weight_x = weights[idx]
predicted=net(x)
loss = F.cross_entropy(predicted,y, reduction='none')
loss = (loss * weight_x).mean() * ds_len
loss_values.append(loss.item())
acc += (predicted.argmax(1).eq(y).float() * weight_x).sum()
loss=np.mean(loss_values)
net = net.train()
return {"loss":loss,"accuracy":acc.item()}
def _all_validation_loop(self, device, dataloader,task):
""" weight loss and accuracy using sample specific weights """
self.get_prediction_net(task)
ds_len = len(dataloader.dataset)
acc = 0
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y) in enumerate(dataloader):
x, y= x.to(device),y.to(device)
out = []
for model in self.models:
out += [model(x)]
out = torch.stack(out).argmax(-1)
acc += (out == y.view(1,-1)).int().max(0)[0].float().sum().item()
return acc / ds_len
def compute_errors(self, loader, models):
""" given a loader and a list of models, returns a per_model x per_sample error matrix """
unshuffled_loader = torch.utils.data.DataLoader(
loader.dataset, batch_size=loader.batch_size, drop_last=False, shuffle=False)
device=self.config.device
# --- Upweighting
all_errs = []
# eval mode
[x.eval() for x in models]
with torch.no_grad():
for x, y in unshuffled_loader:
x, y = x.to(device), y.to(device)
for i, model in enumerate(models):
if i == 0:
err = [~model(x).argmax(1).eq(y)]
else:
err += [~model(x).argmax(1).eq(y)]
err = torch.stack(err) # n_models, bs
all_errs += [err]
all_errs = torch.cat(all_errs, dim=1).float() # n_models, DS
return all_errs
def compute_model_and_sample_weights(self, err_matrix, task):
""" compound sample and model models w.r.t to each model's performance """
n_models, ds_len = err_matrix.size()
sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
model_alphas = []
for model_idx in range(n_models):
model_err = err_matrix[model_idx]
weighted_model_err = (sample_weights * model_err).sum() / sample_weights.sum()
model_alpha = torch.log((1 - weighted_model_err) / weighted_model_err) + np.log(task.n_classes - 1)
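# SAMME model weight: alpha = log((1 - err)/err) + log(K - 1); positive as long as
# the weighted error beats random guessing (err < (K - 1)/K). The sample-weight
# update below multiplies by exp(alpha * err_i), upweighting misclassified points.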
model_alphas += [model_alpha.reshape(1)]
sample_weights = sample_weights * torch.exp(model_alpha * model_err)
sample_weights /= sample_weights.sum()
return sample_weights, model_alphas
def update(self, task, logger):
""" train model on new MB """
task_id = task.task_descriptor()
assert isinstance(task_id, int)
self.validation_outputs = None
if task_id == 0 or self.config.init == 'scratch':
# create model
model_args = self.config.model
model = self.build_initial_net(task,**model_args)
elif self.config.init == 'last':
model = copy.deepcopy(self.models[-1])
elif self.config.init == 'first':
model = copy.deepcopy(self.models[0])
# Creating datasets and loaders
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
og_training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
ds_len = len(og_training_loader.dataset)
# --- get per sample weights
if task_id == 0:
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(model)
print(f'new model has {n_params} params')
sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
model_alphas = []
err_matrix = val_err_matrix = None
else:
err_matrix = self.compute_errors(og_training_loader, self.models)
sample_weights, model_alphas = self.compute_model_and_sample_weights(err_matrix, task)
val_err_matrix = self.compute_errors(validation_loader, self.models)
val_sample_weights, val_model_alphas = self.compute_model_and_sample_weights(val_err_matrix, task)
print('tr sample weights',torch.multinomial(sample_weights, sample_weights.size(0), replacement=True).bincount().bincount())
print('val sample weights',torch.multinomial(val_sample_weights, val_sample_weights.size(0), replacement=True).bincount().bincount())
if self.config.compute_model_weights_on_val:
model_alphas = val_model_alphas
if self.config.boosting == 'weighting' or task_id == 0:
# sample normally, but weight each point
sampler = None
training_weights = sample_weights
# (with uniform weights this would reduce to a plain ensemble)
#print('UNIFORM WEIGHTS')
#training_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
elif self.config.boosting == 'sampling':
# oversample points with high weight --> no need to upweight them
# print('UNIFORM WEIGHTS')
#sample_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
sampler = BoostingSampler(sample_weights)
training_weights = torch.zeros(size=(ds_len,)).fill_(1. / ds_len).to(self.config.device)
else:
raise ValueError
# Return indices for the dataset
training_loader = torch.utils.data.DataLoader(
IndexDataset(og_training_loader.dataset),
batch_size=og_training_loader.batch_size,
shuffle=sampler is None,
sampler=sampler
)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
# Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration = 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
# Training loop
training_loss=0.0
training_accuracy=0.0
n=0
start = time.time()
for i, (raw_x, y, idx) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
weight_x = training_weights[idx]
n += y.size(0)
# apply transformations
x = train_aug(raw_x)
predicted = model(x)
loss = F.cross_entropy(predicted, y, reduction='none')
loss = (loss * weight_x).mean() * ds_len
nb_ok = predicted.argmax(1).eq(y).sum().item()
accuracy = nb_ok/x.size(0)
training_accuracy += nb_ok
training_loss += loss.item()
logger.add_scalar("train/loss",loss.item(), iteration)
logger.add_scalar("train/accuracy",accuracy, iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
# Validation
epoch_time = time.time() - start
training_accuracy /= n
training_loss /= n
if task_id == 0 or self.config.validation == 'normal':
out=self._validation_loop(model,device,validation_loader)
elif self.config.validation == 'weighted':
out=self.weighted_validation_loop(model,device,validation_loader, val_sample_weights)
else:
raise ValueError
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar('training/one_epoch_time', epoch_time, epoch)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}\t Time {epoch_time:.4f}")
if patience_count == patience:
break
# Store best model
self.models.append(best_model)
# Before making predictions, we need to calculate the weight of the new model
if self.config.compute_model_weights_on_val:
final_loader, err_mat = validation_loader, val_err_matrix
else:
final_loader, err_mat = og_training_loader, err_matrix
new_model_err = self.compute_errors(final_loader, [best_model]) # (1, DS)
if err_mat is not None:
err_mat = torch.cat((err_mat, new_model_err))
else:
err_mat = new_model_err
_, model_alphas = self.compute_model_and_sample_weights(err_mat, task)
self.model_alphas = torch.cat(model_alphas)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
best=self._all_validation_loop(device,validation_loader,task)
print('among best ', best)
pred_stats = torch.stack(self.prog_pred_stats).sum(0).float()
pred_stats /= pred_stats.sum()
print('model weights', self.model_alphas)
print('pred stats', pred_stats)
for i in range(pred_stats.size(0)):
logger.add_scalar('model/prediction_depth', pred_stats[i].item(), i)
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import numpy as np
from pydoc import locate
from random import shuffle
from crlapi.core import CLModel
from fvcore.nn import FlopCountAnalysis as FCA
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi import instantiate_class,get_class,get_arguments
import torch.utils.data
class SupervisedCLModel(CLModel, nn.Module):
""" A CLmodel based on a pytorch model, for supervised task over dataset
Args:
CLModel ([type]): [description]
"""
def __init__(self):
nn.Module.__init__(self)
self.memory_training_set=None
self.memory_validation_set=None
def update(self, task, logger):
raise NotImplementedError
def get_prediction_net(self,task):
raise NotImplementedError
def count_flops(self, task, model=None):
if model is None:
model = self.get_prediction_net(task)
# don't mess up BN stats!
model = model.eval()
input = torch.FloatTensor(size=(1, *task.input_shape)).to(self.config['device']).normal_()
model = model.to(self.config['device'])
flops = FCA(model, input).total()
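# fvcore's FlopCountAnalysis traces a single forward pass on `input`;
# .total() returns the flop count for that one sample.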
return flops
def _validation_loop(self,net,device,dataloader):
net = net.eval()
net.to(device)
with torch.no_grad():
loss_values=[]
nb_ok=0
nb_total=0
for x,y in dataloader:
x,y=x.to(device),y.to(device)
predicted=net(x)
loss=F.cross_entropy(predicted,y)
loss_values.append(loss.item())
nb_ok+=predicted.max(1)[1].eq(y).float().sum().item()
nb_total+=x.size()[0]
loss=np.mean(loss_values)
accuracy=nb_ok/nb_total
net = net.train()
return {"loss":loss,"accuracy":accuracy}
def evaluate(self,task,logger,evaluation_args):
logger.message("Evaluating...")
evaluation_dataset = task.task_resources().make()
#Building dataloader for both
evaluation_loader = torch.utils.data.DataLoader(
evaluation_dataset,
batch_size=evaluation_args["batch_size"],
num_workers=evaluation_args["num_workers"],
)
# TODO: is deepcopy here necessary ?
evaluation_model=copy.deepcopy(self.get_prediction_net(task))
evaluation_model.eval()
device=evaluation_args["device"]
evaluation_model.to(device)
with torch.no_grad():
loss_values=[]
nb_ok=0
nb_total=0
for x,y in evaluation_loader:
x,y=x.to(device),y.to(device)
predicted=evaluation_model(x)
loss=F.cross_entropy(predicted,y).item()
nb_ok+=predicted.max(1)[1].eq(y).float().sum().item()
nb_total+=x.size()[0]
loss_values.append(loss)
evaluation_loss=np.mean(loss_values)
accuracy=nb_ok/nb_total
r={"loss":evaluation_loss,"accuracy":accuracy}
logger.debug(str(r))
return r
def build_initial_net(self,task,**model_args):
from importlib import import_module
classname=model_args["class_name"]
del model_args["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
return c(task, **model_args)
# -- Helpers
def get_train_and_validation_loaders(self, dataset):
val_size = int(len(dataset) * self.config.validation_proportion)
tr_size = len(dataset) - val_size
training_dataset, validation_dataset = torch.utils.data.random_split(dataset, [tr_size, val_size])
if self.config.train_replay_proportion>0.0:
if self.memory_training_set is not None:
l=int(len(self.memory_training_set)*self.config.train_replay_proportion)
m,_= torch.utils.data.random_split(self.memory_training_set,[l,len(self.memory_training_set)-l])
training_dataset=torch.utils.data.ConcatDataset([training_dataset,m])
if self.config.validation_replay_proportion>0.0:
if self.memory_validation_set is not None:
l=int(len(self.memory_validation_set)*self.config.validation_replay_proportion)
m,_= torch.utils.data.random_split(self.memory_validation_set,[l,len(self.memory_validation_set)-l])
validation_dataset=torch.utils.data.ConcatDataset([validation_dataset,m])
print("Training set size = ",len(training_dataset))
print("Validation set size = ",len(validation_dataset))
self.memory_training_set=training_dataset
self.memory_validation_set=validation_dataset
training_loader = torch.utils.data.DataLoader(
training_dataset,
batch_size=self.config.training_batch_size,
num_workers=self.config.training_num_workers,
persistent_workers=self.config.training_num_workers>0,
shuffle=True,
# pin_memory=self.config['device'] != 'cpu'
)
validation_loader = torch.utils.data.DataLoader(
validation_dataset,
batch_size=self.config.validation_batch_size,
num_workers=self.config.validation_num_workers,
persistent_workers=self.config.validation_num_workers>0,
shuffle=False,
# pin_memory=self.config['device'] != 'cpu'
)
return training_loader,validation_loader
def get_optimizer(self, model_params):
c=get_class(self.config.optim)
args=get_arguments(self.config.optim)
return c(model_params,**args)
def get_train_augs(self):
if self.config.get('kornia_augs', None) is not None:
tfs = []
import kornia
for tf_cfg in self.config['kornia_augs']:
tf = locate(f'kornia.augmentation.{tf_cfg.name}')
args = dict(tf_cfg)
args.pop('name')
tfs += [tf(**args)]
tfs = nn.Sequential(*tfs)
else:
tfs = nn.Identity()
tfs = tfs.to(self.config['device'])
return tfs
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Ensemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
if self.config.vote:
votes = out.argmax(-1)
oh_votes = F.one_hot(votes, num_classes=out.size(-1))
vote_count = oh_votes.sum(0).float()
most_confident = out.max(0)[0].max(-1)[1]
# Break ties
vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1
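# The +0.1 adds less than one full vote, so it only breaks exact ties in favor
# of the most confident model and never overturns a majority.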
out = vote_count
else:
out = out.mean(0)
return out
def _all_validation_loop(self, device, dataloader,task):
""" weight loss and accuracy using sample specific weights """
self.get_prediction_net(task)
ds_len = len(dataloader.dataset)
acc = 0
with torch.no_grad():
loss_values=[]
acc = 0
for i, (x,y) in enumerate(dataloader):
x, y= x.to(device),y.to(device)
out = []
for model in self.models:
out += [model(x)]
out = torch.stack(out).argmax(-1)
acc += (out == y.view(1,-1)).int().max(0)[0].float().sum().item()
return acc / ds_len
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs)+" epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n+=raw_x.size()[0]
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
#Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
best=self._all_validation_loop(device,validation_loader,task)
print('among best ', best)
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Ensemble(Finetune):
def get_prediction_net(self,task):
for i, model in enumerate(self.models):
model.eval()
self.models[i] = model.to(self.config.device)
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
out = out.mean(0)
return out
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(f'new model has {n_params} params')
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
prev_sample_weights = None
to_print = []
for round_idx in range(self.config.n_rounds):
# start new round with the best model of the last one
if around > 0:
model = copy.deepcopy(best_model)
best_model = copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
#Launching training procedure
logger.message("Start training for "+str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
epoch = 0
while True: # Run until convergence
epoch += 1
# Make sure model is ready for train
model.train()
# Training loop
training_loss=0.0
training_accuracy=0.0
n=0
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n+=raw_x.size()[0]
# apply transformations
x = train_aug(raw_x)
predicted=model(x)
loss=F.cross_entropy(predicted,y)
nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
accuracy=nb_ok/x.size()[0]
training_accuracy+=nb_ok
training_loss+=loss.item()
logger.add_scalar("train/loss",loss.item(),iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
# Validation
training_accuracy/=n
training_loss/=n
out=self._validation_loop(model,device,validation_loader)
validation_loss,validation_accuracy=out["loss"],out["accuracy"]
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print(f"\t Round {around}. Found best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if patience_count == patience or epoch == self.config.max_epochs:
break
self.models.append(best_model)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x : str(x) + (100 - len(str(x))) * ' '
to_print += [fill(accs) + '\t' + str(ensemble)]
for item in to_print: print(item)
logger.message("Training Done...")
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.finetune import Finetune
import copy
import numpy as np
from pydoc import locate
from itertools import chain
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class KEnsemble(Finetune):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.to_print = []
print(f'voting {self.config.vote}')
def get_prediction_net(self,task):
for model in self.models:
model.eval()
return self
def forward(self, x):
outs = []
for model in self.models:
outs += [model(x)]
out = torch.stack(outs)
out = F.softmax(out, dim=-1)
if self.config.vote:
votes = out.argmax(-1)
oh_votes = F.one_hot(votes, num_classes=out.size(-1))
vote_count = oh_votes.sum(0).float()
most_confident = out.max(0)[0].max(-1)[1]
# Break ties
vote_count[torch.arange(vote_count.size(0)), most_confident] += 0.1
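# +0.1 is less than one full vote: it breaks exact ties toward the most
# confident model without ever changing a majority.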
out = vote_count
else:
out = out.mean(0)
return out
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models)==0 or getattr(self.config, 'init_from_scratch', False):
model_args=self.config.model
models = [self.build_initial_net(task,**model_args) for _ in range(self.config.k)]
n_params = sum(np.prod(x.shape) for x in models[0].parameters())
print(f'new model has {n_params} params')
else:
# get the last k models
models = [copy.deepcopy(model) for model in self.models]
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, models[0]) * self.config.k
# Creating datasets and loaders
training_loader, validation_loader = self.get_train_and_validation_loaders(training_dataset)
training_loaders = []
for i in range(self.config.k):
training_loaders += [torch.utils.data.DataLoader(
training_loader.dataset,
batch_size=training_loader.batch_size,
shuffle=True
)]
best_models = [copy.deepcopy(model) for model in models]
best_losses, best_accs = [1e10] * self.config.k, [0] * self.config.k
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device=self.config.device
models = [model.to(device) for model in models]
optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))
#Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
[model.train() for model in models]
# Keep a single track of these for now
training_loss=0.0
training_accuracy=0.0
n=0
for i, items in enumerate(zip(*training_loaders)):
xs, ys = [], []
for item in items:
x, y = item
x, y = x.to(device), y.to(device)
xs += [train_aug(x)]
ys += [y]
xs = torch.stack(xs)
ys = torch.stack(ys)
loss, acc = 0, 0
for model_idx in range(self.config.k):
model, x, y = models[model_idx], xs[model_idx], ys[model_idx]
predicted = model(x)
loss += F.cross_entropy(predicted,y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
acc += nb_ok/x.size()[0]
accuracy = acc / self.config.k
loss_ = loss.item() / self.config.k
training_accuracy += accuracy
training_loss += loss_
n += xs.size(1)
n_fwd_samples += xs.size(1)
logger.add_scalar("train/loss",loss_,iteration)
logger.add_scalar("train/accuracy",accuracy,iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
#Validation
training_accuracy /= (i + 1)
training_loss /= (i + 1)
outs = [self._validation_loop(model,device,validation_loader) for model in models]
validation_losses = [x['loss'] for x in outs]
validation_accuracies = [x['accuracy'] for x in outs]
validation_loss, validation_accuracy = np.mean(validation_losses), np.mean(validation_accuracies)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
found_best = False
for model_idx in range(self.config.k):
if validation_accuracies[model_idx] > best_accs[model_idx]:
print("\tFound best model at epoch ",epoch, '\t', model_idx)
best_models[model_idx].load_state_dict(_state_dict(models[model_idx],"cpu"))
best_accs[model_idx] = validation_accuracies[model_idx]
found_best = True
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")
if found_best:
patience_count = 0
else:
patience_count += 1
if patience_count == patience:
break
# overwrite the best models
self.models = nn.ModuleList(best_models)
# Evaluate each model individually :
accs = []
for model in self.models:
accs += [self._validation_loop(model, device, validation_loader)['accuracy']]
self.prog_pred_stats = []
ensemble = self._validation_loop(self, device, validation_loader)['accuracy']
fill = lambda x: str(x).ljust(100)  # pad to a fixed-width column for aligned printing
self.to_print += [fill(accs) + '\t' + str(ensemble)]
for item in self.to_print: print(item)
logger.message("Training Done...")
# all k ensemble members share an architecture, so count one model's params and multiply
logger.add_scalar('train/model_params', len(self.models) * sum([np.prod(x.shape) for x in self.models[0].parameters()]), 0)
# TODO: FIX! this is wrong
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6 * len(self.models), 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', np.mean(best_accs), 0)
return self
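# A minimal sketch (hypothetical helper, not part of the class above) of what the
# k independently shuffled loaders built earlier buy us: a bagging-style ensemble
# where each member sees the same data in a different order with its own
# augmentations, and prediction averages the members' softmax outputs:
#
#     def ensemble_predict(models, x):
#         probs = torch.stack([F.softmax(m(x), dim=1) for m in models])
#         return probs.mean(0).argmax(1)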
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import torch
import numpy as np
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.core import SupervisedCLModel
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
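# Note: snapshotting the state dict on CPU keeps per-task "best model" copies out
# of GPU memory; a CPU state dict can later be loaded into a model on any device.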
class Finetune_Grow(SupervisedCLModel):
def __init__(self, stream, clmodel_args):
super().__init__()
self.models=[]
self.config=clmodel_args
def get_prediction_net(self,task):
if task.task_descriptor() is None:
model = self.models[-1]
else:
model = self.models[task.task_descriptor()]
model.eval()
return model
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
if len(self.models)==0:
model_args=self.config.model
model=self.build_initial_net(task,**model_args)
elif (task.task_descriptor() % self.config['grow_every']) == 0:
print('growing')
model=copy.deepcopy(self.models[task.task_descriptor()-1])
model=model.grow(validation_loader,**self.config)
else:
model=copy.deepcopy(self.models[task.task_descriptor()-1])
if getattr(self.config, 'init_from_scratch', False):
print('re-initializing the model')
def weight_reset(m):
# only modules that define reset_parameters (convs, linears, norms) are re-initialized
try: m.reset_parameters()
except AttributeError: pass
model.apply(weight_reset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device = self.config.device
model.to(device)
flops_per_input = self.count_flops(task, model)
optimizer = self.get_optimizer(model.parameters())
# Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
# apply transformations
x = train_aug(raw_x)
predicted = model(x)
loss = F.cross_entropy(predicted, y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
accuracy = nb_ok / x.size(0)
logger.add_scalar("train/loss", loss.item(), iteration)
logger.add_scalar("train/accuracy", accuracy, iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
# Validation
out = self._validation_loop(model, device, validation_loader)
validation_loss, validation_accuracy = out["loss"], out["accuracy"]
logger.add_scalar("validation/loss", validation_loss, epoch)
logger.add_scalar("validation/accuracy", validation_accuracy, epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Loss {validation_loss:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
logger.message("Training Done...")
return self
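# A hedged sketch of the grow() contract Finetune_Grow relies on (names are
# hypothetical): grow() should return a strictly larger network, ideally
# initialized so the function computed so far is preserved (net2net-style widening):
#
#     class GrowableNet(nn.Module):
#         def grow(self, validation_loader, **config):
#             wider = make_wider_copy(self)    # hypothetical helper
#             wider.init_from_smaller(self)    # function-preserving transfer
#             return wider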
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from crlapi.core import CLModel
from crlapi.sl.clmodels.core import SupervisedCLModel
import time
import copy
import numpy as np
from pydoc import locate
def _state_dict(model, device):
sd = model.state_dict()
for k, v in sd.items():
sd[k] = v.to(device)
return sd
class Finetune(SupervisedCLModel):
def __init__(self, stream, clmodel_args):
super().__init__()
self.models = nn.ModuleList()
self.config=clmodel_args
def get_prediction_net(self,task):
if task.task_descriptor() is None:
model = self.models[-1]
else:
model = self.models[task.task_descriptor()]
model.eval()
return model
def update(self, task, logger):
assert isinstance(task.task_descriptor(),int)
if len(self.models) == 0 or getattr(self.config, 'init_from_scratch', False):
model_args = self.config.model
model = self.build_initial_net(task, **model_args)
n_params = sum(np.prod(x.shape) for x in model.parameters())
print(model)
print(f'new model has {n_params} params')
else:
model = copy.deepcopy(self.models[task.task_descriptor() - 1])
logger.message("Building training dataset")
training_dataset = task.task_resources().make()
flops_per_input = self.count_flops(task, model)
# Creating datasets and loaders
training_loader,validation_loader = self.get_train_and_validation_loaders(training_dataset)
best_model=copy.deepcopy(model)
best_loss, best_acc = None, None
# Optionally create GPU training augmentations
train_aug = self.get_train_augs()
# Optionally use patience :)
patience = self.config.patience
patience_delta = self.config.patience_delta
patience_count = 0
device = self.config.device
model.to(device)
optimizer = self.get_optimizer(model.parameters())
# Launching training procedure
logger.message("Start training for " + str(self.config.max_epochs) + " epochs")
iteration, n_fwd_samples = 0, 0
for epoch in range(self.config.max_epochs):
# Make sure model is ready for train
model.train()
#Training loop
training_loss = 0.0
training_accuracy = 0.0
n = 0
start = time.time()
for i, (raw_x, y) in enumerate(training_loader):
raw_x, y = raw_x.to(device), y.to(device)
n += raw_x.size(0)
# apply transformations
x = train_aug(raw_x)
predicted = model(x)
loss = F.cross_entropy(predicted, y)
nb_ok = predicted.max(1)[1].eq(y).float().sum().item()
accuracy = nb_ok / x.size(0)
training_accuracy += nb_ok
training_loss += loss.item()
logger.add_scalar("train/loss", loss.item(), iteration)
logger.add_scalar("train/accuracy", accuracy, iteration)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iteration += 1
n_fwd_samples += x.size(0)
# Validation
epoch_time = time.time() - start
training_accuracy /= n        # nb_ok counts were accumulated, so divide by samples
training_loss /= (i + 1)      # per-batch mean losses were accumulated, so divide by batches
out = self._validation_loop(model, device, validation_loader)
validation_loss, validation_accuracy = out["loss"], out["accuracy"]
logger.add_scalar('training/one_epoch_time', epoch_time, epoch)
logger.add_scalar("validation/loss",validation_loss,epoch)
logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
# Right now CV against accuracy
# if best_loss is None or validation_loss < (best_loss - patience_delta):
if best_acc is None or validation_accuracy > (best_acc + patience_delta):
print("\tFound best model at epoch ",epoch)
best_model.load_state_dict(_state_dict(model,"cpu"))
best_loss = validation_loss
best_acc = validation_accuracy
patience_count = 0
else:
patience_count += 1
logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}\t Time {epoch_time:.4f}")
if patience_count == patience:
break
self.models.append(best_model)
logger.message("Training Done...")
logger.add_scalar('train/model_params', sum([np.prod(x.shape) for x in model.parameters()]), 0)
logger.add_scalar('train/one_sample_megaflop', flops_per_input / 1e6, 0)
logger.add_scalar('train/total_megaflops', n_fwd_samples * flops_per_input / 1e6, 0)
logger.add_scalar('train/best_validation_accuracy', best_acc, 0)
return self
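# A minimal config sketch (values hypothetical) covering the attributes that
# Finetune and Finetune_Grow read from self.config:
#
#     from types import SimpleNamespace
#     config = SimpleNamespace(
#         model={},                 # kwargs forwarded to build_initial_net
#         device="cuda:0",
#         max_epochs=100,
#         patience=10,              # epochs without improvement before stopping
#         patience_delta=0.0,       # minimum accuracy gain that counts as progress
#         init_from_scratch=False,  # re-initialize weights for each new task
#     )
#
# Finetune_Grow additionally indexes config['grow_every'] via item access, so its
# config must also support subscripting (e.g. an OmegaConf/DictConfig-style object).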
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import re
from datetime import datetime
import requests
def get_ad_archive_id(data):
"""
Extract ad_archive_id from ad_snapshot_url
"""
return re.search(r"/\?id=([0-9]+)", data["ad_snapshot_url"]).group(1)
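# Example: for a snapshot URL of the form
# "https://www.facebook.com/ads/archive/render_ad/?id=123456789&access_token=...",
# the regex above extracts "123456789".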
class FbAdsLibraryTraversal:
default_url_pattern = (
"https://graph.facebook.com/{}/ads_archive?access_token={}&"
+ "fields={}&search_terms={}&ad_reached_countries={}&search_page_ids={}&"
+ "ad_active_status={}&limit={}"
)
default_api_version = "v14.0"
def __init__(
self,
access_token,
fields,
search_term,
country,
search_page_ids="",
ad_active_status="ALL",
after_date="1970-01-01",
page_limit=500,
api_version=None,
retry_limit=3,
):
self.page_count = 0
self.access_token = access_token
self.fields = fields
self.search_term = search_term
self.country = country
self.after_date = after_date
self.search_page_ids = search_page_ids
self.ad_active_status = ad_active_status
self.page_limit = page_limit
self.retry_limit = retry_limit
if api_version is None:
self.api_version = self.default_api_version
else:
self.api_version = api_version
def generate_ad_archives(self):
next_page_url = self.default_url_pattern.format(
self.api_version,
self.access_token,
self.fields,
self.search_term,
self.country,
self.search_page_ids,
self.ad_active_status,
self.page_limit,
)
return self.__class__._get_ad_archives_from_url(
next_page_url, after_date=self.after_date, retry_limit=self.retry_limit
)
@staticmethod
def _get_ad_archives_from_url(
next_page_url, after_date="1970-01-01", retry_limit=3
):
last_error_url = None
last_retry_count = 0
start_time_cutoff_after = datetime.strptime(after_date, "%Y-%m-%d").timestamp()
while next_page_url is not None:
response = requests.get(next_page_url)
response_data = json.loads(response.text)
if "error" in response_data:
if next_page_url == last_error_url:
# failed again
if last_retry_count >= retry_limit:
raise Exception(
"Error message: [{}], failed on URL: [{}]".format(
json.dumps(response_data["error"]), next_page_url
)
)
else:
last_error_url = next_page_url
last_retry_count = 0
last_retry_count += 1
continue
filtered = list(
filter(
lambda ad_archive: ("ad_delivery_start_time" in ad_archive)
and (
datetime.strptime(
ad_archive["ad_delivery_start_time"], "%Y-%m-%d"
).timestamp()
>= start_time_cutoff_after
),
response_data["data"],
)
)
if len(filtered) == 0:
# no ads newer than after_date on this page; stop paging
break
yield filtered
if "paging" in response_data:
next_page_url = response_data["paging"]["next"]
else:
next_page_url = None
@classmethod
def generate_ad_archives_from_url(cls, failure_url, after_date="1970-01-01"):
"""
If a previous run failed with an error, we can resume from the last failure URL.
"""
return cls._get_ad_archives_from_url(failure_url, after_date=after_date)
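# A hedged usage sketch (token, fields and query are placeholders):
#
#     api = FbAdsLibraryTraversal(
#         access_token="<ACCESS_TOKEN>",
#         fields="id,ad_snapshot_url,page_name",
#         search_term="solar panels",
#         country="US",
#     )
#     for page in api.generate_ad_archives():
#         for ad in page:
#             print(get_ad_archive_id(ad), ad.get("page_name"))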
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
from collections import Counter
def get_operators():
"""
Feel free to add your own 'operator' here (see the sketch after this function);
each operator receives:
generator_ad_archives: a generator of arrays of ad_archive objects
args: extra arguments passed in from the CLI
is_verbose: enable this for debugging information
"""
return {
"count": count_ads,
"save": save_to_file,
"save_to_csv": save_to_csv,
"start_time_trending": count_start_time_trending,
}
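# A sketch of a custom operator (name and behavior hypothetical); it must accept
# the same (generator_ad_archives, args, is_verbose) signature and be registered
# in the dict returned by get_operators() above:
#
#     def count_pages(generator_ad_archives, args, is_verbose=False):
#         pages = set()
#         for ad_archives in generator_ad_archives:
#             pages.update(ad["page_id"] for ad in ad_archives if "page_id" in ad)
#         print("Total number of distinct pages: %d" % len(pages))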
def count_ads(generator_ad_archives, args, is_verbose=False):
"""
Count how many ad_archives match your query
"""
count = 0
for ad_archives in generator_ad_archives:
count += len(ad_archives)
if is_verbose:
print("counting %d" % count)
print("Total number of ads match the query: {}".format(count))
def save_to_file(generator_ad_archives, args, is_verbose=False):
"""
Save all retrieved ad_archives to the file; each ad_archive is
stored as a single-line JSON object;
"""
if len(args) != 1:
raise Exception("save action requires exact 1 param: output file")
with open(args[0], "w+") as file:
count = 0
for ad_archives in generator_ad_archives:
for data in ad_archives:
file.write(json.dumps(data))
file.write("\n")
count += len(ad_archives)
if is_verbose:
print("Items wrote: %d" % count)
print("Total number of ads wrote: %d" % count)
def save_to_csv(generator_ad_archives, args, fields, is_verbose=False):
"""
Save all retrieved ad_archives to the output file. Each ad_archive will be
stored as a row in the CSV
"""
if len(args) != 1:
raise Exception("save_to_csv action takes 1 argument: output_file")
delimiter = ","
total_count = 0
output = fields + "\n"
output_file = args[0]
for ad_archives in generator_ad_archives:
total_count += len(ad_archives)
if is_verbose:
print("Items processed: %d" % total_count)
for ad_archive in ad_archives:
for field in list(fields.split(delimiter)):
if field in ad_archive:
value = ad_archive[field]
if type(value) == dict or (type(value) == list and len(value) > 0 and type(value[0]) == dict):
value = json.dumps(value)
elif type(value) == list:
value = delimiter.join(map(str, value))
else:
value = str(value)  # coerce numeric fields so .replace() below cannot fail
output += (
'"' + value.replace("\n", "").replace('"', "") + '"' + delimiter
)
else:
output += delimiter
output = output.rstrip(",") + "\n"
with open(output_file, "w") as csvfile:
csvfile.write(output)
print("Successfully wrote data to file: %s" % output_file)
def count_start_time_trending(generator_ad_archives, args, is_verbose=False):
"""
Output the count of ads by delivery start date (trending);
accepts one parameter:
output_file: path to write the csv
"""
if len(args) != 1:
raise Exception("start_time_trending action takes 1 arguments: output_file")
total_count = 0
output_file = args[0]
date_to_count = Counter({})
for ad_archives in generator_ad_archives:
total_count += len(ad_archives)
if is_verbose:
print("Item processed: %d" % total_count)
start_dates = list(
map(
lambda data: datetime.datetime.strptime(
data["ad_delivery_start_time"], "%Y-%m-%d"
).strftime("%Y-%m-%d"),
ad_archives,
)
)
date_to_count.update(start_dates)
with open(output_file, "w") as csvfile:
csvfile.write("date, count\n")
for date in sorted(date_to_count.keys()):  # write rows in chronological order
csvfile.write("%s, %s\n" % (date, date_to_count[date]))
print("Successfully wrote data to file: %s" % output_file)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from iso3166 import countries
supported_countries = [
"AT",
"BE",
"BG",
"CA",
"CY",
"CZ",
"DE",
"DK",
"EE",
"ES",
"FI",
"FR",
"GB",
"GR",
"HR",
"HU",
"IE",
"IL",
"IN",
"IT",
"LT",
"LU",
"LV",
"MT",
"NL",
"PL",
"PT",
"RO",
"SE",
"SI",
"SK",
"UA",
"US",
]
valid_query_fields = [
"ad_creation_time",
"ad_creative_body",
"ad_creative_bodies",
"ad_creative_link_caption",
"ad_creative_link_captions",
"ad_creative_link_description",
"ad_creative_link_descriptions",
"ad_creative_link_title",
"ad_creative_link_titles",
"ad_delivery_start_time",
"ad_delivery_stop_time",
"ad_snapshot_url",
"currency",
"delivery_by_region",
"demographic_distribution",
"bylines",
"id",
"impressions",
"languages",
"page_id",
"page_name",
"potential_reach",
"publisher_platforms",
"region_distribution",
"spend",
]
def get_country_code(country_str):
"""
Convert the country input to valid country code
"""
try:
country = countries.get(country_str)
except Exception:
country = None
if not country or country.alpha2 not in supported_countries:
return None
return country.alpha2
def is_valid_fields(field):
"""
Check whether `field` is one of the fields supported by the Facebook Ads Library API
"""
return field in valid_query_fields
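# A brief usage sketch (assuming the iso3166 package resolves the input):
#
#     get_country_code("us")        # -> "US"
#     get_country_code("Atlantis")  # -> None (unknown or unsupported)
#     is_valid_fields("page_name")  # -> True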
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from fb_ads_library_api import FbAdsLibraryTraversal
from fb_ads_library_api_operators import get_operators, save_to_csv
from fb_ads_library_api_utils import get_country_code, is_valid_fields
def get_parser():
parser = argparse.ArgumentParser(
description="The Facebook Ads Library API CLI Utility"
)
parser.add_argument(
"-t",
"--access-token",
help="The Facebook developer access token",
required=True,
)
parser.add_argument(
"-f",
"--fields",
help="Fields to retrieve from the Ad Library API",
required=True,
type=validate_fields_param,
)
parser.add_argument("-s", "--search-term", help="The term you want to search for")
parser.add_argument(
"-c",
"--country",
help="Comma-separated country code (no spaces)",
required=True,
type=validate_country_param,
)
parser.add_argument(
"--search-page-ids", help="The specific Facebook Page you want to search"
)
parser.add_argument(
"--ad-active-status",
help="Filter by the current status of the ads at the moment the script runs",
)
parser.add_argument(
"--after-date", help="Only return ads that started delivery after this date"
)
parser.add_argument("--batch-size", type=int, help="Batch size")
parser.add_argument(
"--retry-limit",
type=int,
help="When an error occurs, the script will abort if it fails to get the same batch this amount of times",
)
parser.add_argument("-v", "--verbose", action="store_true")
actions = ",".join(get_operators().keys())
parser.add_argument(
"action", help="Action to take on the ads, possible values: %s" % actions
)
parser.add_argument(
"args", nargs=argparse.REMAINDER, help="The parameter for the specific action"
)
return parser
def validate_country_param(country_input):
if not country_input:
return ""
country_list = list(filter(lambda x: x.strip(), country_input.split(",")))
if not country_list:
raise argparse.ArgumentTypeError("Country cannot be empty")
valid_country_codes = list(map(lambda x: get_country_code(x), country_list))
invalid_inputs = {
key: value
for (key, value) in zip(country_list, valid_country_codes)
if value is None
}
if invalid_inputs:
raise argparse.ArgumentTypeError(
"Invalid/unsupported country code: %s" % (",".join(invalid_inputs.keys()))
)
else:
return ",".join(valid_country_codes)
def validate_fields_param(fields_input):
if not fields_input:
raise argparse.ArgumentTypeError("Fields cannot be empty")
fields_list = list(
filter(lambda x: x, map(lambda x: x.strip(), fields_input.split(",")))
)
if not fields_list:
raise argparse.ArgumentTypeError("Fields cannot be empty")
invalid_fields = list(filter(lambda x: not is_valid_fields(x), fields_list))
if not invalid_fields:
return ",".join(fields_list)
else:
raise argparse.ArgumentTypeError(
"Unsupported fields: %s" % (",".join(invalid_fields))
)
def main():
parser = get_parser()
opts = parser.parse_args()
if not opts.search_term and not opts.search_page_ids:
print("At least one must be set: --search-term, --search-page-ids")
sys.exit(1)
if not opts.search_term:
search_term = "."
else:
search_term = opts.search_term
api = FbAdsLibraryTraversal(
opts.access_token, opts.fields, search_term, opts.country
)
if opts.search_page_ids:
api.search_page_ids = opts.search_page_ids
if opts.ad_active_status:
api.ad_active_status = opts.ad_active_status
if opts.batch_size:
api.page_limit = opts.batch_size
if opts.retry_limit:
api.retry_limit = opts.retry_limit
if opts.after_date:
api.after_date = opts.after_date
generator_ad_archives = api.generate_ad_archives()
if opts.action in get_operators():
if opts.action == "save_to_csv":
save_to_csv(
generator_ad_archives, opts.args, opts.fields, is_verbose=opts.verbose
)
else:
get_operators()[opts.action](
generator_ad_archives, opts.args, is_verbose=opts.verbose
)
else:
print("Invalid 'action' value: %s" % opts.action)
sys.exit(1)
if __name__ == "__main__":
main()
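# A hedged example invocation (script name and token are placeholders):
#
#     python fb_ads_library_api_cli.py \
#         -t <ACCESS_TOKEN> \
#         -f ad_snapshot_url,ad_delivery_start_time,page_name \
#         -c US \
#         -s "solar panels" \
#         save results.jsonl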
|