#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import glob
import argparse
import numpy as np
import resampy
from scikits.audiolab import Sndfile, Format
def load_wav(fname, rate=None):
fp = Sndfile(fname, 'r')
_signal = fp.read_frames(fp.nframes)
_signal = _signal.reshape((-1, fp.channels))
_rate = fp.samplerate
if _signal.ndim == 1:
_signal = _signal.reshape((-1, 1))
if rate is not None and rate != _rate:
signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_best')
else:
signal = _signal
rate = _rate
return signal, rate
def save_wav(fname, signal, rate):
fp = Sndfile(fname, 'w', Format('wav'), signal.shape[1], rate)
fp.write_frames(signal)
fp.close()
def reEncodeAudio(audio_path, new_rate):
audio, audio_rate = load_wav(audio_path,new_rate)
save_wav(audio_path, audio, new_rate)
def main():
parser = argparse.ArgumentParser(description="re-encode all audios under a directory")
parser.add_argument("--audio_dir_path", type=str, required=True)
parser.add_argument("--new_rate", type=int, default=16000)
args = parser.parse_args()
audio_list = glob.glob(args.audio_dir_path + '/*.wav')
print("Total number of audios to re-encode: " + str(len(audio_list)))
for audio_path in audio_list:
reEncodeAudio(audio_path, args.new_rate) #glob already returns paths under audio_dir_path
if __name__ == '__main__':
main()
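# Usage sketch (illustrative; the script filename and directory path are assumptions, not part of this file):
#   python reencode_audios.py --audio_dir_path /path/to/binaural_audios --new_rate 16000
# The script resamples every .wav under --audio_dir_path in place to --new_rate.
# Note: scikits.audiolab is a Python 2-era dependency; soundfile or librosa are common substitutes when porting.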
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from torch.autograd import Variable
from tensorboardX import SummaryWriter
def create_optimizer(nets, opt):
(net_visual, net_audio) = nets
param_groups = [{'params': net_visual.parameters(), 'lr': opt.lr_visual},
{'params': net_audio.parameters(), 'lr': opt.lr_audio}]
if opt.optimizer == 'sgd':
return torch.optim.SGD(param_groups, momentum=opt.beta1, weight_decay=opt.weight_decay)
elif opt.optimizer == 'adam':
return torch.optim.Adam(param_groups, betas=(opt.beta1,0.999), weight_decay=opt.weight_decay)
def decrease_learning_rate(optimizer, decay_factor=0.94):
for param_group in optimizer.param_groups:
param_group['lr'] *= decay_factor
#used to display validation loss
def display_val(model, loss_criterion, writer, index, dataset_val, opt):
losses = []
with torch.no_grad():
for i, val_data in enumerate(dataset_val):
if i < opt.validation_batches:
output = model.forward(val_data)
loss = loss_criterion(output['binaural_spectrogram'], output['audio_gt'])
losses.append(loss.item())
else:
break
avg_loss = sum(losses)/len(losses)
if opt.tensorboard:
writer.add_scalar('data/val_loss', avg_loss, index)
print('val loss: %.3f' % avg_loss)
return avg_loss
#parse arguments
opt = TrainOptions().parse()
opt.device = torch.device("cuda")
#construct data loader
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training clips = %d' % dataset_size)
#create validation set data loader if validation_on option is set
if opt.validation_on:
#temporarily set to val to load val data
opt.mode = 'val'
data_loader_val = CreateDataLoader(opt)
dataset_val = data_loader_val.load_data()
dataset_size_val = len(data_loader_val)
print('#validation clips = %d' % dataset_size_val)
opt.mode = 'train' #set it back
if opt.tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter(comment=opt.name)
else:
writer = None
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
# set up optimizer
optimizer = create_optimizer(nets, opt)
# set up loss function
loss_criterion = torch.nn.MSELoss()
if(len(opt.gpu_ids) > 0):
loss_criterion.cuda(opt.gpu_ids[0])
# initialization
total_steps = 0
data_loading_time = []
model_forward_time = []
model_backward_time = []
batch_loss = []
best_err = float("inf")
for epoch in range(1, opt.niter+1):
torch.cuda.synchronize()
epoch_start_time = time.time()
if(opt.measure_time):
iter_start_time = time.time()
for i, data in enumerate(dataset):
if(opt.measure_time):
torch.cuda.synchronize()
iter_data_loaded_time = time.time()
total_steps += opt.batchSize
# forward pass
model.zero_grad()
output = model.forward(data)
# compute loss
loss = loss_criterion(output['binaural_spectrogram'], Variable(output['audio_gt'], requires_grad=False))
batch_loss.append(loss.item())
if(opt.measure_time):
torch.cuda.synchronize()
iter_data_forwarded_time = time.time()
# update optimizer
optimizer.zero_grad()
loss.backward()
optimizer.step()
if(opt.measure_time):
iter_model_backwarded_time = time.time()
data_loading_time.append(iter_data_loaded_time - iter_start_time)
model_forward_time.append(iter_data_forwarded_time - iter_data_loaded_time)
model_backward_time.append(iter_model_backwarded_time - iter_data_forwarded_time)
if(total_steps // opt.batchSize % opt.display_freq == 0):
print('Display training progress at (epoch %d, total_steps %d)' % (epoch, total_steps))
avg_loss = sum(batch_loss) / len(batch_loss)
print('Average loss: %.3f' % (avg_loss))
batch_loss = []
if opt.tensorboard:
writer.add_scalar('data/loss', avg_loss, total_steps)
if(opt.measure_time):
print('average data loading time: ' + str(sum(data_loading_time)/len(data_loading_time)))
print('average forward time: ' + str(sum(model_forward_time)/len(model_forward_time)))
print('average backward time: ' + str(sum(model_backward_time)/len(model_backward_time)))
data_loading_time = []
model_forward_time = []
model_backward_time = []
print('end of display \n')
if(total_steps // opt.batchSize % opt.save_latest_freq == 0):
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_latest.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_latest.pth'))
if(total_steps // opt.batchSize % opt.validation_freq == 0 and opt.validation_on):
model.eval()
opt.mode = 'val'
print('Display validation results at (epoch %d, total_steps %d)' % (epoch, total_steps))
val_err = display_val(model, loss_criterion, writer, total_steps, dataset_val, opt)
print('end of display \n')
model.train()
opt.mode = 'train'
#save the model that achieves the smallest validation error
if val_err < best_err:
best_err = val_err
print('saving the best model (epoch %d, total_steps %d) with validation error %.3f\n' % (epoch, total_steps, val_err))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_best.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_best.pth'))
if(opt.measure_time):
iter_start_time = time.time()
if(epoch % opt.save_epoch_freq == 0):
print('saving the model at the end of epoch %d, total_steps %d' % (epoch, total_steps))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_visual.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_audio.pth'))
#decrease learning rate 6% every opt.learning_rate_decrease_itr epochs
if(opt.learning_rate_decrease_itr > 0 and epoch % opt.learning_rate_decrease_itr == 0):
decrease_learning_rate(optimizer, opt.decay_factor)
print('decreased learning rate by ', opt.decay_factor)
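# Example invocation (a sketch; the script name and dataset path are assumptions, not part of this file):
#   python train.py --hdf5FolderPath /path/to/hdf5 --name spatialAudioVisual \
#       --batchSize 32 --lr_visual 0.0001 --lr_audio 0.001 \
#       --learning_rate_decrease_itr 10 --validation_on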
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import librosa
import argparse
import numpy as np
from numpy import linalg as LA
from scipy.signal import hilbert
from data.audioVisual_dataset import generate_spectrogram
import statistics as stat
def normalize(samples):
return samples / np.maximum(1e-20, np.max(np.abs(samples)))
def STFT_L2_distance(predicted_binaural, gt_binaural):
#channel1
predicted_spect_channel1 = librosa.core.stft(np.asfortranarray(predicted_binaural[0,:]), n_fft=512, hop_length=160, win_length=400, center=True)
gt_spect_channel1 = librosa.core.stft(np.asfortranarray(gt_binaural[0,:]), n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(predicted_spect_channel1), axis=0)
imag = np.expand_dims(np.imag(predicted_spect_channel1), axis=0)
predicted_realimag_channel1 = np.concatenate((real, imag), axis=0)
real = np.expand_dims(np.real(gt_spect_channel1), axis=0)
imag = np.expand_dims(np.imag(gt_spect_channel1), axis=0)
gt_realimag_channel1 = np.concatenate((real, imag), axis=0)
channel1_distance = np.mean(np.power((predicted_realimag_channel1 - gt_realimag_channel1), 2))
#channel2
predicted_spect_channel2 = librosa.core.stft(np.asfortranarray(predicted_binaural[1,:]), n_fft=512, hop_length=160, win_length=400, center=True)
gt_spect_channel2 = librosa.core.stft(np.asfortranarray(gt_binaural[1,:]), n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(predicted_spect_channel2), axis=0)
imag = np.expand_dims(np.imag(predicted_spect_channel2), axis=0)
predicted_realimag_channel2 = np.concatenate((real, imag), axis=0)
real = np.expand_dims(np.real(gt_spect_channel2), axis=0)
imag = np.expand_dims(np.imag(gt_spect_channel2), axis=0)
gt_realimag_channel2 = np.concatenate((real, imag), axis=0)
channel2_distance = np.mean(np.power((predicted_realimag_channel2 - gt_realimag_channel2), 2))
#sum the distance between two channels
stft_l2_distance = channel1_distance + channel2_distance
return float(stft_l2_distance)
def Envelope_distance(predicted_binaural, gt_binaural):
#channel1
pred_env_channel1 = np.abs(hilbert(predicted_binaural[0,:]))
gt_env_channel1 = np.abs(hilbert(gt_binaural[0,:]))
channel1_distance = np.sqrt(np.mean((gt_env_channel1 - pred_env_channel1)**2))
#channel2
pred_env_channel2 = np.abs(hilbert(predicted_binaural[1,:]))
gt_env_channel2 = np.abs(hilbert(gt_binaural[1,:]))
channel2_distance = np.sqrt(np.mean((gt_env_channel2 - pred_env_channel2)**2))
#sum the distance between two channels
envelope_distance = channel1_distance + channel2_distance
return float(envelope_distance)
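# Illustrative sketch (not part of the original evaluation): both metrics expect float
# arrays of shape (2, num_samples), for example:
#   fake_gt = np.random.randn(2, 16000)
#   fake_pred = fake_gt + 0.01 * np.random.randn(2, 16000)
#   STFT_L2_distance(fake_pred, fake_gt)   # small positive float
#   Envelope_distance(fake_pred, fake_gt)  # small positive float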
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--results_root', type=str, required=True)
parser.add_argument('--audio_sampling_rate', default=16000, type=int, help='audio sampling rate')
parser.add_argument('--real_mono', action='store_true', help='whether the predicted audio to evaluate is mono (replicated to two channels if so)')
parser.add_argument('--normalization', action='store_true', help='normalize the waveforms before computing the metrics')
args = parser.parse_args()
stft_distance_list = []
envelope_distance_list = []
audioNames = os.listdir(args.results_root)
index = 1
for audio_name in audioNames:
if index % 10 == 0:
print("Evaluating testing example " + str(index) + " : " + audio_name)
#check whether input binaural is mono, replicate to two channels if it's mono
if args.real_mono:
mono_sound, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'mixed_mono.wav'), sr=args.audio_sampling_rate)
predicted_binaural = np.repeat(np.expand_dims(mono_sound, 0), 2, axis=0)
if args.normalization:
predicted_binaural = normalize(predicted_binaural)
else:
predicted_binaural, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'predicted_binaural.wav'), sr=args.audio_sampling_rate, mono=False)
if args.normalization:
predicted_binaural = normalize(predicted_binaural)
gt_binaural, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'input_binaural.wav'), sr=args.audio_sampling_rate, mono=False)
if args.normalization:
gt_binaural = normalize(gt_binaural)
#get results for this audio
stft_distance_list.append(STFT_L2_distance(predicted_binaural, gt_binaural))
envelope_distance_list.append(Envelope_distance(predicted_binaural, gt_binaural))
index = index + 1
#print the results
print("STFT L2 Distance: ", stat.mean(stft_distance_list), stat.stdev(stft_distance_list), stat.stdev(stft_distance_list) / np.sqrt(len(stft_distance_list)))
print("Average Envelope Distance: ", stat.mean(envelope_distance_list), stat.stdev(envelope_distance_list), stat.stdev(envelope_distance_list) / np.sqrt(len(envelope_distance_list)))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import librosa
import numpy as np
from PIL import Image
import subprocess
from options.test_options import TestOptions
import torchvision.transforms as transforms
import torch
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from data.audioVisual_dataset import generate_spectrogram
def audio_normalize(samples, desired_rms = 0.1, eps = 1e-4):
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return rms / desired_rms, samples
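# audio_normalize rescales a stereo segment to a target RMS and returns the factor
# (rms / desired_rms) needed to undo that scaling, which the sliding-window loop below
# uses to restore the original loudness of each predicted segment.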
def main():
#load test arguments
opt = TestOptions().parse()
opt.device = torch.device("cuda")
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
model.eval()
#load the audio to perform separation
audio, audio_rate = librosa.load(opt.input_audio_path, sr=opt.audio_sampling_rate, mono=False)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#define the transformation to perform on visual frames
vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]
vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
vision_transform = transforms.Compose(vision_transform_list)
#perform spatialization over the whole audio using a sliding window approach
overlap_count = np.zeros((audio.shape)) #count the number of times a data point is calculated
binaural_audio = np.zeros((audio.shape))
#perform spatialization over the whole audio in a sliding-window fashion
sliding_window_start = 0
data = {}
samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)
while sliding_window_start + samples_per_window < audio.shape[-1]:
sliding_window_end = sliding_window_start + samples_per_window
normalizer, audio_segment = audio_normalize(audio[:,sliding_window_start:sliding_window_end])
audio_segment_channel1 = audio_segment[0,:]
audio_segment_channel2 = audio_segment[1,:]
audio_segment_mix = audio_segment_channel1 + audio_segment_channel2
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for current window
frame_index = int(round((((sliding_window_start + samples_per_window / 2.0) / audio.shape[-1]) * opt.input_audio_length + 0.05) * 10 ))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
binaural_audio[:,sliding_window_start:sliding_window_end] = binaural_audio[:,sliding_window_start:sliding_window_end] + reconstructed_binaural
overlap_count[:,sliding_window_start:sliding_window_end] = overlap_count[:,sliding_window_start:sliding_window_end] + 1
sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)
#deal with the last segment
normalizer, audio_segment = audio_normalize(audio[:,-samples_per_window:])
audio_segment_channel1 = audio_segment[0,:]
audio_segment_channel2 = audio_segment[1,:]
audio_segment_mix = audio_segment_channel1 + audio_segment_channel2 #recompute the mix for the last segment
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for last window
frame_index = int(round(((opt.input_audio_length - opt.audio_length / 2.0) + 0.05) * 10))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
#accumulate the spatialized audio of the last segment into binaural_audio
binaural_audio[:,-samples_per_window:] = binaural_audio[:,-samples_per_window:] + reconstructed_binaural
overlap_count[:,-samples_per_window:] = overlap_count[:,-samples_per_window:] + 1
#divide aggregated predicted audio by their corresponding counts
predicted_binaural_audio = np.divide(binaural_audio, overlap_count)
#check output directory
if not os.path.isdir(opt.output_dir_root):
os.mkdir(opt.output_dir_root)
mixed_mono = (audio_channel1 + audio_channel2) / 2
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'predicted_binaural.wav'), predicted_binaural_audio, opt.audio_sampling_rate)
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'mixed_mono.wav'), mixed_mono, opt.audio_sampling_rate)
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'input_binaural.wav'), audio, opt.audio_sampling_rate)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--input_audio_path', required=True, help='path to the input audio file')
self.parser.add_argument('--video_frame_path', required=True, help='path to the input video frames')
self.parser.add_argument('--output_dir_root', type=str, default='test_output', help='path to the output files')
self.parser.add_argument('--input_audio_length', type=float, default=10, help='length of the testing video/audio')
self.parser.add_argument('--hop_size', default=0.05, type=float, help='the hop length to perform audio spatialization in a sliding window approach')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
self.mode = "test"
self.isTrain = False
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--display_freq', type=int, default=50, help='frequency of displaying average loss')
self.parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
self.parser.add_argument('--niter', type=int, default=1000, help='# of epochs to train')
self.parser.add_argument('--learning_rate_decrease_itr', type=int, default=-1, help='decrease the learning rate by decay_factor every this many epochs (-1 to disable)')
self.parser.add_argument('--decay_factor', type=float, default=0.94, help='learning rate decay factor')
self.parser.add_argument('--tensorboard', type=bool, default=False, help='use tensorboard to visualize loss change ')
self.parser.add_argument('--measure_time', type=bool, default=False, help='measure time of different steps during training')
self.parser.add_argument('--validation_on', action='store_true', help='whether to test on validation set during training')
self.parser.add_argument('--validation_freq', type=int, default=100, help='frequency of testing on validation set')
self.parser.add_argument('--validation_batches', type=int, default=10, help='number of batches to test for validation')
self.parser.add_argument('--enable_data_augmentation', type=bool, default=True, help='whether to augment input frame')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
#optimizer arguments
self.parser.add_argument('--lr_visual', type=float, default=0.0001, help='learning rate for visual stream')
self.parser.add_argument('--lr_audio', type=float, default=0.001, help='learning rate for unet')
self.parser.add_argument('--optimizer', default='adam', type=str, help='adam or sgd for optimization')
self.parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam')
self.parser.add_argument('--weight_decay', default=0.0005, type=float, help='weights regularizer')
self.mode = "train"
self.isTrain = True
self.enable_data_augmentation = True
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--hdf5FolderPath', help='path to the folder that contains train.h5, val.h5 and test.h5')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
self.parser.add_argument('--name', type=str, default='spatialAudioVisual', help='name of the experiment. It decides where to store models')
self.parser.add_argument('--checkpoints_dir', type=str, default='checkpoints/', help='models are saved here')
self.parser.add_argument('--model', type=str, default='audioVisual', help='chooses how datasets are loaded.')
self.parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
self.parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
self.parser.add_argument('--audio_sampling_rate', default=16000, type=int, help='audio sampling rate')
self.parser.add_argument('--audio_length', default=0.63, type=float, help='audio length, default 0.63s')
self.enable_data_augmentation = True
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.mode = self.mode
self.opt.isTrain = self.isTrain
self.opt.enable_data_augmentation = self.enable_data_augmentation
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
#I should process the opt here, like gpu ids, etc.
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision
from .networks import VisualNet, AudioNet, weights_init
class ModelBuilder():
# builder for visual stream
def build_visual(self, weights=''):
pretrained = True
original_resnet = torchvision.models.resnet18(pretrained)
net = VisualNet(original_resnet)
if len(weights) > 0:
print('Loading weights for visual stream')
net.load_state_dict(torch.load(weights))
return net
#builder for audio stream
def build_audio(self, ngf=64, input_nc=2, output_nc=2, weights=''):
#AudioNet: 5 layer UNet
net = AudioNet(ngf, input_nc, output_nc)
net.apply(weights_init)
if len(weights) > 0:
print('Loading weights for audio stream')
net.load_state_dict(torch.load(weights))
return net
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from torch import optim
import torch.nn.functional as F
from . import networks,criterion
from torch.autograd import Variable
class AudioVisualModel(torch.nn.Module):
def name(self):
return 'AudioVisualModel'
def __init__(self, nets, opt):
super(AudioVisualModel, self).__init__()
self.opt = opt
#initialize model
self.net_visual, self.net_audio = nets
def forward(self, input, volatile=False):
visual_input = input['frame']
audio_diff = input['audio_diff_spec']
audio_mix = input['audio_mix_spec']
audio_gt = Variable(audio_diff[:,:,:-1,:], requires_grad=False)
input_spectrogram = Variable(audio_mix, requires_grad=False, volatile=volatile)
visual_feature = self.net_visual(Variable(visual_input, requires_grad=False, volatile=volatile))
mask_prediction = self.net_audio(input_spectrogram, visual_feature)
#complex masking to obtain the predicted spectrogram
spectrogram_diff_real = input_spectrogram[:,0,:-1,:] * mask_prediction[:,0,:,:] - input_spectrogram[:,1,:-1,:] * mask_prediction[:,1,:,:]
spectrogram_diff_img = input_spectrogram[:,0,:-1,:] * mask_prediction[:,1,:,:] + input_spectrogram[:,1,:-1,:] * mask_prediction[:,0,:,:]
binaural_spectrogram = torch.cat((spectrogram_diff_real.unsqueeze(1), spectrogram_diff_img.unsqueeze(1)), 1)
output = {'mask_prediction': mask_prediction, 'binaural_spectrogram': binaural_spectrogram, 'audio_gt': audio_gt}
return output
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
def unet_conv(input_nc, output_nc, norm_layer=nn.BatchNorm2d):
downconv = nn.Conv2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(output_nc)
return nn.Sequential(*[downconv, downnorm, downrelu])
def unet_upconv(input_nc, output_nc, outermost=False, norm_layer=nn.BatchNorm2d):
upconv = nn.ConvTranspose2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1)
uprelu = nn.ReLU(True)
upnorm = norm_layer(output_nc)
if not outermost:
return nn.Sequential(*[upconv, upnorm, uprelu])
else:
return nn.Sequential(*[upconv, nn.Sigmoid()])
def create_conv(input_channels, output_channels, kernel, paddings, batch_norm=True, Relu=True, stride=1):
model = [nn.Conv2d(input_channels, output_channels, kernel, stride = stride, padding = paddings)]
if(batch_norm):
model.append(nn.BatchNorm2d(output_channels))
if(Relu):
model.append(nn.ReLU())
return nn.Sequential(*model)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
class VisualNet(nn.Module):
def __init__(self, original_resnet):
super(VisualNet, self).__init__()
layers = list(original_resnet.children())[0:-2]
self.feature_extraction = nn.Sequential(*layers) #features before conv1x1
def forward(self, x):
x = self.feature_extraction(x)
return x
class AudioNet(nn.Module):
def __init__(self, ngf=64, input_nc=2, output_nc=2):
super(AudioNet, self).__init__()
#initialize layers
self.audionet_convlayer1 = unet_conv(input_nc, ngf)
self.audionet_convlayer2 = unet_conv(ngf, ngf * 2)
self.audionet_convlayer3 = unet_conv(ngf * 2, ngf * 4)
self.audionet_convlayer4 = unet_conv(ngf * 4, ngf * 8)
self.audionet_convlayer5 = unet_conv(ngf * 8, ngf * 8)
self.audionet_upconvlayer1 = unet_upconv(1296, ngf * 8) #1296 (audio-visual feature) = 784 (visual feature) + 512 (audio feature)
self.audionet_upconvlayer2 = unet_upconv(ngf * 16, ngf *4)
self.audionet_upconvlayer3 = unet_upconv(ngf * 8, ngf * 2)
self.audionet_upconvlayer4 = unet_upconv(ngf * 4, ngf)
self.audionet_upconvlayer5 = unet_upconv(ngf * 2, output_nc, True) #outermost layer use a sigmoid to bound the mask
self.conv1x1 = create_conv(512, 8, 1, 0) #reduce dimension of extracted visual features
def forward(self, x, visual_feat):
audio_conv1feature = self.audionet_convlayer1(x)
audio_conv2feature = self.audionet_convlayer2(audio_conv1feature)
audio_conv3feature = self.audionet_convlayer3(audio_conv2feature)
audio_conv4feature = self.audionet_convlayer4(audio_conv3feature)
audio_conv5feature = self.audionet_convlayer5(audio_conv4feature)
visual_feat = self.conv1x1(visual_feat)
visual_feat = visual_feat.view(visual_feat.shape[0], -1, 1, 1) #flatten visual feature
visual_feat = visual_feat.repeat(1, 1, audio_conv5feature.shape[-2], audio_conv5feature.shape[-1]) #tile visual feature
audioVisual_feature = torch.cat((visual_feat, audio_conv5feature), dim=1)
audio_upconv1feature = self.audionet_upconvlayer1(audioVisual_feature)
audio_upconv2feature = self.audionet_upconvlayer2(torch.cat((audio_upconv1feature, audio_conv4feature), dim=1))
audio_upconv3feature = self.audionet_upconvlayer3(torch.cat((audio_upconv2feature, audio_conv3feature), dim=1))
audio_upconv4feature = self.audionet_upconvlayer4(torch.cat((audio_upconv3feature, audio_conv2feature), dim=1))
mask_prediction = self.audionet_upconvlayer5(torch.cat((audio_upconv4feature, audio_conv1feature), dim=1)) * 2 - 1
return mask_prediction
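# Shape sketch (an illustration, assuming the demo's 224x448 RGB frames and 0.63 s of
# 16 kHz audio): the ResNet-18 feature map is 512 x 7 x 14; conv1x1 reduces it to
# 8 x 7 x 14, which flattens to 784 and is tiled to the 8 x 2 spatial size of
# audio_conv5feature (512 channels), giving the 1296-channel audioVisual_feature.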
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class BaseLoss(nn.Module):
def __init__(self):
super(BaseLoss, self).__init__()
def forward(self, preds, targets, weight=None):
if isinstance(preds, list):
N = len(preds)
if weight is None:
weight = preds[0].new_ones(1)
errs = [self._forward(preds[n], targets[n], weight[n])
for n in range(N)]
err = torch.mean(torch.stack(errs))
elif isinstance(preds, torch.Tensor):
if weight is None:
weight = preds.new_ones(1)
err = self._forward(preds, targets, weight)
return err
class L1Loss(BaseLoss):
def __init__(self):
super(L1Loss, self).__init__()
def _forward(self, pred, target, weight):
return torch.mean(weight * torch.abs(pred - target))
class L2Loss(BaseLoss):
def __init__(self):
super(L2Loss, self).__init__()
def _forward(self, pred, target, weight):
return torch.mean(weight * torch.pow(pred - target, 2))
class MSELoss(BaseLoss):
def __init__(self):
super(MSELoss, self).__init__()
def _forward(self, pred, target, weight):
return F.mse_loss(pred, target)
class BCELoss(BaseLoss):
def __init__(self):
super(BCELoss, self).__init__()
def _forward(self, pred, target, weight):
return F.binary_cross_entropy(pred, target, weight=weight)
class BCEWithLogitsLoss(BaseLoss):
def __init__(self):
super(BCEWithLogitsLoss, self).__init__()
def _forward(self, pred, target, weight):
return F.binary_cross_entropy_with_logits(pred, target, weight=weight)
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def CreateDataLoader(opt):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
print(data_loader.name())
data_loader.initialize(opt)
return data_loader
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class BaseDataLoader():
def __init__(self):
pass
def initialize(self, opt):
self.opt = opt
pass
def load_data(self):
return None
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
if opt.model == 'audioVisual':
from data.audioVisual_dataset import AudioVisualDataset
dataset = AudioVisualDataset()
else:
raise ValueError("Dataset [%s] not recognized." % opt.model)
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.nThreads))
def load_data(self):
return self
def __len__(self):
return len(self.dataset)
def __iter__(self):
for i, data in enumerate(self.dataloader):
yield data
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os.path
import time
import librosa
import h5py
import random
import math
import numpy as np
import glob
import torch
from PIL import Image, ImageEnhance
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset
def normalize(samples, desired_rms = 0.1, eps = 1e-4):
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return samples
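# generate_spectrogram stacks the real and imaginary parts of a 512-point STFT
# (hop length 160, window length 400) into a (2, 257, num_frames) array, so the
# network works on a two-channel real representation instead of complex values.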
def generate_spectrogram(audio):
spectro = librosa.core.stft(audio, n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(spectro), axis=0)
imag = np.expand_dims(np.imag(spectro), axis=0)
spectro_two_channel = np.concatenate((real, imag), axis=0)
return spectro_two_channel
def process_image(image, augment):
image = image.resize((480,240))
w,h = image.size
w_offset = w - 448
h_offset = h - 224
left = random.randrange(0, w_offset + 1)
upper = random.randrange(0, h_offset + 1)
image = image.crop((left, upper, left+448, upper+224))
if augment:
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
return image
class AudioVisualDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.audios = []
#load hdf5 file here
h5f_path = os.path.join(opt.hdf5FolderPath, opt.mode+".h5")
h5f = h5py.File(h5f_path, 'r')
self.audios = h5f['audio'][:]
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
vision_transform_list = [transforms.ToTensor(), normalize]
self.vision_transform = transforms.Compose(vision_transform_list)
def __getitem__(self, index):
#load audio
audio, audio_rate = librosa.load(self.audios[index], sr=self.opt.audio_sampling_rate, mono=False)
#randomly get a start time for the audio segment from the 10s clip
audio_start_time = random.uniform(0, 9.9 - self.opt.audio_length)
audio_end_time = audio_start_time + self.opt.audio_length
audio_start = int(audio_start_time * self.opt.audio_sampling_rate)
audio_end = audio_start + int(self.opt.audio_length * self.opt.audio_sampling_rate)
audio = audio[:, audio_start:audio_end]
audio = normalize(audio)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#get the frame dir path based on audio path
path_parts = self.audios[index].strip().split('/')
path_parts[-1] = path_parts[-1][:-4] + '.mp4'
path_parts[-2] = 'frames'
frame_path = '/'.join(path_parts)
# get the closest frame to the audio segment
#frame_index = int(round((audio_start_time + audio_end_time) / 2.0 + 0.5)) #1 frame extracted per second
frame_index = int(round(((audio_start_time + audio_end_time) / 2.0 + 0.05) * 10)) #10 frames extracted per second
frame = process_image(Image.open(os.path.join(frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB'), self.opt.enable_data_augmentation)
frame = self.vision_transform(frame)
#passing the spectrogram of the difference
audio_diff_spec = torch.FloatTensor(generate_spectrogram(audio_channel1 - audio_channel2))
audio_mix_spec = torch.FloatTensor(generate_spectrogram(audio_channel1 + audio_channel2))
return {'frame': frame, 'audio_diff_spec':audio_diff_spec, 'audio_mix_spec':audio_mix_spec}
def __len__(self):
return len(self.audios)
def name(self):
return 'AudioVisualDataset'
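# Assumed data layout (inferred from the code above, not documented here): each of
# <hdf5FolderPath>/{train,val,test}.h5 stores an 'audio' dataset of binaural .wav paths,
# and frames live in a sibling 'frames/<clip_name>.mp4/' directory as %06d.png images
# extracted at 10 fps.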
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gym
import torch
from collections import deque, defaultdict
from gym import spaces
import numpy as np
from gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX
# Helper functions and wrappers
def _format_observation(obs):
obs = torch.tensor(obs)
return obs.view((1, 1) + obs.shape) # (...) -> (T,B,...).
class Minigrid2Image(gym.ObservationWrapper):
"""Get MiniGrid observation to ignore language instruction."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = env.observation_space.spaces['image']
def observation(self, observation):
return observation['image']
class Observation_WrapperSetup:
"""Environment wrapper to format observation items into torch."""
def __init__(self, gym_env, fix_seed=False, env_seed=1):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
self.episode_win = None
self.fix_seed = fix_seed
self.env_seed = env_seed
def initial(self):
initial_reward = torch.zeros(1, 1)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
self.episode_win = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.uint8)
if self.fix_seed:
self.gym_env.seed(seed=self.env_seed)
initial_frame = _format_observation(self.gym_env.reset())
if self.gym_env.carrying:
carried_col, carried_obj = torch.LongTensor([[COLOR_TO_IDX[self.gym_env.carrying.color]]]), torch.LongTensor([[OBJECT_TO_IDX[self.gym_env.carrying.type]]])
else:
carried_col, carried_obj = torch.LongTensor([[5]]), torch.LongTensor([[1]])
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
episode_win=self.episode_win,
carried_col = carried_col,
carried_obj = carried_obj)
def step(self, action):
frame, reward, done, _ = self.gym_env.step(action.item())
self.episode_step += 1
episode_step = self.episode_step
self.episode_return += reward
episode_return = self.episode_return
if done and reward > 0:
self.episode_win[0][0] = 1
else:
self.episode_win[0][0] = 0
episode_win = self.episode_win
if done:
if self.fix_seed:
self.gym_env.seed(seed=self.env_seed)
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
self.episode_win = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_observation(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
if self.gym_env.carrying:
carried_col, carried_obj = torch.LongTensor([[COLOR_TO_IDX[self.gym_env.carrying.color]]]), torch.LongTensor([[OBJECT_TO_IDX[self.gym_env.carrying.type]]])
else:
carried_col, carried_obj = torch.LongTensor([[5]]), torch.LongTensor([[1]])
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step = episode_step,
episode_win = episode_win,
carried_col = carried_col,
carried_obj = carried_obj
)
def get_full_obs(self):
env = self.gym_env.unwrapped
full_grid = env.grid.encode()
full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([
OBJECT_TO_IDX['agent'],
COLOR_TO_IDX['red'],
env.agent_dir
])
return full_grid
def close(self):
self.gym_env.close()
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[:-1] + (shp[-1] * k,)),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
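# Illustrative usage (a sketch; the environment id is an example):
#   env = FrameStack(Minigrid2Image(gym.make('MiniGrid-Empty-8x8-v0')), k=4)
#   obs = env.reset()        # a LazyFrames object; frames are stacked along the last axis
#   arr = np.asarray(obs)    # the concatenated array is materialized only here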
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Naive profiling using timeit."""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += f"\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
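# Minimal usage sketch (illustrative):
#   timings = Timings()
#   for _ in range(100):
#       ...                        # do some work
#       timings.time("work")       # record the time elapsed since the previous mark
#   print(timings.summary("Profile:"))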
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import datetime
import csv
import json
import logging
import os
import time
from typing import Dict
import git
def gather_metadata() -> Dict:
date_start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
# gathering git metadata
try:
repo = git.Repo(search_parent_directories=True)
git_sha = repo.commit().hexsha
git_data = dict(
commit=git_sha,
branch=None if repo.head.is_detached else repo.active_branch.name,
is_dirty=repo.is_dirty(),
path=repo.git_dir,
)
except git.InvalidGitRepositoryError:
git_data = None
# gathering slurm metadata
if "SLURM_JOB_ID" in os.environ:
slurm_env_keys = [k for k in os.environ if k.startswith("SLURM")]
slurm_data = {}
for k in slurm_env_keys:
d_key = k.replace("SLURM_", "").replace("SLURMD_", "").lower()
slurm_data[d_key] = os.environ[k]
else:
slurm_data = None
return dict(
date_start=date_start,
date_end=None,
successful=False,
git=git_data,
slurm=slurm_data,
env=os.environ.copy(),
)
class FileWriter:
def __init__(
self,
xpid: str = None,
xp_args: dict = None,
rootdir: str = "~/palaas",
symlink_to_latest: bool = True,
):
if not xpid:
# make unique id
xpid = "{proc}_{unixtime}".format(
proc=os.getpid(), unixtime=int(time.time())
)
self.xpid = xpid
self._tick = 0
# metadata gathering
if xp_args is None:
xp_args = {}
self.metadata = gather_metadata()
# we need to copy the args, otherwise when we close the file writer
# (and rewrite the args) we might have non-serializable objects (or
# other nasty stuff).
self.metadata["args"] = copy.deepcopy(xp_args)
self.metadata["xpid"] = self.xpid
formatter = logging.Formatter("%(message)s")
self._logger = logging.getLogger("palaas/out")
# to stdout handler
shandle = logging.StreamHandler()
shandle.setFormatter(formatter)
self._logger.addHandler(shandle)
self._logger.setLevel(logging.INFO)
rootdir = os.path.expandvars(os.path.expanduser(rootdir))
# to file handler
self.basepath = os.path.join(rootdir, self.xpid)
if not os.path.exists(self.basepath):
self._logger.info("Creating log directory: %s", self.basepath)
os.makedirs(self.basepath, exist_ok=True)
else:
self._logger.info("Found log directory: %s", self.basepath)
if symlink_to_latest:
# Add 'latest' as symlink unless it exists and is no symlink.
symlink = os.path.join(rootdir, "latest")
try:
if os.path.islink(symlink):
os.remove(symlink)
if not os.path.exists(symlink):
os.symlink(self.basepath, symlink)
self._logger.info("Symlinked log directory: %s", symlink)
except OSError:
# os.remove() or os.symlink() raced. Don't do anything.
pass
self.paths = dict(
msg="{base}/out.log".format(base=self.basepath),
logs="{base}/logs.csv".format(base=self.basepath),
fields="{base}/fields.csv".format(base=self.basepath),
meta="{base}/meta.json".format(base=self.basepath),
)
self._logger.info("Saving arguments to %s", self.paths["meta"])
if os.path.exists(self.paths["meta"]):
self._logger.warning(
"Path to meta file already exists. " "Not overriding meta."
)
else:
self._save_metadata()
self._logger.info("Saving messages to %s", self.paths["msg"])
if os.path.exists(self.paths["msg"]):
self._logger.warning(
"Path to message file already exists. " "New data will be appended."
)
fhandle = logging.FileHandler(self.paths["msg"])
fhandle.setFormatter(formatter)
self._logger.addHandler(fhandle)
self._logger.info("Saving logs data to %s", self.paths["logs"])
self._logger.info("Saving logs' fields to %s", self.paths["fields"])
if os.path.exists(self.paths["logs"]):
self._logger.warning(
"Path to log file already exists. " "New data will be appended."
)
with open(self.paths["fields"], "r") as csvfile:
reader = csv.reader(csvfile)
self.fieldnames = list(reader)[0]
else:
self.fieldnames = ["_tick", "_time"]
self._fieldfile = open(self.paths["fields"], "w")
self._fieldwriter = csv.writer(self._fieldfile)
self._logfile = open(self.paths["logs"], "a")
self._logwriter = csv.DictWriter(self._logfile, fieldnames=self.fieldnames)
def log(self, to_log: Dict, tick: int = None, verbose: bool = False) -> None:
if tick is not None:
raise NotImplementedError
else:
to_log["_tick"] = self._tick
self._tick += 1
to_log["_time"] = time.time()
old_len = len(self.fieldnames)
for k in to_log:
if k not in self.fieldnames:
self.fieldnames.append(k)
if old_len != len(self.fieldnames):
self._fieldwriter.writerow(self.fieldnames)
self._logger.info("Updated log fields: %s", self.fieldnames)
if to_log["_tick"] == 0:
self._logfile.write("# %s\n" % ",".join(self.fieldnames))
if verbose:
self._logger.info(
"LOG | %s",
", ".join(["{}: {}".format(k, to_log[k]) for k in sorted(to_log)]),
)
self._logwriter.writerow(to_log)
self._logfile.flush()
def close(self, successful: bool = True) -> None:
self.metadata["date_end"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S.%f"
)
self.metadata["successful"] = successful
self._save_metadata()
for f in [self._logfile, self._fieldfile]:
f.close()
def _save_metadata(self) -> None:
with open(self.paths["meta"], "w") as jsonfile:
json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This file taken from
# https://github.com/deepmind/scalable_agent/blob/
# cd66d00914d56c8ba2f0615d9cdeefcb169a8d70/vtrace.py
# and modified.
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
"""
import collections
import torch
import torch.nn.functional as F
VTraceFromLogitsReturns = collections.namedtuple(
"VTraceFromLogitsReturns",
[
"vs",
"pg_advantages",
"log_rhos",
"behavior_action_log_probs",
"target_action_log_probs",
],
)
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def action_log_probs(policy_logits, actions):
return -F.nll_loss(
F.log_softmax(torch.flatten(policy_logits, 0, -2), dim=-1),
torch.flatten(actions),
reduction="none",
).view_as(actions)
def from_logits(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace for softmax policies."""
target_action_log_probs = action_log_probs(target_policy_logits, actions)
behavior_action_log_probs = action_log_probs(behavior_policy_logits, actions)
log_rhos = target_action_log_probs - behavior_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behavior_action_log_probs=behavior_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict(),
)
@torch.no_grad()
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace from log importance weights."""
with torch.no_grad():
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range(discounts.shape[0] - 1, -1, -1):
acc = deltas[t] + discounts[t] * cs[t] * acc
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
# Add V(x_s) to get v_s.
vs = torch.add(vs_minus_v_xs, values)
# Advantage for policy gradient.
broadcasted_bootstrap_values = torch.ones_like(vs[0]) * bootstrap_value
vs_t_plus_1 = torch.cat(
[vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0
)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs, pg_advantages=pg_advantages)
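# Illustrative call (a sketch with random inputs; T = unroll length, B = batch size):
#   T, B, num_actions = 80, 32, 7
#   returns = from_logits(
#       behavior_policy_logits=torch.randn(T, B, num_actions),
#       target_policy_logits=torch.randn(T, B, num_actions),
#       actions=torch.randint(num_actions, (T, B)),
#       discounts=torch.full((T, B), 0.99),
#       rewards=torch.randn(T, B),
#       values=torch.randn(T, B),
#       bootstrap_value=torch.randn(B),
#   )
#   # returns.vs and returns.pg_advantages both have shape (T, B).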
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""The environment class."""
import torch
def _format_frame(frame):
frame = torch.from_numpy(frame)
return frame.view((1, 1) + frame.shape) # (...) -> (T,B,...).
class Environment:
def __init__(self, gym_env):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
def initial(self):
initial_reward = torch.zeros(1, 1)
# This supports only single-tensor actions ATM.
initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.bool)
initial_frame = _format_frame(self.gym_env.reset())
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
last_action=initial_last_action,
)
def step(self, action):
frame, reward, done, unused_info = self.gym_env.step(action.item())
self.episode_step += 1
self.episode_return += reward
episode_step = self.episode_step
episode_return = self.episode_return
if done:
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_frame(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step=episode_step,
last_action=action,
)
def close(self):
self.gym_env.close()
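# --- Hedged usage sketch (assumed example, not part of the original API) ---
# A minimal rollout loop; the wrapped Gym environment (with numpy observations)
# and the constant action are assumptions for illustration only.
def _example_environment_rollout(gym_env, num_steps=5):
    env = Environment(gym_env)
    output = env.initial()  # dict of [1, 1, ...] tensors
    for _ in range(num_steps):
        action = torch.zeros(1, 1, dtype=torch.int64)  # action 0 every step
        output = env.step(action)
    env.close()
    return output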
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Must be run with OMP_NUM_THREADS=1
import random
import argparse
import logging
import os
import threading
import time
import timeit
import traceback
import pprint
import typing
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
import gym
import gym_minigrid.wrappers as wrappers
from torch.distributions.normal import Normal
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
from env_utils import Observation_WrapperSetup, FrameStack
# Some Global Variables
# We start t* at 7 steps.
generator_batch = dict()
generator_batch_aux = dict()
generator_current_target = 7.0
generator_count = 0
# yapf: disable
parser = argparse.ArgumentParser(description='PyTorch Scalable Agent')
parser.add_argument('--env', type=str, default='MiniGrid-Empty-8x8-v0',
help='Gym environment.')
parser.add_argument('--mode', default='train',
choices=['train', 'test', 'test_render'],
help='Training or test mode.')
parser.add_argument('--xpid', default=None,
help='Experiment id (default: None).')
# Training settings.
parser.add_argument('--disable_checkpoint', action='store_true',
help='Disable saving checkpoint.')
parser.add_argument('--savedir', default='./experimentsMinigrid',
help='Root dir where experiment data will be saved.')
parser.add_argument('--total_frames', default=600000000, type=int, metavar='T',
help='Total environment frames to train for.')
parser.add_argument('--num_actors', default=4, type=int, metavar='N',
help='Number of actors (default: 4).')
parser.add_argument('--num_buffers', default=None, type=int,
metavar='N', help='Number of shared-memory buffers.')
parser.add_argument('--num_threads', default=4, type=int,
                    metavar='N', help='Number of learner threads.')
parser.add_argument('--disable_cuda', action='store_true',
help='Disable CUDA.')
# Loss settings.
parser.add_argument('--entropy_cost', default=0.0005, type=float,
help='Entropy cost/multiplier.')
parser.add_argument('--generator_entropy_cost', default=0.05, type=float,
                    help='Generator entropy cost/multiplier.')
parser.add_argument('--baseline_cost', default=0.5, type=float,
help='Baseline cost/multiplier.')
parser.add_argument('--discounting', default=0.99, type=float,
help='Discounting factor.')
parser.add_argument('--reward_clipping', default='abs_one',
choices=['abs_one', 'soft_asymmetric', 'none'],
help='Reward clipping.')
# Optimizer settings.
parser.add_argument('--learning_rate', default=0.001, type=float,
metavar='LR', help='Learning rate.')
parser.add_argument('--generator_learning_rate', default=0.002, type=float,
                    metavar='LR', help='Generator learning rate.')
parser.add_argument('--alpha', default=0.99, type=float,
help='RMSProp smoothing constant.')
parser.add_argument('--momentum', default=0, type=float,
help='RMSProp momentum.')
parser.add_argument('--epsilon', default=0.01, type=float,
help='RMSProp epsilon.')
# Other Hyperparameters
parser.add_argument('--batch_size', default=8, type=int, metavar='B',
                    help='Learner batch size (default: 8).')
parser.add_argument('--generator_batch_size', default=32, type=int, metavar='BB',
                    help='Generator batch size (default: 32).')
parser.add_argument('--unroll_length', default=100, type=int, metavar='T',
                    help='The unroll length (time dimension; default: 100).')
parser.add_argument('--goal_dim', default=10, type=int,
help='Size of Goal Embedding')
parser.add_argument('--state_embedding_dim', default=256, type=int,
help='Dimension of the state embedding representation used in the student')
parser.add_argument('--generator_reward_negative', default=-0.1, type=float,
                    help='Base negative reward given to the generator.')
parser.add_argument('--generator_threshold', default=-0.5, type=float,
                    help='Mean-reward threshold at which the scheduler increases difficulty.')
parser.add_argument('--generator_counts', default=10, type=int,
                    help='Number of times the threshold must be met before the generator increases difficulty.')
parser.add_argument('--generator_maximum', default=100, type=float,
help='Maximum difficulty')
parser.add_argument('--generator_reward_coef', default=1.0, type=float,
help='Coefficient for the generator reward')
# Map Layout
parser.add_argument('--fix_seed', action='store_true',
                    help='Fix the environment seed so that it is \
                    no longer procedurally generated but rather the same layout every time.')
parser.add_argument('--env_seed', default=1, type=int,
help='The seed to set for the env if we are using a single fixed seed.')
parser.add_argument('--inner', action='store_true',
                    help='Exclude the outer wall.')
parser.add_argument('--num_input_frames', default=1, type=int,
                    help='Number of input frames to the model and state embedding, including the current frame. \
                    When num_input_frames > 1, the previous num_input_frames - 1 frames are also used as input.')
# Ablations and other settings
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument('--num_lstm_layers', default=1, type=int,
                    help='Number of LSTM layers.')
parser.add_argument('--disable_use_embedding', action='store_true',
help='Disable embeddings.')
parser.add_argument('--no_extrinsic_rewards', action='store_true',
help='Only intrinsic rewards.')
parser.add_argument('--no_generator', action='store_true',
                    help='Use a vanilla policy without the generator (deprecated).')
parser.add_argument('--intrinsic_reward_coef', default=1.0, type=float,
help='Coefficient for the intrinsic reward')
parser.add_argument('--random_agent', action='store_true',
help='Use a random agent to test the env.')
parser.add_argument('--novelty', action='store_true',
help='Discount rewards based on times goal has been proposed.')
parser.add_argument('--novelty_bonus', default=0.1, type=float,
                    help='Bonus received for proposing novel objects (used with --novelty).')
parser.add_argument('--novelty_coef', default=0.3, type=float,
                    help='Scales the novelty bonus (used with --novelty).')
parser.add_argument('--restart_episode', action='store_true',
help='Restart Episode when reaching intrinsic goal.')
parser.add_argument('--modify', action='store_true',
                    help='Count the goal as reached when the goal cell is modified, instead of when the agent reaches it.')
parser.add_argument('--no_boundary_awareness', action='store_true',
help='Remove Episode Boundary Awareness')
parser.add_argument('--generator_loss_form', type=str, default='threshold',
                    help='Generator loss form: threshold | dummy | gaussian | linear.')
parser.add_argument('--generator_target', default=5.0, type=float,
                    help='Mean target for the Gaussian and linear rewards.')
parser.add_argument('--target_variance', default=15.0, type=float,
help='Variance for the Gaussian Reward')
# yapf: enable
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
# Take the mean over batch, sum over time.
return 0.5 * torch.sum(torch.mean(advantages ** 2, dim=1))
def compute_entropy_loss(logits):
# Regularizing Entropy Loss
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
entropy_per_timestep = torch.sum(-policy * log_policy, dim=-1)
return -torch.sum(torch.mean(entropy_per_timestep, dim=1))
def compute_policy_gradient_loss(logits, actions, advantages):
# Main Policy Loss
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
advantages.requires_grad = False
policy_gradient_loss_per_timestep = cross_entropy * advantages
return torch.sum(torch.mean(policy_gradient_loss_per_timestep, dim=1))
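# --- Hedged usage sketch (assumed example, not part of the original API) ---
# The three loss helpers above all expect [T, B, ...] tensors; the shapes below
# are illustrative assumptions.
def _example_loss_shapes():
    T, B, num_actions = 5, 2, 7
    logits = torch.randn(T, B, num_actions)
    actions = torch.randint(num_actions, (T, B))
    advantages = torch.randn(T, B)
    pg_loss = compute_policy_gradient_loss(logits, actions, advantages)
    baseline_loss = compute_baseline_loss(advantages)
    entropy_loss = compute_entropy_loss(logits)
    # Each term is a scalar tensor; they are summed into a total loss.
    return pg_loss + baseline_loss + entropy_loss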
def act(
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
generator_model,
buffers: Buffers,
initial_agent_state_buffers, flags):
"""Defines and generates IMPALA actors in multiples threads."""
try:
logging.info("Actor %i started.", actor_index)
timings = prof.Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
#gym_env = wrappers.FullyObsWrapper(gym_env)
if flags.num_input_frames > 1:
gym_env = FrameStack(gym_env, flags.num_input_frames)
env = Observation_WrapperSetup(gym_env, fix_seed=flags.fix_seed, env_seed=flags.env_seed)
env_output = env.initial()
initial_frame = env_output['frame']
agent_state = model.initial_state(batch_size=1)
generator_output = generator_model(env_output)
goal = generator_output["goal"]
agent_output, unused_state = model(env_output, agent_state, goal)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for key in generator_output:
buffers[key][index][0, ...] = generator_output[key]
buffers["initial_frame"][index][0, ...] = initial_frame
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout
for t in range(flags.unroll_length):
aux_steps = 0
timings.reset()
if flags.modify:
new_frame = torch.flatten(env_output['frame'], 2, 3)
old_frame = torch.flatten(initial_frame, 2, 3)
ans = new_frame == old_frame
ans = torch.sum(ans, 3) != 3 # Reached if the three elements of the frame are not the same.
reached_condition = torch.squeeze(torch.gather(ans, 2, torch.unsqueeze(goal.long(),2)))
else:
agent_location = torch.flatten(env_output['frame'], 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(agent_output["action"].shape)
reached_condition = goal == agent_location
if reached_condition: # Generate new goal when reached intrinsic goal
if flags.restart_episode:
env_output = env.initial()
else:
env.episode_step = 0
initial_frame = env_output['frame']
with torch.no_grad():
generator_output = generator_model(env_output)
goal = generator_output["goal"]
if env_output['done'][0] == 1: # Generate a New Goal when episode finished
initial_frame = env_output['frame']
with torch.no_grad():
generator_output = generator_model(env_output)
goal = generator_output["goal"]
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state, goal)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
for key in generator_output:
buffers[key][index][t + 1, ...] = generator_output[key]
buffers["initial_frame"][index][t + 1, ...] = initial_frame
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock()):
"""Returns a Batch with the history."""
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(t.to(device=flags.device, non_blocking=True)
for t in initial_agent_state)
timings.time("device")
return batch, initial_agent_state
def reached_goal_func(frames, goals, initial_frames = None, done_aux = None):
"""Auxiliary function which evaluates whether agent has reached the goal."""
if flags.modify:
new_frame = torch.flatten(frames, 2, 3)
old_frame = torch.flatten(initial_frames, 2, 3)
ans = new_frame == old_frame
ans = torch.sum(ans, 3) != 3 # reached if the three elements are not the same
reached = torch.squeeze(torch.gather(ans, 2, torch.unsqueeze(goals.long(),2)))
if flags.no_boundary_awareness:
reached = reached.float() * (1 - done_aux.float())
return reached
else:
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(goals.shape)
return (goals == agent_location).float()
def learn(
actor_model, model, actor_generator_model, generator_model, batch, initial_agent_state, optimizer, generator_model_optimizer, scheduler, generator_scheduler, flags, max_steps=100.0, lock=threading.Lock()
):
"""Performs a learning (optimization) step for the policy, and for the generator whenever the generator batch is full."""
with lock:
# Loading Batch
next_frame = batch['frame'][1:].float().to(device=flags.device)
initial_frames = batch['initial_frame'][1:].float().to(device=flags.device)
done_aux = batch['done'][1:].float().to(device=flags.device)
reached_goal = reached_goal_func(next_frame, batch['goal'][1:].to(device=flags.device), initial_frames = initial_frames, done_aux = done_aux)
intrinsic_rewards = flags.intrinsic_reward_coef * reached_goal
reached = reached_goal.type(torch.bool)
intrinsic_rewards = intrinsic_rewards*(intrinsic_rewards - 0.9 * (batch["episode_step"][1:].float()/max_steps))
learner_outputs, unused_state = model(batch, initial_agent_state, batch['goal'])
bootstrap_value = learner_outputs["baseline"][-1]
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
# Student Rewards
if flags.no_generator:
total_rewards = rewards
elif flags.no_extrinsic_rewards:
total_rewards = intrinsic_rewards
else:
total_rewards = rewards + intrinsic_rewards
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(total_rewards, -1, 1)
elif flags.reward_clipping == "soft_asymmetric":
squeezed = torch.tanh(total_rewards / 5.0)
# Negative rewards are given less weight than positive rewards.
clipped_rewards = torch.where(total_rewards < 0, 0.3 * squeezed, squeezed) * 5.0
elif flags.reward_clipping == "none":
clipped_rewards = total_rewards
discounts = (~batch["done"]).float() * flags.discounting
clipped_rewards += 1.0 * (rewards>0.0).float()
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
# Student Loss
# Compute loss as a weighted sum of the baseline loss, the policy
# gradient loss and an entropy regularization term.
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
if torch.isnan(torch.mean(episode_returns)):
aux_mean_episode = 0.0
else:
aux_mean_episode = torch.mean(episode_returns).item()
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": aux_mean_episode,
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
"gen_rewards": None,
"gg_loss": None,
"generator_baseline_loss": None,
"generator_entropy_loss": None,
"mean_intrinsic_rewards": None,
"mean_episode_steps": None,
"ex_reward": None,
"generator_current_target": None,
}
if flags.no_generator:
stats["gen_rewards"] = 0.0,
stats["gg_loss"] = 0.0,
stats["generator_baseline_loss"] = 0.0,
stats["generator_entropy_loss"] = 0.0,
stats["mean_intrinsic_rewards"] = 0.0,
stats["mean_episode_steps"] = 0.0,
stats["ex_reward"] = 0.0,
stats["generator_current_target"] = 0.0,
scheduler.step()
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 40.0)
optimizer.step()
actor_model.load_state_dict(model.state_dict())
# Generator:
if not flags.no_generator:
global generator_batch
global generator_batch_aux
global generator_current_target
global generator_count
global goal_count_dict
# Loading Batch
is_done = batch['done']==1
reached = reached_goal.type(torch.bool)
if 'frame' in generator_batch.keys():
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][is_done].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][is_done].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][is_done].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][is_done].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.zeros(batch['goal'].shape)[is_done].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][reached].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][reached].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][reached].float().to(device=flags.device)), 0)
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][reached].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][reached].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][reached].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][reached].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.ones(batch['goal'].shape)[reached].float().to(device=flags.device)), 0)
else:
generator_batch['frame'] = (batch['initial_frame'][is_done]).float().to(device=flags.device) # Notice we use initial_frame from batch
generator_batch['goal'] = (batch['goal'][is_done]).to(device=flags.device)
generator_batch['episode_step'] = (batch['episode_step'][is_done]).float().to(device=flags.device)
generator_batch['generator_logits'] = (batch['generator_logits'][is_done]).float().to(device=flags.device)
generator_batch['reached'] = (torch.zeros(batch['goal'].shape)[is_done]).float().to(device=flags.device)
generator_batch['ex_reward'] = (batch['reward'][is_done]).float().to(device=flags.device)
generator_batch['carried_obj'] = (batch['carried_obj'][is_done]).float().to(device=flags.device)
generator_batch['carried_col'] = (batch['carried_col'][is_done]).float().to(device=flags.device)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][reached].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][reached].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][reached].float().to(device=flags.device)), 0)
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][reached].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][reached].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][reached].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][reached].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.ones(batch['goal'].shape)[reached].float().to(device=flags.device)), 0)
if generator_batch['frame'].shape[0] >= flags.generator_batch_size: # Run Gradient step, keep batch residual in batch_aux
for key in generator_batch:
generator_batch_aux[key] = generator_batch[key][flags.generator_batch_size:]
generator_batch[key] = generator_batch[key][:flags.generator_batch_size].unsqueeze(0)
generator_outputs = generator_model(generator_batch)
generator_bootstrap_value = generator_outputs["generator_baseline"][-1]
# Generator Reward
def distance2(episode_step, reached, targ=flags.generator_target):
aux = flags.generator_reward_negative * torch.ones(episode_step.shape).to(device=flags.device)
aux += (episode_step >= targ).float() * reached
return aux
if flags.generator_loss_form == 'gaussian':
generator_target = flags.generator_target * torch.ones(generator_batch['episode_step'].shape).to(device=flags.device)
gen_reward = Normal(generator_target, flags.target_variance*torch.ones(generator_target.shape).to(device=flags.device))
generator_rewards = flags.generator_reward_coef * (2 + gen_reward.log_prob(generator_batch['episode_step']) - gen_reward.log_prob(generator_target)) * generator_batch['reached'] -1
elif flags.generator_loss_form == 'linear':
generator_rewards = (generator_batch['episode_step']/flags.generator_target * (generator_batch['episode_step'] <= flags.generator_target).float() + \
torch.exp ((-generator_batch['episode_step'] + flags.generator_target)/20.0) * (generator_batch['episode_step'] > flags.generator_target).float()) * \
2*generator_batch['reached'] - 1
elif flags.generator_loss_form == 'dummy':
generator_rewards = torch.tensor(distance2(generator_batch['episode_step'], generator_batch['reached'])).to(device=flags.device)
elif flags.generator_loss_form == 'threshold':
generator_rewards = torch.tensor(distance2(generator_batch['episode_step'], generator_batch['reached'], targ=generator_current_target)).to(device=flags.device)
if torch.mean(generator_rewards).item() >= flags.generator_threshold:
generator_count += 1
else:
generator_count = 0
if generator_count >= flags.generator_counts and generator_current_target<=flags.generator_maximum:
generator_current_target += 1.0
generator_count = 0
goal_count_dict *= 0.0
if flags.novelty:
frames_aux = torch.flatten(generator_batch['frame'], 2, 3)
frames_aux = frames_aux[:,:,:,0]
object_ids =torch.zeros(generator_batch['goal'].shape).long()
for i in range(object_ids.shape[1]):
object_ids[0,i] = frames_aux[0,i,generator_batch['goal'][0,i]]
goal_count_dict[object_ids[0,i]] += 1
bonus = (object_ids>2).float().to(device=flags.device) * flags.novelty_bonus
generator_rewards += bonus
if flags.reward_clipping == "abs_one":
generator_clipped_rewards = torch.clamp(generator_rewards, -1, 1)
if not flags.no_extrinsic_rewards:
generator_clipped_rewards = 1.0 * (generator_batch['ex_reward'] > 0).float() + generator_clipped_rewards * (generator_batch['ex_reward'] <= 0).float()
generator_discounts = torch.zeros(generator_batch['episode_step'].shape).float().to(device=flags.device)
goals_aux = generator_batch["goal"]
if flags.inner:
goals_aux = goals_aux.float()
goals_aux -= 2 * (torch.floor(goals_aux/generator_model.height))
goals_aux -= generator_model.height -1
goals_aux = goals_aux.long()
generator_vtrace_returns = vtrace.from_logits(
behavior_policy_logits=generator_batch["generator_logits"],
target_policy_logits=generator_outputs["generator_logits"],
actions=goals_aux,
discounts=generator_discounts,
rewards=generator_clipped_rewards,
values=generator_outputs["generator_baseline"],
bootstrap_value=generator_bootstrap_value,
)
# Generator Loss
gg_loss = compute_policy_gradient_loss(
generator_outputs["generator_logits"],
goals_aux,
generator_vtrace_returns.pg_advantages,
)
generator_baseline_loss = flags.baseline_cost * compute_baseline_loss(
generator_vtrace_returns.vs - generator_outputs["generator_baseline"]
)
generator_entropy_loss = flags.generator_entropy_cost * compute_entropy_loss(
generator_outputs["generator_logits"]
)
generator_total_loss = gg_loss + generator_entropy_loss +generator_baseline_loss
intrinsic_rewards_gen = generator_batch['reached']*(1- 0.9 * (generator_batch["episode_step"].float()/max_steps))
stats["gen_rewards"] = torch.mean(generator_clipped_rewards).item()
stats["gg_loss"] = gg_loss.item()
stats["generator_baseline_loss"] = generator_baseline_loss.item()
stats["generator_entropy_loss"] = generator_entropy_loss.item()
stats["mean_intrinsic_rewards"] = torch.mean(intrinsic_rewards_gen).item()
stats["mean_episode_steps"] = torch.mean(generator_batch["episode_step"]).item()
stats["ex_reward"] = torch.mean(generator_batch['ex_reward']).item()
stats["generator_current_target"] = generator_current_target
generator_scheduler.step()
generator_model_optimizer.zero_grad()
generator_total_loss.backward()
nn.utils.clip_grad_norm_(generator_model.parameters(), 40.0)
generator_model_optimizer.step()
actor_generator_model.load_state_dict(generator_model.state_dict())
if generator_batch_aux['frame'].shape[0]>0:
generator_batch = {key: tensor[:] for key, tensor in generator_batch_aux.items()}
else:
generator_batch = dict()
return stats
def create_buffers(obs_shape, num_actions, flags, width, height, logits_size) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
generator_baseline=dict(size=(T + 1,), dtype=torch.float32),
action=dict(size=(T + 1,), dtype=torch.int64),
episode_win=dict(size=(T + 1,), dtype=torch.int32),
generator_logits=dict(size=(T + 1, logits_size), dtype=torch.float32),
goal=dict(size=(T + 1,), dtype=torch.int64),
initial_frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
carried_col =dict(size=(T + 1,), dtype=torch.int64),
carried_obj =dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
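# --- Hedged usage sketch (assumed example, not part of the original API) ---
# create_buffers only reads unroll_length and num_buffers from `flags`; the toy
# Namespace and shapes below are assumptions for illustration.
def _example_create_buffers():
    toy_flags = argparse.Namespace(unroll_length=4, num_buffers=2)
    buffers = create_buffers(obs_shape=(5, 5, 3), num_actions=7, flags=toy_flags,
                             width=5, height=5, logits_size=25)
    # Each key maps to num_buffers shared-memory tensors with a T + 1 time dimension.
    assert buffers["frame"][0].shape == (5, 5, 5, 3)
    return buffers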
def train(flags):
"""Full training loop."""
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
#env = wrappers.FullyObsWrapper(env)
if flags.num_input_frames > 1:
env = FrameStack(env, flags.num_input_frames)
generator_model = Generator(env.observation_space.shape, env.width, env.height, num_input_frames=flags.num_input_frames)
model = Net(env.observation_space.shape, env.action_space.n, state_embedding_dim=flags.state_embedding_dim, num_input_frames=flags.num_input_frames, use_lstm=flags.use_lstm, num_lstm_layers=flags.num_lstm_layers)
global goal_count_dict
goal_count_dict = torch.zeros(11).float().to(device=flags.device)
if flags.inner:
logits_size = (env.width-2)*(env.height-2)
else:
logits_size = env.width * env.height
buffers = create_buffers(env.observation_space.shape, model.num_actions, flags, env.width, env.height, logits_size)
model.share_memory()
generator_model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(i, free_queue, full_queue, model, generator_model, buffers,
initial_agent_state_buffers, flags))
actor.start()
actor_processes.append(actor)
learner_model = Net(env.observation_space.shape, env.action_space.n, state_embedding_dim=flags.state_embedding_dim, num_input_frames=flags.num_input_frames, use_lstm=flags.use_lstm, num_lstm_layers=flags.num_lstm_layers).to(
device=flags.device
)
learner_generator_model = Generator(env.observation_space.shape, env.width, env.height, num_input_frames=flags.num_input_frames).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
generator_model_optimizer = torch.optim.RMSprop(
learner_generator_model.parameters(),
lr=flags.generator_learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_frames) / flags.total_frames
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
generator_scheduler = torch.optim.lr_scheduler.LambdaLR(generator_model_optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
"gen_rewards",
"gg_loss",
"generator_entropy_loss",
"generator_baseline_loss",
"mean_intrinsic_rewards",
"mean_episode_steps",
"ex_reward",
"generator_current_target",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
frames, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal frames, stats
timings = prof.Timings()
while frames < flags.total_frames:
timings.reset()
batch, agent_state = get_batch(flags, free_queue, full_queue, buffers,
initial_agent_state_buffers, timings)
stats = learn(model, learner_model, generator_model, learner_generator_model, batch, agent_state, optimizer, generator_model_optimizer, scheduler, generator_scheduler, flags, env.max_steps)
timings.time("learn")
with lock:
to_log = dict(frames=frames)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
frames += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"generator_model_state_dict": generator_model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"generator_model_optimizer_state_dict": generator_model_optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"generator_scheduler_state_dict": generator_scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while frames < flags.total_frames:
start_frames = frames
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
fps = (frames - start_frames) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"After %i frames: loss %f @ %.1f fps. %sStats:\n%s",
frames,
total_loss,
fps,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d frames.", frames)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close()
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
class Generator(nn.Module):
"""Constructs the Teacher Policy which takes an initial observation and produces a goal."""
def __init__(self, observation_shape, width, height, num_input_frames, hidden_dim=256):
super(Generator, self).__init__()
self.observation_shape = observation_shape
self.height = height
self.width = width
self.env_dim = self.width * self.height
self.state_embedding_dim = 256
self.use_index_select = True
self.obj_dim = 5
self.col_dim = 3
self.con_dim = 2
self.num_channels = (self.obj_dim + self.col_dim + self.con_dim) * num_input_frames
if flags.disable_use_embedding:
print("not_using_embedding")
self.num_channels = 3*num_input_frames
self.embed_object = nn.Embedding(11, self.obj_dim)
self.embed_color = nn.Embedding(6, self.col_dim)
self.embed_contains = nn.Embedding(4, self.con_dim)
K = self.num_channels # number of input filters
F = 3 # filter dimensions
S = 1 # stride
P = 1 # padding
M = 16 # number of intermediate filters
Y = 8 # number of output filters
L = 4 # number of convnet layers
E = 1 # output of last layer
in_channels = [K] + [M] * 4
out_channels = [M] * 3 + [E]
conv_extract = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
def interleave(xs, ys):
return [val for pair in zip(xs, ys) for val in pair]
self.extract_representation = nn.Sequential(
*interleave(conv_extract, [nn.ELU()] * len(conv_extract))
)
self.out_dim = self.env_dim * 16 + self.obj_dim + self.col_dim
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
if flags.inner:
self.aux_env_dim = (self.height-2) * (self.width-2)
else:
self.aux_env_dim = self.env_dim
self.baseline_teacher = init_(nn.Linear(self.aux_env_dim, 1))
def _select(self, embed, x):
"""Efficient function to get embedding from an index."""
if self.use_index_select:
out = embed.weight.index_select(0, x.reshape(-1))
# handle reshaping x to 1-d and output back to N-d
return out.reshape(x.shape +(-1,))
else:
return embed(x)
def create_embeddings(self, x, id):
"""Generates compositional embeddings."""
if id == 0:
objects_emb = self._select(self.embed_object, x[:,:,:,id::3])
elif id == 1:
objects_emb = self._select(self.embed_color, x[:,:,:,id::3])
elif id == 2:
objects_emb = self._select(self.embed_contains, x[:,:,:,id::3])
embeddings = torch.flatten(objects_emb, 3, 4)
return embeddings
def convert_inner(self, goals):
"""Transform environment if using inner flag."""
goals = goals.float()
goals += 2*(1+torch.floor(goals/(self.height-2)))
goals += self.height - 1
goals = goals.long()
return goals
def agent_loc(self, frames):
"""Returns the location of an agent from an observation."""
T, B, height, width, *_ = frames.shape
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(T,B,1)
return agent_location
def forward(self, inputs):
"""Main Function, takes an observation and returns a goal."""
x = inputs["frame"]
T, B, *_ = x.shape
carried_col = inputs["carried_col"]
carried_obj = inputs["carried_obj"]
x = torch.flatten(x, 0, 1) # Merge time and batch.
if flags.disable_use_embedding:
x = x.float()
carried_obj = carried_obj.float()
carried_col = carried_col.float()
else:
x = x.long()
carried_obj = carried_obj.long()
carried_col = carried_col.long()
x = torch.cat([self.create_embeddings(x, 0), self.create_embeddings(x, 1), self.create_embeddings(x, 2)], dim = 3)
carried_obj_emb = self._select(self.embed_object, carried_obj)
carried_col_emb = self._select(self.embed_color, carried_col)
x = x.transpose(1, 3)
carried_obj_emb = carried_obj_emb.view(T * B, -1)
carried_col_emb = carried_col_emb.view(T * B, -1)
x = self.extract_representation(x)
x = x.view(T * B, -1)
generator_logits = x.view(T*B, -1)
generator_baseline = self.baseline_teacher(generator_logits)
goal = torch.multinomial(F.softmax(generator_logits, dim=1), num_samples=1)
generator_logits = generator_logits.view(T, B, -1)
generator_baseline = generator_baseline.view(T, B)
goal = goal.view(T, B)
if flags.inner:
goal = self.convert_inner(goal)
return dict(goal=goal, generator_logits=generator_logits, generator_baseline=generator_baseline)
class MinigridNet(nn.Module):
"""Constructs the Student Policy which takes an observation and a goal and produces an action."""
def __init__(self, observation_shape, num_actions, state_embedding_dim=256, num_input_frames=1, use_lstm=False, num_lstm_layers=1):
super(MinigridNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
self.state_embedding_dim = state_embedding_dim
self.use_lstm = use_lstm
self.num_lstm_layers = num_lstm_layers
self.use_index_select = True
self.obj_dim = 5
self.col_dim = 3
self.con_dim = 2
self.goal_dim = flags.goal_dim
self.agent_loc_dim = 10
self.num_channels = (self.obj_dim + self.col_dim + self.con_dim + 1) * num_input_frames
if flags.disable_use_embedding:
print("not_using_embedding")
self.num_channels = (3+1+1+1+1)*num_input_frames
self.embed_object = nn.Embedding(11, self.obj_dim)
self.embed_color = nn.Embedding(6, self.col_dim)
self.embed_contains = nn.Embedding(4, self.con_dim)
self.embed_goal = nn.Embedding(self.observation_shape[0]*self.observation_shape[1] + 1, self.goal_dim)
self.embed_agent_loc = nn.Embedding(self.observation_shape[0]*self.observation_shape[1] + 1, self.agent_loc_dim)
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
self.feat_extract = nn.Sequential(
init_(nn.Conv2d(in_channels=self.num_channels, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
)
self.fc = nn.Sequential(
init_(nn.Linear(32 + self.obj_dim + self.col_dim, self.state_embedding_dim)),
nn.ReLU(),
init_(nn.Linear(self.state_embedding_dim, self.state_embedding_dim)),
nn.ReLU(),
)
if use_lstm:
self.core = nn.LSTM(self.state_embedding_dim, self.state_embedding_dim, self.num_lstm_layers)
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.policy = init_(nn.Linear(self.state_embedding_dim, self.num_actions))
self.baseline = init_(nn.Linear(self.state_embedding_dim, 1))
def initial_state(self, batch_size):
"""Initializes LSTM."""
if not self.use_lstm:
return tuple()
return tuple(torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) for _ in range(2))
def create_embeddings(self, x, id):
"""Generates compositional embeddings."""
if id == 0:
objects_emb = self._select(self.embed_object, x[:,:,:,id::3])
elif id == 1:
objects_emb = self._select(self.embed_color, x[:,:,:,id::3])
elif id == 2:
objects_emb = self._select(self.embed_contains, x[:,:,:,id::3])
embeddings = torch.flatten(objects_emb, 3, 4)
return embeddings
def _select(self, embed, x):
"""Efficient function to get embedding from an index."""
if self.use_index_select:
out = embed.weight.index_select(0, x.reshape(-1))
# handle reshaping x to 1-d and output back to N-d
return out.reshape(x.shape +(-1,))
else:
return embed(x)
def agent_loc(self, frames):
"""Returns the location of an agent from an observation."""
T, B, *_ = frames.shape
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
agent_location = (agent_location == 10).nonzero() # select object id
agent_location = agent_location[:,2]
agent_location = agent_location.view(T,B,1)
return agent_location
def forward(self, inputs, core_state=(), goal=[]):
"""Main Function, takes an observation and a goal and returns and action."""
# -- [unroll_length x batch_size x height x width x channels]
x = inputs["frame"]
T, B, h, w, *_ = x.shape
# -- [unroll_length*batch_size x height x width x channels]
x = torch.flatten(x, 0, 1) # Merge time and batch.
goal = torch.flatten(goal, 0, 1)
# Creating goal_channel
goal_channel = torch.zeros_like(x, requires_grad=False)
goal_channel = torch.flatten(goal_channel, 1,2)[:,:,0]
for i in range(goal.shape[0]):
goal_channel[i,goal[i]] = 1.0
goal_channel = goal_channel.view(T*B, h, w, 1)
carried_col = inputs["carried_col"]
carried_obj = inputs["carried_obj"]
if flags.disable_use_embedding:
x = x.float()
goal = goal.float()
carried_obj = carried_obj.float()
carried_col = carried_col.float()
else:
x = x.long()
goal = goal.long()
carried_obj = carried_obj.long()
carried_col = carried_col.long()
        if flags.no_generator:
            # Without a generator there is no goal: zero the goal channel so the
            # policy receives no goal information.
            goal_channel = torch.zeros_like(goal_channel)
        # -- [B x H x W x K]
        x = torch.cat([self.create_embeddings(x, 0), self.create_embeddings(x, 1), self.create_embeddings(x, 2), goal_channel.float()], dim=3)
        carried_obj_emb = self._select(self.embed_object, carried_obj)
        carried_col_emb = self._select(self.embed_color, carried_col)
x = x.transpose(1, 3)
x = self.feat_extract(x)
x = x.view(T * B, -1)
carried_obj_emb = carried_obj_emb.view(T * B, -1)
carried_col_emb = carried_col_emb.view(T * B, -1)
union = torch.cat([x, carried_obj_emb, carried_col_emb], dim=1)
core_input = self.fc(union)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return dict(policy_logits=policy_logits, baseline=baseline, action=action), core_state
Net = MinigridNet
GeneratorNet = Generator
class Minigrid2Image(gym.ObservationWrapper):
"""Get MiniGrid observation to ignore language instruction."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = env.observation_space.spaces["image"]
def observation(self, observation):
return observation["image"]
def create_env(flags):
return Minigrid2Image(wrappers.FullyObsWrapper(gym.make(flags.env)))
def main(flags):
if flags.mode == "train":
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import torch
from torch import nn
from torch.nn import functional as F
class AdaptiveEmbedding(nn.Module):
""" An adaptive embedding module from "Adaptive Input Representations for
Neural Language Modeling" (https://arxiv.org/abs/1809.10853) """
def __init__(self, n_tokens, d_embed, d_proj, cutoffs, div_val=4):
super(AdaptiveEmbedding, self).__init__()
self.n_tokens = n_tokens
self.d_embed = d_embed
self.d_proj = d_proj
assert 0 < min(cutoffs) <= max(cutoffs) < n_tokens
self.cutoffs = cutoffs + [n_tokens]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
assert self.div_val > 1
assert len(self.cutoffs) > 1
self.emb_scale = d_proj ** 0.5
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
# embedding layers / projections
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
self.emb_projs.append(nn.Linear(d_emb_i, d_proj).weight)
def forward(self, indices):
param = self.emb_layers[0].weight.data
idx_flat = indices.contiguous().view(-1)
emb_flat = torch.zeros([idx_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
# for each cluster
for i in range(len(self.cutoffs)):
# find elements in that cluster
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (idx_flat >= l_idx) & (idx_flat < r_idx)
# if there are no elements, continue
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
# add embeddings from this cluster
idx_i = idx_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](idx_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat = emb_flat.type_as(emb_i) if emb_flat.dtype != emb_i.dtype else emb_flat # small hack for AMP-O1
emb_flat.index_copy_(0, indices_i, emb_i)
# reshape embeddings
embed = emb_flat.view(*indices.size(), self.d_proj)
# rescale embeddings
embed.mul_(self.emb_scale)
return embed
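# --- Hedged usage sketch (assumed example, not part of the original API) ---
# Embeds a toy batch of token indices; the vocabulary size and cutoffs are
# illustrative assumptions.
def _example_adaptive_embedding():
    emb = AdaptiveEmbedding(n_tokens=1000, d_embed=64, d_proj=64,
                            cutoffs=[100, 500], div_val=4)
    tokens = torch.randint(1000, (8, 16))  # (batch, sequence)
    out = emb(tokens)
    assert out.shape == (8, 16, 64)  # every token is projected to d_proj
    return out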
class ProjectedAdaptiveLogSoftmax(nn.Module):
""" An efficient softmax implementation from "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309). """
def __init__(self, n_tokens, d_embed, d_proj, cutoffs, div_val=4):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_tokens = n_tokens
self.d_embed = d_embed
self.d_proj = d_proj
assert 0 < min(cutoffs) <= max(cutoffs) < n_tokens
self.cutoffs = cutoffs + [n_tokens]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
assert self.div_val > 1
assert len(self.cutoffs) > 1
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# clusters parameters
self.cluster_proj = nn.Linear(self.d_embed, self.n_clusters)
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
# output layers / projections
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(nn.Linear(d_emb_i, d_proj).weight)
self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
def _compute_logit(self, hidden, weight, bias, proj):
proj_hid = F.linear(hidden, proj.t().contiguous()) # TODO: .contiguous() not necessary?
logit = F.linear(proj_hid, weight, bias=bias)
return logit
def forward(self, hidden, target):
"""
Input:
- `hidden` FloatTensor(shape + (d_proj,))
- `target` LongTensor(shape)
Output:
- `nll` FloatTensor(shape)
"""
assert hidden.shape[-1] == self.d_proj
assert hidden.shape[:-1] == target.shape
shape = target.shape
hidden = hidden.view(-1, self.d_proj)
target = target.view(-1)
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_proj.weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_proj.bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
# head / cluster assignments
head_logit = self._compute_logit(hidden, weights[0], biases[0], self.out_projs[0])
head_logprob = F.log_softmax(head_logit.float(), dim=1)
# final log-probabilities
nll = torch.zeros_like(target, dtype=torch.float32, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
# for each cluster
for i in range(len(cutoff_values) - 1):
# select the target tokens in that cluster
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
# if there are not any, there is nothing to do
if indices_i.numel() == 0:
continue
# index in current cluster
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
# for targets in the head cluster, there is just the head score
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
# otherwise, we sum the cluster assignment (head) and target scores
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weights[i], biases[i], self.out_projs[i])
tail_logprob_i = F.log_softmax(tail_logit_i.float(), dim=1)
logprob_i = head_logprob_i[:, -i] + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
# populate output
nll.index_copy_(0, indices_i, -logprob_i)
offset += logprob_i.size(0)
return nll.view(shape)
def compute_dummy_loss(in_emb, out_emb):
    # hack to fix adaptive output/input embeddings with distributed code
dummy_loss = 0 * (
sum(x.weight[0, 0] for x in in_emb.emb_layers) +
sum(x[0, 0] for x in in_emb.emb_projs) +
sum(x[0, 0] for x in out_emb.out_projs) +
sum(x.weight[0, 0] for x in out_emb.out_layers) +
sum(x.bias[0] for x in out_emb.out_layers)
)
return dummy_loss
def build_adaptive_io(vocab_size, hidden_size, adapt_io_cutoffs,
adapt_io_divval, adapt_io_tied, **kargs):
in_emb = AdaptiveEmbedding(
vocab_size, hidden_size, hidden_size,
cutoffs=adapt_io_cutoffs,
div_val=adapt_io_divval)
out_emb = ProjectedAdaptiveLogSoftmax(
vocab_size, hidden_size, hidden_size,
cutoffs=adapt_io_cutoffs,
div_val=adapt_io_divval)
if adapt_io_tied:
for i in range(len(adapt_io_cutoffs) + 1):
out_emb.out_layers[i].weight = in_emb.emb_layers[i].weight
out_emb.out_projs[i] = in_emb.emb_projs[i]
return in_emb, out_emb
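# --- Hedged usage sketch (assumed example, not part of the original API) ---
# Builds a tied input-embedding / output-softmax pair and evaluates a toy
# language-modeling loss; vocabulary size, cutoffs and shapes are assumptions.
def _example_adaptive_io():
    in_emb, out_emb = build_adaptive_io(
        vocab_size=1000, hidden_size=64,
        adapt_io_cutoffs=[100, 500], adapt_io_divval=4, adapt_io_tied=True)
    tokens = torch.randint(1000, (8, 16))
    hidden = in_emb(tokens)        # (8, 16, 64) input representations
    nll = out_emb(hidden, tokens)  # (8, 16) per-token negative log-likelihood
    return nll.mean() + compute_dummy_loss(in_emb, out_emb)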
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# command-line arguments with their default values
PARAMS_CONFIG = {
# env-specific
'env_params': {
'--distributed': {
'action': 'store_true',
'default': False,
            'help': 'enable distributed training '
                    '(otherwise will use all available GPUs with dataparallel)',
'dest': 'distributed'
},
'--local_rank': {
'type': int,
'default': 0,
'help': 'used in distributed training',
'dest': 'local_rank'
},
},
# data-specific
'data_params': {
'--data': {
'type': str,
'default': 'data/text8',
'help': 'data location '
'(must contain train.txt, valid.txt and test.txt)',
'dest': 'data_path'
},
'--data-unit': {
'type': str,
'default': 'bpc',
'choices': ['bpc', 'ppl'],
'help': 'loss unit to log',
'dest': 'data_unit'
},
},
# model-specific
'model_params': {
'--hid-sz': {
'type': int,
'default': 256,
'help': 'hidden size (i.e. model size)',
'dest': 'hidden_size'
},
'--inner-hid-sz': {
'type': int,
'default': 1024,
'help': 'inner hidden size of FF layer',
'dest': 'inner_hidden_size'
},
'--nlayers': {
'type': int,
'default': 8,
'help': 'number of layers',
'dest': 'nb_layers'
},
'--block-sz': {
'type': int,
'default': 64,
'help': 'block size '
'(the length of sequence to process in parallel)',
'dest': 'block_size'
},
'--nheads': {
'type': int,
'default': 2,
'help': 'number of self-attention heads',
'dest': 'nb_heads'
},
'--attn-span': {
'type': int,
'default': 32,
'help': 'length of the attention span',
'dest': 'attn_span'
},
'--dropout': {
'type': float,
'default': 0.2,
'help': 'dropout rate of ReLU and attention',
'dest': 'dropout'
},
'--emb-dropout': {
'type': float,
'default': 0.,
'help': 'the dropout rate applied on I/O embeddings',
'dest': 'emb_dropout'
},
},
# optimization-specific
'optim_params': {
'--lr': {
'type': float,
'default': 0.03,
'help': 'learning rate',
'dest': 'lr'
},
'--momentum': {
'type': float,
'default': 0.9,
'help': 'SGD momentum',
'dest': 'momentum'
},
'--optim': {
'type': str,
'default': 'sgd',
'help': 'optimization method: sgd | adagrad',
'dest': 'optim'
},
'--lr-warmup': {
'type': int,
'default': 0,
'help': 'linearly increase LR from 0 '
'during first lr_warmup updates',
'dest': 'lr_warmup'
},
'--grad-clip': {
'type': float,
'default': 0,
            'help': '[only works with adagrad!] '
                    'clip the gradient of each module parameter by a given '
                    'value',
'dest': 'grad_clip'
},
},
# trainer-specific
'trainer_params': {
'--batch-sz': {
'type': int,
'default': 64,
'help': 'batch size',
'dest': 'batch_size'
},
'--batch-split': {
'type': int,
'default': 1,
'help': 'split a batch into smaller parts to fit in GPU memory',
'dest': 'batch_split'
},
'--nbatches': {
'type': int,
'default': 1000,
'help': 'number of batches in each iteration',
'dest': 'nb_batches_per_iter'
},
'--niter': {
'type': int,
'default': 1000,
'help': 'number of iterations to train',
'dest': 'nb_iter'
},
'--checkpoint': {
'type': str,
'default': '',
'help': 'path to save/load model',
'dest': 'checkpoint_path'
},
'--full-eval-mode': {
'action': 'store_true',
'default': False,
'help': 'do evaluation on the whole validation and the test data',
'dest': 'full_eval_mode'
},
},
# adaptive I/O specific params
'adapt_io_params': {
'--adapt-io': {
'action': 'store_true',
'default': False,
'help': 'enable adaptive input and output representations',
'dest': 'adapt_io_enabled'
},
'--adapt-io-tied': {
'action': 'store_true',
'default': False,
'help': 'tie the input parameters with the output parameters',
'dest': 'adapt_io_tied'
},
'--adapt-io-divval': {
'type': int,
'default': 4,
'help': 'dimension division value',
'dest': 'adapt_io_divval'
},
'--adapt-io-cutoffs': {
'type': int,
'default': [20000, 40000, 200000],
'help': 'cutoffs values',
'dest': 'adapt_io_cutoffs'
},
},
# adaptive attention span specific params
'adapt_span_params': {
'--adapt-span': {
'action': 'store_true',
'default': False,
'help': 'enable adaptive attention span',
'dest': 'adapt_span_enabled'
},
'--adapt-span-loss': {
'type': float,
'default': 0,
'help': 'the loss coefficient for span lengths',
'dest': 'adapt_span_loss'
},
'--adapt-span-ramp': {
'type': int,
'default': 32,
'help': 'ramp length of the soft masking function',
'dest': 'adapt_span_ramp'
},
'--adapt-span-init': {
'type': float,
'default': 0,
'help': 'initial attention span ratio',
'dest': 'adapt_span_init'
},
'--adapt-span-cache': {
'action': 'store_true',
'default': False,
'help': 'adapt cache size as well to reduce memory usage',
'dest': 'adapt_span_cache'
},
},
# persistent memory specific params
'pers_mem_params': {
'--pers-mem-size': {
'type': int,
'default': 0,
'help': 'the number of persistent memory vectors',
'dest': 'pers_mem_size'
},
},
}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveMask(nn.Module):
"""Soft masking function for adaptive size.
It masks out the last K values of an input. The masking value
goes from 1 to 0 gradually, so K can be learned with
back-propagation.
Args:
max_size: maximum size (i.e. input dimension)
ramp_size: size of the ramp going from 0 to 1
init_val: initial size proportion not to be masked out
shape: learn multiple sizes independent of each other
"""
def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)):
nn.Module.__init__(self)
self._max_size = max_size
self._ramp_size = ramp_size
self.current_val = nn.Parameter(torch.zeros(*shape) + init_val)
mask_template = torch.linspace(1 - max_size, 0, steps=max_size)
self.register_buffer('mask_template', mask_template)
def forward(self, x):
mask = self.mask_template + self.current_val * self._max_size
mask = mask / self._ramp_size + 1
mask = mask.clamp(0, 1)
if x.size(-1) < self._max_size:
# the input could have been trimmed beforehand to save computation
mask = mask[:, :, -x.size(-1):]
x = x * mask
return x
def get_current_max_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.max().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def get_current_avg_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.mean().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def clamp_param(self):
"""this need to be called after each update"""
self.current_val.data.clamp_(0, 1)
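# Illustrative numbers for AdaptiveMask (not from the original code): with
# max_size=8, ramp_size=4 and current_val=0.5, the mask over the last
# dimension is [0.25, 0.5, 0.75, 1, 1, 1, 1, 1] after clamping, i.e. the
# lowest-indexed (oldest) positions are attenuated while the most recent
# positions pass through unchanged; increasing current_val widens the
# unmasked region.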
class AdaptiveSpan(nn.Module):
"""Adaptive attention span for Transformerself.
This module learns an attention span length from data for each
self-attention head.
Args:
attn_span: maximum attention span
adapt_span_loss: loss coefficient for the span length
adapt_span_ramp: length of the masking ramp
adapt_span_init: initial size ratio
adapt_span_cache: adapt cache size to reduce memory usage
"""
def __init__(self, attn_span, adapt_span_loss, adapt_span_ramp,
adapt_span_init, adapt_span_cache, nb_heads, **kargs):
nn.Module.__init__(self)
self._adapt_cache = adapt_span_cache
self._max_span = attn_span
self._loss_coeff = adapt_span_loss
self._nb_heads = nb_heads
self._mask = AdaptiveMask(max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
shape=(nb_heads, 1, 1))
def forward(self, attn, normalize=True):
"""mask attention with the right span"""
# batch and head dimensions are merged together, so separate them first
B = attn.size(0) # batch size
M = attn.size(1) # block size
attn = attn.reshape(B // self._nb_heads, self._nb_heads, M, -1)
attn = self._mask(attn)
if normalize:
attn = attn / (attn.sum(-1, keepdim=True) + 1e-8) # normalize so sum is 1
attn = attn.view(B, M, -1)
return attn
def get_trim_len(self):
"""how much of memory can be trimmed to reduce computation"""
L = self._max_span
trim_len = min(L - 1, L - self._mask.get_current_max_size())
        # too fine a granularity might be bad for memory management
trim_len = math.floor(trim_len / 64) * 64
return trim_len
def trim_memory(self, query, key, value, key_pe):
"""trim out unnecessary memory beforehand to reduce computation"""
trim_len = self.get_trim_len()
cache_size = key.size(1) - query.size(1)
trim_len_cache = trim_len - (self._max_span - cache_size)
if trim_len_cache > 0:
key = key[:, trim_len_cache:, :]
value = value[:, trim_len_cache:, :]
elif trim_len_cache < 0:
# cache is too short! this happens when validation resumes
# after a lot of updates.
key = F.pad(key, [0, 0, -trim_len_cache, 0])
value = F.pad(value, [0, 0, -trim_len_cache, 0])
if trim_len > 0:
if key_pe is not None:
key_pe = key_pe[:, :, trim_len:]
return key, value, key_pe
def get_cache_size(self):
"""determine how long the cache should be"""
if self._adapt_cache:
trim_len = self.get_trim_len()
# give a buffer of 64 steps since a span might increase
# in future updates
return min(self._max_span, self._max_span - trim_len + 64)
else:
return self._max_span
def get_loss(self):
"""a loss term for regularizing the span length"""
return self._loss_coeff * self._max_span * self._mask.current_val.mean()
def get_current_max_span(self):
return self._mask.get_current_max_size()
def get_current_avg_span(self):
return self._mask.get_current_avg_size()
def clamp_param(self):
self._mask.clamp_param()
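# Minimal usage sketch (illustrative only, not part of the original module):
# apply an AdaptiveSpan to a random attention tensor whose batch and head
# dimensions are merged, assuming 2 heads, a block size of 4 and a maximum
# span of 8.
if __name__ == '__main__':
    span = AdaptiveSpan(attn_span=8, adapt_span_loss=0., adapt_span_ramp=4,
                        adapt_span_init=0.5, adapt_span_cache=False, nb_heads=2)
    attn = torch.softmax(torch.randn(2 * 2, 4, 8), dim=-1)  # (B*K) x M x L
    masked = span(attn)  # same shape, masked beyond the span and re-normalized
    print(masked.shape, span.get_current_avg_span(), span.get_loss().item())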
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from adaptive_span import AdaptiveSpan
from persistent_memory import PersistentMemory
from adaptive_io import build_adaptive_io, compute_dummy_loss
# Size notations:
# B = batch_size, H = hidden_size, M = block_size, L = attn_span
def _skew(X, pad_value):
"""shift every row 1 step to right"""
# X = B x M x L
B, M, L = X.size()
X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1)
X = X.view(B, -1) # B x ML+MM+M
X = X[:, :-M] # B x ML+MM
X = X.view(B, M, M + L) # B x M x L+M
return X
def _unskew(X):
"""reverse _skew operation"""
# X = B x M x L+M
B, M, L = X.size()
L -= M
X = X.view(B, -1) # B x ML+MM
X = F.pad(X, (0, M)) # B x ML+MM+M
X = X.view(B, M, M + L + 1) # B x M x L+M+1
X = X[:, :, :L] # B x M x L
return X
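# Illustrative example (hypothetical shapes): for a B x M x L tensor X,
# _skew(X, 0) pads and reshapes it to B x M x (L+M) so that row i is shifted
# i steps to the right, converting relative attention offsets into absolute
# positions; _unskew reverses this, so _unskew(_skew(X, 0)) equals X.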
class SeqAttention(nn.Module):
"""Sequential self-attention layer.
    Each token attends to a fixed number of previous steps.
Note that attention doesn't include the current step itself.
"""
def __init__(self, hidden_size, nb_heads, attn_span,
dropout, adapt_span_params, pers_mem_params, **kargs):
nn.Module.__init__(self)
self.dropout = nn.Dropout(dropout)
self.hidden_size = hidden_size # size of a single head
self.attn_span = attn_span
self.adapt_span_enabled = adapt_span_params['adapt_span_enabled']
if self.adapt_span_enabled:
self.adaptive_span = AdaptiveSpan(attn_span=attn_span, nb_heads=nb_heads,
**adapt_span_params, **kargs)
self.persistent_memory = None
if pers_mem_params['pers_mem_size'] > 0:
self.persistent_memory = PersistentMemory(
pers_mem_params['pers_mem_size'], nb_heads, hidden_size, dropout)
if self.adapt_span_enabled:
self.persistent_memory.adaptive_span = self.adaptive_span
def forward(self, query, key, value, key_pe):
# query size = B x M x H
# key, value sizes = B x (M+L) x H
if self.adapt_span_enabled:
# [optional] trim out memory to reduce unnecessary computation
key, value, key_pe = self.adaptive_span.trim_memory(
query, key, value, key_pe)
# compute attention from context
# B x M (dest) x (M+L) (src)
attn_cont = torch.matmul(query, key.transpose(-1, -2))
attn_cont = _unskew(attn_cont) # B x M x L
# compute the effect of position embedding
attn_pos = torch.matmul(query, key_pe) # B x M x L_pos
attn = attn_cont + attn_pos
if self.persistent_memory is not None:
attn, pers_mem_out = self.persistent_memory(query, attn)
else:
attn = attn / math.sqrt(self.hidden_size) # B x M X L_pos
attn = F.softmax(attn, dim=-1)
if self.adapt_span_enabled:
# trim attention lengths according to the learned span
attn = self.adaptive_span(attn)
attn = self.dropout(attn) # B x M X L_pos
attn_cont = _skew(attn, 0) # B x M X (L+M)
out = torch.matmul(attn_cont, value) # B x M x H
if self.persistent_memory is not None:
out = out + pers_mem_out
return out
def get_cache_size(self):
if self.adapt_span_enabled:
return self.adaptive_span.get_cache_size()
else:
return self.attn_span
class MultiHeadSeqAttention(nn.Module):
def __init__(self, hidden_size, nb_heads, **kargs):
nn.Module.__init__(self)
assert hidden_size % nb_heads == 0
self.nb_heads = nb_heads
self.head_dim = hidden_size // nb_heads
self.attn = SeqAttention(
hidden_size=self.head_dim, nb_heads=nb_heads, **kargs)
self.proj_query = nn.Linear(hidden_size, hidden_size, bias=False)
self.proj_out = nn.Linear(hidden_size, hidden_size, bias=False)
self.proj_val = nn.Linear(hidden_size, hidden_size, bias=False)
self.proj_key = nn.Linear(hidden_size, hidden_size, bias=False)
def head_reshape(self, x):
K = self.nb_heads
D = self.head_dim
x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D
x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D
x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D
return x
def forward(self, query, key, value, key_pe):
B = query.size(0)
K = self.nb_heads
D = self.head_dim
M = query.size(1)
query = self.proj_query(query)
query = self.head_reshape(query)
value = self.proj_val(value)
value = self.head_reshape(value)
key = self.proj_key(key)
key = self.head_reshape(key)
out = self.attn(query, key, value, key_pe) # B_K x M x D
out = out.view(B, K, M, D) # B x K x M x D
out = out.transpose(1, 2).contiguous() # B x M x K x D
out = out.view(B, M, -1) # B x M x K_D
out = self.proj_out(out)
return out
class FeedForwardLayer(nn.Module):
def __init__(self, hidden_size, inner_hidden_size, dropout, **kargs):
nn.Module.__init__(self)
self.fc1 = nn.Linear(hidden_size, inner_hidden_size)
self.fc2 = nn.Linear(inner_hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, h):
h1 = F.relu(self.fc1(h))
h1 = self.dropout(h1)
h2 = self.fc2(h1)
return h2
class TransformerSeqLayer(nn.Module):
def __init__(self, hidden_size, **kargs):
nn.Module.__init__(self)
self.attn = MultiHeadSeqAttention(hidden_size=hidden_size, **kargs)
self.norm1 = nn.LayerNorm(hidden_size)
if kargs['pers_mem_params']['pers_mem_size'] > 0:
# replacing FF with persistent memory
self.ff = None
else:
self.ff = FeedForwardLayer(hidden_size=hidden_size, **kargs)
self.norm2 = nn.LayerNorm(hidden_size)
def forward(self, h, h_cache, key_pe):
# h = B x M x H
# h_cache = B x L x H
h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H
attn_out = self.attn(h, h_all, h_all, key_pe)
h = self.norm1(h + attn_out) # B x M x H
if self.ff is not None:
ff_out = self.ff(h)
out = self.norm2(h + ff_out) # B x M x H
else:
out = h
return out
class TransformerSeq(nn.Module):
def __init__(self, vocab_size, hidden_size, nb_heads, nb_layers,
attn_span, emb_dropout, adapt_io_params, **kargs):
nn.Module.__init__(self)
# token embeddings
self.adapt_io = adapt_io_params['adapt_io_enabled']
if self.adapt_io:
self.in_emb, self.out_emb = build_adaptive_io(
vocab_size, hidden_size, **adapt_io_params)
else:
self.in_emb = nn.Embedding(vocab_size, hidden_size)
self.out_emb = nn.Linear(hidden_size, vocab_size)
if emb_dropout > 0:
self.emb_dropout = nn.Dropout(emb_dropout)
else:
self.emb_dropout = None
# position embeddings
self.key_pe = nn.Parameter(
torch.randn(1, hidden_size // nb_heads, attn_span))
self.layers = nn.ModuleList()
self.layers.extend(
TransformerSeqLayer(
hidden_size=hidden_size, nb_heads=nb_heads,
attn_span=attn_span, **kargs)
for _ in range(nb_layers))
def forward(self, x, h_cache, target=None):
# x size = B x M
block_size = x.size(1)
h = self.in_emb(x) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
h_cache_next = []
for l, layer in enumerate(self.layers):
cache_size = layer.attn.attn.get_cache_size()
if cache_size > block_size:
h_cache_next_l = torch.cat(
[h_cache[l][:, -cache_size + block_size:, :], h],
dim=1).detach()
else:
h_cache_next_l = h[:, -cache_size:, :].detach()
h_cache_next.append(h_cache_next_l)
h = layer(h, h_cache[l], self.key_pe) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
if self.adapt_io:
# loss is computed here
out = self.out_emb(h, target)
dummy_loss = compute_dummy_loss(self.in_emb, self.out_emb)
else:
out = F.log_softmax(self.out_emb(h), dim=-1)
dummy_loss = None
return out, h_cache_next, dummy_loss
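# Minimal smoke-test sketch (illustrative values only, not part of the
# original module): build a tiny model with adaptive span, adaptive I/O and
# persistent memory all disabled, and run a single forward pass.
if __name__ == '__main__':
    model = TransformerSeq(
        vocab_size=100, hidden_size=32, nb_heads=2, nb_layers=2,
        attn_span=16, emb_dropout=0.,
        adapt_io_params={'adapt_io_enabled': False},
        adapt_span_params={'adapt_span_enabled': False},
        pers_mem_params={'pers_mem_size': 0},
        inner_hidden_size=64, dropout=0.1)
    x = torch.randint(0, 100, (4, 8))  # B x M token ids
    h_cache = [torch.zeros(4, layer.attn.attn.get_cache_size(), 32)
               for layer in model.layers]
    out, h_cache, _ = model(x, h_cache)
    print(out.shape)  # B x M x vocab_size log-probabilities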
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import os
import math
import argparse
import torch
from adagrad_with_grad_clip import AdagradWithGradClip
def _parse_args(params_config, args):
parser = argparse.ArgumentParser()
for params_category in params_config: # e.g., 'model_params'
for param_flag, param_config in params_config[params_category].items():
# e.g., param_flag = '--block-sz'
parser.add_argument(param_flag, **param_config)
return parser.parse_args(args)
def get_params(params_config, args=None):
namespace = _parse_args(params_config, args)
return {
params_category: {
param_config['dest']:
namespace.__getattribute__(param_config['dest'])
for param_config in params_config[params_category].values()
}
for params_category in params_config
}
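# Illustrative example (hypothetical arguments): get_params(PARAMS_CONFIG,
# ['--hid-sz', '128', '--lr', '0.05']) returns one sub-dict per category,
# keyed by each flag's 'dest' name, e.g.
# {'model_params': {'hidden_size': 128, ...},
#  'optim_params': {'lr': 0.05, ...}, ...}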
##############################################################################
# ENVIRONMENT
##############################################################################
def _torch_distributed_init_process_group(local_rank):
torch.distributed.init_process_group(
backend='nccl',
init_method='env://'
)
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
print('my rank={} local_rank={}'.format(rank, local_rank))
torch.cuda.set_device(local_rank)
return {
'rank': rank,
'world_size': world_size,
}
def set_up_env(env_params):
assert torch.cuda.is_available()
if env_params['distributed']:
env_params.update(
_torch_distributed_init_process_group(
local_rank=env_params['local_rank']))
env_params['device'] = torch.device('cuda')
##############################################################################
# OPTIMIZER AND SCHEDULER
##############################################################################
def _get_grad_requiring_params(model):
nb_parameters = 0
grad_requiring_params = []
for param in model.parameters():
if param.requires_grad:
nb_parameters += param.numel()
grad_requiring_params.append(param)
print('nb_parameters={:.2f}M'.format(nb_parameters / 1e6))
return grad_requiring_params
def _get_optimizer(model,
optim,
lr: float,
momentum: float,
grad_clip: float):
if optim == 'sgd':
optimizer = torch.optim.SGD(_get_grad_requiring_params(model),
lr=lr,
momentum=momentum)
optimizer.grad_clip = grad_clip
return optimizer
elif optim == 'adagrad':
optimizer = AdagradWithGradClip(_get_grad_requiring_params(model),
lr=lr,
grad_clip=grad_clip)
optimizer.grad_clip = 0 # done internally
return optimizer
elif optim == 'adam':
optimizer = torch.optim.Adam(_get_grad_requiring_params(model),
lr=lr)
optimizer.grad_clip = grad_clip
return optimizer
else:
raise RuntimeError("wrong type of optimizer "
"- must be 'sgd', 'adagrad' or 'adam'")
def _get_scheduler(optimizer, lr_warmup):
if lr_warmup > 0:
return torch.optim.lr_scheduler.LambdaLR(
optimizer, lambda ep: min(1, ep / lr_warmup))
return None
def get_optimizer_and_scheduler(model, optim_params):
optimizer = _get_optimizer(model=model,
optim=optim_params['optim'],
lr=optim_params['lr'],
momentum=optim_params['momentum'],
grad_clip=optim_params['grad_clip'])
scheduler = _get_scheduler(optimizer=optimizer,
lr_warmup=optim_params['lr_warmup'])
return optimizer, scheduler
##############################################################################
# CHECKPOINT
##############################################################################
def _load_checkpoint(checkpoint_path, model, optimizer, scheduler, logger,
distributed):
print('loading from a checkpoint at {}'.format(checkpoint_path))
if distributed:
# the model is saved from gpu0 so we need to map it to CPU first
checkpoint_state = torch.load(
checkpoint_path, map_location=lambda storage, loc: storage)
else:
checkpoint_state = torch.load(checkpoint_path)
iter_init = checkpoint_state['iter_no'] + 1 # next iteration
model.load_state_dict(checkpoint_state['model'])
optimizer.load_state_dict(checkpoint_state['optimizer'])
logger.load_state_dict(checkpoint_state['logger'])
if 'scheduler_iter' in checkpoint_state:
# we only need the step count
scheduler.step(checkpoint_state['scheduler_iter'])
return iter_init
def load_checkpoint(checkpoint_path, model, optimizer, scheduler, logger,
distributed):
if checkpoint_path and os.path.exists(checkpoint_path):
return _load_checkpoint(checkpoint_path=checkpoint_path,
model=model,
optimizer=optimizer,
scheduler=scheduler,
logger=logger,
distributed=distributed)
return 0
def save_checkpoint(checkpoint_path, iter_no, model,
optimizer, scheduler, logger):
if checkpoint_path:
checkpoint_state = {
'iter_no': iter_no, # last completed iteration
'model': model.state_dict(),
'logger': logger.state_dict(),
'optimizer': optimizer.state_dict(),
}
if scheduler is not None:
checkpoint_state['scheduler_iter'] = scheduler.last_epoch
torch.save(checkpoint_state, checkpoint_path)
##############################################################################
# LOGGER
##############################################################################
class Logger:
def __init__(self, data_unit):
self.data_unit = data_unit
self._state_dict = dict()
def load_state_dict(self, state_dict):
self._state_dict = state_dict
def state_dict(self):
return self._state_dict
def _log(self, title, value):
if title not in self._state_dict:
self._state_dict[title] = []
self._state_dict[title].append(value)
def log_iter(self, iter_no, nb_batches_per_iter, loss_train, loss_val,
elapsed, model):
step = (iter_no + 1) * nb_batches_per_iter
self._log(title='step', value=step)
msg = 'steps: {}'.format(step)
if self.data_unit == 'bpc':
train_bpc = float(loss_train / math.log(2))
val_bpc = float(loss_val / math.log(2))
msg += '\ttrain: {:.3f}bpc\tval: {:.3f}bpc'.format(train_bpc, val_bpc)
self._log(title='train_bpc', value=train_bpc)
self._log(title='val_bpc', value=val_bpc)
else:
train_ppl = math.exp(loss_train)
val_ppl = math.exp(loss_val)
msg += '\ttrain: {:.2f}ppl\tval: {:.2f}ppl'.format(train_ppl, val_ppl)
self._log(title='train_ppl', value=train_ppl)
self._log(title='val_ppl', value=val_ppl)
msg += '\tms/batch: {:.1f}'.format(elapsed)
if model.module.layers[0].attn.attn.adapt_span_enabled:
avg_spans = []
max_spans = []
for layer in model.module.layers:
avg_spans.append(
layer.attn.attn.adaptive_span.get_current_avg_span())
max_spans.append(
layer.attn.attn.adaptive_span.get_current_max_span())
span_avg = float(sum(avg_spans)) / len(avg_spans)
span_max = float(max(max_spans))
self._log('span_avg', span_avg)
self._log('span_max', span_max)
msg += "\tspan_avg: {:.0f}\tspan_max: {:.0f}".format(span_avg, span_max)
print(msg)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import random
import torch
def _train_step(model, X, Y, h_cache, eval_only, loss_div=1):
"""Single training step."""
out, h_cache, dummy_loss = model(X, h_cache, target=Y)
if model.module.adapt_io:
loss = out.mean() + dummy_loss.sum()
else:
out = out.view(-1, out.size(-1))
loss = torch.nn.functional.nll_loss(out, Y.view(-1))
loss_value = loss.item() / loss_div
if not eval_only:
# loss term from adaptive-span
if model.module.layers[0].attn.attn.adapt_span_enabled:
loss += sum(layer.attn.attn.adaptive_span.get_loss()
for layer in model.module.layers)
(loss / loss_div).backward()
return loss_value, h_cache
def _train_batch(model, optimizer, scheduler, X, Y, h_cache,
eval_only, batch_split):
"""Train on a batch."""
optimizer.zero_grad()
if batch_split == 1:
# process a batch in a single step (default behaviour)
loss_value, h_cache = _train_step(model, X, Y, h_cache, eval_only)
else:
# split a batch into multiple pieces that each can fit in memory
assert X.size(0) % batch_split == 0
split_size = X.size(0) // batch_split
loss_value = 0
h_cache_list = []
for split_ind in range(batch_split):
split_slice = slice(split_ind*split_size, (split_ind+1)*split_size)
split_h_cache = [h[split_slice,:,:] for h in h_cache]
split_loss_value, split_h_cache = _train_step(
model, X[split_slice,:], Y[split_slice],
split_h_cache, eval_only, batch_split)
loss_value += split_loss_value
h_cache_list.append(split_h_cache)
h_cache = [
torch.cat(
[h_cache_list[i][l] for i in range(batch_split)]
, dim=0) for l in range(len(h_cache))]
if not eval_only:
if scheduler is not None:
scheduler.step()
if optimizer.grad_clip > 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), optimizer.grad_clip)
optimizer.step()
# make sure span parameters are in a correct range
if model.module.layers[0].attn.attn.adapt_span_enabled:
for layer in model.module.layers:
layer.attn.attn.adaptive_span.clamp_param()
return loss_value, h_cache
def train_iteration(model, optimizer, scheduler, data, nb_batches_per_iter,
block_size, eval_only, train_pos, h_cache, batch_split):
"""Single training iteration."""
if eval_only:
model.eval()
else:
model.train()
nb_batches_per_iter_max = nb_batches_per_iter
if eval_only:
# eval on fewer batches during training for speed-up
nb_batches_per_iter_max = max(1, nb_batches_per_iter // 10)
nb_batches_per_iter_max = min(nb_batches_per_iter_max,
math.ceil(data.size(1) / block_size))
loss_all = 0
actual_nb_batches_per_iter = 0
for _ in range(nb_batches_per_iter_max):
actual_nb_batches_per_iter += 1
X = data[:, train_pos: train_pos + block_size].contiguous()
Y = data[:, train_pos + 1: train_pos + block_size + 1].contiguous()
loss, h_cache = _train_batch(
model=model,
optimizer=optimizer,
scheduler=scheduler,
X=X, Y=Y,
h_cache=h_cache,
eval_only=eval_only,
batch_split=batch_split)
loss_all += loss
train_pos += block_size
if train_pos >= data.size(1) - block_size:
# reached the end. randomize the offset to reduce overfitting
train_pos = random.randrange(block_size)
# reset the cache
for h in h_cache:
h.fill_(0)
loss_all = loss_all / actual_nb_batches_per_iter
return loss_all, train_pos, h_cache
# do full evaluation
def full_eval(model, optimizer, scheduler, data, block_size, hidden_size):
model.eval()
train_pos = 0
nb_batches_per_iter_max = math.ceil(data.size(1) / block_size)
h_cache = [
torch.zeros(
data.size(0),
layer.attn.attn.get_cache_size(),
hidden_size).to(data.device)
for layer in model.module.layers]
loss_all = 0
actual_nb_batches_per_iter = 0
for _ in range(nb_batches_per_iter_max):
actual_nb_batches_per_iter += 1
X = data[:, train_pos: train_pos + block_size].contiguous()
Y = data[:, train_pos + 1: train_pos + block_size + 1].contiguous()
loss, h_cache = _train_batch(
model=model,
optimizer=optimizer,
scheduler=scheduler,
X=X, Y=Y,
h_cache=h_cache,
eval_only=True,
batch_split=1)
loss_all += loss
train_pos += block_size
if train_pos >= data.size(1) - block_size:
            # Skip the remaining tokens as they can't form a whole block.
            # The effect on performance should be negligible for large datasets.
break
loss_all = loss_all / actual_nb_batches_per_iter
return loss_all
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
from argparse import Namespace
import math
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class PersistentMemory(nn.Module):
def __init__(self, size, nb_heads, head_dim, dropout):
super(PersistentMemory, self).__init__()
self.size = size
self.nb_heads = nb_heads
self.head_dim = head_dim
# different heads have different vectors
self.key = nn.Parameter(torch.randn(self.nb_heads, self.head_dim, self.size) / math.sqrt(self.head_dim))
self.val = nn.Parameter(torch.randn(self.nb_heads, self.size, self.head_dim) / math.sqrt(self.size))
self.dropout = nn.Dropout(dropout)
self.adaptive_span = None
def forward(self, query, attn):
key = self.key.unsqueeze(0)
val = self.val.unsqueeze(0)
query = query.view((-1, self.nb_heads) + query.size()[1:])
attn_pers = torch.matmul(query, key * math.sqrt(self.head_dim))
attn_pers = attn_pers.view((-1,) + attn_pers.size()[2:])
# compute softmax jointly
attn = torch.cat((attn, attn_pers), dim=-1)
attn = attn / math.sqrt(self.head_dim) # B x M X L_total
attn = F.softmax(attn, dim=-1)
attn_pers = attn[:, :, -key.size(-1):]
attn = attn[:, :, :-key.size(-1)] # B x M X L
# adapt attention span
if self.adaptive_span is not None:
attn = self.adaptive_span(attn, normalize=False)
# normalize the sum jointly!
attn = torch.cat((attn, attn_pers), dim=-1)
attn = attn / (attn.sum(-1, keepdim=True) + 1e-8)
attn_pers = attn[:, :, -key.size(-1):]
attn = attn[:, :, :-key.size(-1)] # B x M X L
attn_pers = self.dropout(attn_pers) # B x M X L
attn_pers = attn_pers.view((-1, self.nb_heads) + attn_pers.size()[1:])
out = torch.matmul(attn_pers, val * math.sqrt(self.size))
out = out.view((-1,) + out.size()[2:])
return attn, out
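# Minimal usage sketch (illustrative only, not part of the original module):
# 2 heads with head_dim 16 and 4 persistent vectors; as in SeqAttention, the
# batch and head dimensions of `query` and `attn` are merged.
if __name__ == '__main__':
    pm = PersistentMemory(size=4, nb_heads=2, head_dim=16, dropout=0.)
    query = torch.randn(3 * 2, 5, 16)  # (B*K) x M x head_dim
    attn = torch.randn(3 * 2, 5, 8)    # (B*K) x M x L, pre-softmax scores
    attn, pers_out = pm(query, attn)
    print(attn.shape, pers_out.shape)  # (6, 5, 8) and (6, 5, 16)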
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import math
import time
import torch
from config import PARAMS_CONFIG
from data import get_train_val_test_data
from models import TransformerSeq
from trainer import train_iteration, full_eval
from utils import (
get_params,
set_up_env,
get_optimizer_and_scheduler,
load_checkpoint,
save_checkpoint,
Logger)
def launch(env_params,
model_params,
adapt_io_params,
adapt_span_params,
pers_mem_params,
optim_params,
data_params,
trainer_params):
# ENVIRONMENT (device, distributed, etc.)
set_up_env(env_params)
device = env_params['device']
distributed = env_params['distributed']
    if not distributed or env_params['rank'] == 0:
print('model_params:\t', model_params)
print('optim_params:\t', optim_params)
print('data_params:\t', data_params)
print('trainer_params:\t', trainer_params)
print('adapt_io_params:\t', adapt_io_params)
print('adapt_span_params:\t', adapt_span_params)
print('pers_mem_params:\t', pers_mem_params)
# DATA
train_data, val_data, test_data = get_train_val_test_data(
data_params=data_params,
env_params=env_params,
batch_size=trainer_params['batch_size'],
device=device,
sort_dict=adapt_io_params['adapt_io_enabled'])
# MODEL
model = TransformerSeq(
vocab_size=data_params['vocab_size'], **model_params,
adapt_io_params=adapt_io_params,
adapt_span_params=adapt_span_params,
pers_mem_params=pers_mem_params)
if distributed:
local_rank = env_params['local_rank']
model = model.to(device)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank)
else:
model = torch.nn.DataParallel(model)
model = model.to(device)
# OPTIMIZER AND SCHEDULER
optimizer, scheduler = get_optimizer_and_scheduler(
model=model, optim_params=optim_params)
# create logger
logger = Logger(data_params['data_unit'])
# resume training from last checkpoint if exists
iter_init = load_checkpoint(
trainer_params['checkpoint_path'], model, optimizer, scheduler,
logger, distributed)
if trainer_params['full_eval_mode']:
# evaluate the model on test data
with torch.no_grad():
loss_val = full_eval(model, optimizer, scheduler, val_data,
model_params['block_size'],
model_params['hidden_size'])
loss_test = full_eval(model, optimizer, scheduler, test_data,
model_params['block_size'],
model_params['hidden_size'])
if distributed:
# collect results into rank0
stats = torch.tensor(
[loss_val, loss_test]).to(device)
torch.distributed.reduce(stats, 0)
if env_params['rank'] == 0:
loss_val = stats[0] / env_params['world_size']
loss_test = stats[1] / env_params['world_size']
else:
return
if data_params['data_unit'] == 'bpc':
print('val: {:.3f}bpc'.format(loss_val / math.log(2)))
print('test: {:.3f}bpc'.format(loss_test / math.log(2)))
else:
print('val: {:.2f}ppl'.format(math.exp(loss_val)))
print('test: {:.2f}ppl'.format(math.exp(loss_test)))
return
# position of current batch
data_pos = [0] * 2
# initialize caches for train and valid
hid_cache = [[
torch.zeros(
train_data.size(0),
layer.attn.attn.get_cache_size(),
model_params['hidden_size']).to(device)
for layer in model.module.layers] for _ in range(2)]
nb_batches_per_iter = trainer_params['nb_batches_per_iter']
for iter_no in range(iter_init, trainer_params['nb_iter']):
t_sta = time.time()
loss_train, data_pos[0], hid_cache[0] = train_iteration(
model, optimizer, scheduler, train_data, nb_batches_per_iter,
model_params['block_size'], False, data_pos[0], hid_cache[0],
trainer_params['batch_split'])
elapsed = 1000 * (time.time() - t_sta) / nb_batches_per_iter
with torch.no_grad():
loss_val, data_pos[1], hid_cache[1] = train_iteration(
model, optimizer, scheduler, val_data, nb_batches_per_iter,
model_params['block_size'], True, data_pos[1], hid_cache[1],
trainer_params['batch_split'])
if distributed:
# collect results into rank0
stats = torch.tensor(
[loss_train, loss_val]).to(device)
torch.distributed.reduce(stats, 0)
if env_params['rank'] == 0:
loss_train = stats[0] / env_params['world_size']
loss_val = stats[1] / env_params['world_size']
else:
continue
logger.log_iter(iter_no, nb_batches_per_iter, loss_train,
loss_val, elapsed, model)
save_checkpoint(trainer_params['checkpoint_path'],
iter_no, model, optimizer, scheduler, logger)
if __name__ == '__main__':
launch(**get_params(params_config=PARAMS_CONFIG))
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
import os
import torch
class Dictionary(object):
def __init__(self, path, sort_dict=False):
self.word2idx = {}
self.word2count = {}
self.idx2word = []
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split() + ['<eos>']
for word in words:
if sort_dict:
self.word2count[word] = self.word2count.get(word, 0) + 1
elif word not in self.word2idx:
self.word2idx[word] = len(self.idx2word)
self.idx2word.append(word)
if sort_dict:
# Sort dictionary by count and build indices accordingly:
sorted_dict = sorted(self.word2count.items(), key=lambda kv: kv[1])[::-1]
for i in range(len(sorted_dict)):
word = sorted_dict[i][0]
self.word2idx[word] = i
self.idx2word.append(word)
def __len__(self):
return len(self.idx2word)
def _tokenize(text_path, dictionary):
"""Tokenizes a text file."""
print('Tokenizing {}'.format(text_path))
assert os.path.exists(text_path)
# Assign to each token its identifier
ids = []
with open(text_path, 'r', encoding="utf8") as f:
for line in f:
tokens = line.split() + ['<eos>']
for token in tokens:
ids.append(dictionary[token])
ids = torch.LongTensor(ids)
return ids
class Corpus:
def __init__(self, data_path, sort_dict):
print('Building dictionary')
self._dictionary = Dictionary(os.path.join(data_path, 'train.txt'), sort_dict)
self.train = _tokenize(
text_path=os.path.join(data_path, 'train.txt'),
dictionary=self._dictionary.word2idx)
self.valid = _tokenize(
text_path=os.path.join(data_path, 'valid.txt'),
dictionary=self._dictionary.word2idx)
self.test = _tokenize(
text_path=os.path.join(data_path, 'test.txt'),
dictionary=self._dictionary.word2idx)
@property
def vocab_size(self):
return len(self._dictionary)
def _batchify(data_tensor, batch_size):
nb_batches = data_tensor.size(0) // batch_size
# trim away some tokens to make whole batches
data_tensor = data_tensor.narrow(0, 0, nb_batches * batch_size)
data_tensor = data_tensor.view(batch_size, -1).contiguous()
return data_tensor
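# Illustrative example: with a stream of 10 tokens [t0, ..., t9] and
# batch_size=3, the first 9 tokens are kept and reshaped row-major into
# [[t0, t1, t2], [t3, t4, t5], [t6, t7, t8]]; the leftover token t9 is dropped.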
def _build_corpus(data_path, env_params, sort_dict):
# save the corpus to a file so that it's faster next time
if sort_dict:
corpus_path = os.path.join(data_path, 'corpus_sorted.pt')
else:
corpus_path = os.path.join(data_path, 'corpus.pt')
if os.path.exists(corpus_path):
print('Loading an existing corpus file from {}'.format(corpus_path))
corpus = torch.load(corpus_path)
else:
print('Creating a corpus file at {}'.format(corpus_path))
if env_params['distributed']:
            # only one process needs to create the corpus file
if env_params['rank'] == 0:
corpus = Corpus(data_path, sort_dict)
torch.save(corpus, corpus_path)
# sync with other processes
torch.distributed.broadcast(torch.zeros(1).cuda(), src=0)
else:
                print('Waiting for rank0 to create a corpus file.')
# sync with rank0
torch.distributed.broadcast(torch.zeros(1).cuda(), src=0)
corpus = torch.load(corpus_path)
else:
corpus = Corpus(data_path, sort_dict)
torch.save(corpus, corpus_path)
return corpus
def _get_train_val_test_data(corpus, batch_size):
return [
_batchify(corpus.train, batch_size),
_batchify(corpus.valid, batch_size),
_batchify(corpus.test, batch_size)
]
def get_train_val_test_data(data_params, env_params, batch_size, device, sort_dict):
corpus = _build_corpus(data_params['data_path'], env_params, sort_dict)
data_params['vocab_size'] = corpus.vocab_size
train_data, val_data, test_data = _get_train_val_test_data(
corpus=corpus, batch_size=batch_size)
if env_params['distributed']:
# split the data into equal parts
assert batch_size % env_params['world_size'] == 0
device_batch_size = batch_size // env_params['world_size']
slice_data = slice(
device_batch_size * env_params['rank'],
device_batch_size * (env_params['rank'] + 1))
train_data = train_data[slice_data]
val_data = val_data[slice_data]
test_data = test_data[slice_data]
train_data = train_data.to(device)
val_data = val_data.to(device)
test_data = test_data.to(device)
return train_data, val_data, test_data
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
from torch.optim import Adagrad
def _clip_grad(clr, grad, group_grad_clip):
if group_grad_clip > 0:
norm = grad.norm(2).item()
if norm > group_grad_clip:
clr *= group_grad_clip / (norm + 1e-10)
return clr
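# Example: with group_grad_clip=1.0 and a gradient of L2 norm 4.0, the
# effective learning rate clr is scaled by 1/4, which caps the size of the
# update without modifying the gradient itself.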
class AdagradWithGradClip(Adagrad):
"""Adagrad algoritm with custom gradient clipping"""
def __init__(self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
grad_clip=0):
Adagrad.__init__(self,
params,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value)
self.defaults['grad_clip'] = grad_clip
self.param_groups[0].setdefault('grad_clip', grad_clip)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is "
"not compatible with sparse "
"gradients")
                    grad = grad.add(p.data, alpha=group['weight_decay'])
clr = (group['lr'] /
(1 + (state['step'] - 1) * group['lr_decay']))
# clip
clr = _clip_grad(clr=clr,
grad=grad,
group_grad_clip=group['grad_clip'])
if grad.is_sparse:
# the update is non-linear so indices must be unique
grad = grad.coalesce()
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
                    std = state['sum'].sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(make_sparse(grad_values / std_values), alpha=-clr)
else:
                    state['sum'].addcmul_(grad, grad, value=1)
                    std = state['sum'].sqrt().add_(1e-10)
                    p.data.addcdiv_(grad, std, value=-clr)
return loss
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
else:
return TextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
tokenizer.add_special_tokens({'additional_special_tokens': ['<|belief|>', '<|endofbelief|>', '<|action|>', '<|endofaction|>', \
'<|response|>', '<|endofresponse|>', '<|context|>', '<|endofcontext|>', '<|user|>', '<|system|>', \
'<|task|>', '<|endoftask|>', '<|chitchat|>', '<|endofchitchat|>']})
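    # These added special tokens delimit the belief state, system action,
    # (delexicalized) response, dialogue context and chit-chat segments of the
    # SimpleTOD-style training sequences produced by the accompanying
    # data-preparation script, so the model learns to generate each segment in turn.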
if model_args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config)
model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = get_dataset(data_args, tokenizer=tokenizer) if training_args.do_train else None
eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True) if training_args.do_eval else None
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
)
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
result = {"perplexity": perplexity}
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
results.update(result)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import random
import argparse
import os
def clean(x):
return x.replace("\n", "").replace("\r", "").replace("\t", " ").strip()
parser = argparse.ArgumentParser()
parser.add_argument("--data", default="./accentor-sgd/", type=str, required=False, help="path to SGD")
args = parser.parse_args()
random.seed(42)
pairs = {}
for s in ["train", "dev", "test"]:
pairs[s] = []
fns = os.listdir(args.data + s)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue") or not fn.endswith(".json"):
continue
with open(args.data + s + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
for i in range(len(data)):
t = ''
for j in range(len(data[i]["turns"])):
for ps in ["beginning", "end"]:
if ps in data[i]["turns"][j]:
for k in range(len(data[i]["turns"][j][ps])):
if data[i]["turns"][j][ps][k]["label"] == "good":
pair = [t, clean(data[i]["turns"][j][ps][k]["candidate"])]
pairs[s] += [pair]
if t != '':
t += ' '
if j % 2 == 0:
t += 'user: '
else:
t += 'system: '
t += clean(data[i]["turns"][j]["utterance"])
for s in pairs:
print(s, len(pairs[s]))
random.shuffle(pairs["train"])
for s in ["train", "dev", "test"]:
with open("parlai_"+(s if s != "dev" else "valid")+".txt", "w", encoding='utf8') as f:
for i in range(len(pairs[s])):
f.write("text:" + pairs[s][i][0] + "\t" + "labels:" + pairs[s][i][1] + "\tepisode_done:True\n")
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from utils import bleuscorer
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--inference", default="dev.inference.gpt2_10epoch_1e-3_fp16.json", type=str, required=False, help='inference file')
parser.add_argument("--datafolder", default="./simpletod/", type=str, required=False, help='data folder')
parser.add_argument("--predictionfolder", default="./prediction/", type=str, required=False, help='prediction folder')
parser.add_argument("--split", default="dev", type=str, required=False, help="[dev,test]")
args = parser.parse_args()
inference = args.inference
datafolder = args.datafolder
predictionfolder = args.predictionfolder
folder = args.split + "/"
if inference.endswith(".txt"):
with open(inference, "r") as f:
predict = f.read().strip().split("\n")
predict = [a.strip() for a in predict]
else:
with open(inference, "r") as f:
predict = json.load(f)
idx = 0
cnt = 0
seen_services = set()
with open(datafolder + "train/" + "schema.json", "r") as f:
schema = json.load(f)
for i in range(len(schema)):
seen_services.add(schema[i]["service_name"])
domain_slots = set()
with open(datafolder + folder + "schema.json", "r") as f:
schema = json.load(f)
for i in range(len(schema)):
for j in range(len(schema[i]["slots"])):
assert(" " not in schema[i]["slots"][j])
domain_slots.add(schema[i]["service_name"].split("_")[0].lower() + " " + schema[i]["slots"][j]["name"].lower())
fns = os.listdir(datafolder + folder)
fns.sort()
act_precision = []
act_recall = []
seen_act_precision = []
seen_act_recall = []
unseen_act_precision = []
unseen_act_recall = []
bleu = []
bleua = []
bleub = []
seenbleu = []
seenbleua = []
seenbleub = []
unseenbleu = []
unseenbleua = []
unseenbleub = []
for fn in fns:
if not fn.startswith("dialogue"):
continue
if fn.startswith("dialogues_and_metrics.json"):
continue
with open(datafolder + folder + fn, "r") as f:
data = json.load(f)
for i in range(len(data)):
for j in range(1, len(data[i]["turns"]), 2):
cnt += 1
if idx >= len(predict):
continue
belief = predict[idx].split("<|belief|>")
if len(belief) >= 2 and "<|endofbelief|>" in belief[1]:
belief = belief[1].split("<|endofbelief|>")[0].strip()
else:
belief = ""
action = predict[idx].split("<|action|>")
if len(action) >= 2 and "<|endofaction|>" in action[1]:
action = action[1].split("<|endofaction|>")[0].strip()
else:
action = ""
response = predict[idx].split("<|response|>")
if len(response) >= 2:
response = response[1].split("<|")[0].strip()
else:
response = ""
data[i]["turns"][j]["response"] = response
seen = True
for k in range(len(data[i]["turns"][j-1]["frames"])):
if data[i]["turns"][j-1]["frames"][k]["service"] not in seen_services:
seen = False
parsedbelief = belief.split(", ")
for k in range(len(parsedbelief)):
parsed = False
for ds in domain_slots:
if parsedbelief[k].startswith(ds):
parsedbelief[k] = [ds, parsedbelief[k][len(ds):].strip()]
parsed = True
break
if not parsed:
parsedbelief[k] = [parsedbelief[k]]
k = 1
while k < len(parsedbelief):
if len(parsedbelief[k]) == 1:
parsedbelief[k-1] += parsedbelief[k]
del parsedbelief[k]
else:
k += 1
if len(parsedbelief) >= 1:
if parsedbelief[0][0] not in domain_slots:
del parsedbelief[0]
parsedbelief = {x[0]:x[1:] for x in parsedbelief}
parsedaction = action.split(", ")
for k in range(len(parsedaction)):
parsedaction[k] = parsedaction[k].strip().split()
k = 0
while k < len(parsedaction):
if len(parsedaction[k]) <= 1 or len(parsedaction[k]) > 3:
del parsedaction[k]
else:
k += 1
act_gt = set()
for k in range(len(data[i]["turns"][j]["frames"][0]["actions"])):
act_gt.add((data[i]["turns"][j]["frames"][0]["actions"][k]["act"].lower() + " " + data[i]["turns"][j]["frames"][0]["actions"][k]["slot"]).strip())
act_p = set()
for k in range(len(parsedaction)):
act_p.add(' '.join(parsedaction[k][1:]))
act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
if seen:
seen_act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
seen_act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
else:
unseen_act_precision += [len(act_p & act_gt) / len(act_p) if len(act_p) != 0 else 1]
unseen_act_recall += [len(act_p & act_gt) / len(act_gt) if len(act_gt) != 0 else 0]
bleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
bleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
bleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
if seen:
seenbleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
seenbleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
seenbleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
else:
unseenbleu += [bleuscorer([response.lower()], [[data[i]["turns"][j]["delex"].lower()]])]
if len(data[i]["turns"][j]["delexaug"]) > 0:
unseenbleua += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"]]])]
unseenbleub += [bleuscorer([response.lower()], [[a.lower() for a in data[i]["turns"][j]["delexaug"] + [data[i]["turns"][j]["delex"].lower()]]])]
for k in range(len(data[i]["turns"][j-1]["frames"])):
data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"] = {}
for ds in parsedbelief:
if ds.split()[0].lower() == data[i]["turns"][j-1]["frames"][k]["service"].split("_")[0].lower():
data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"][ds.split()[1]] = parsedbelief[ds]
idx += 1
if not os.path.exists(predictionfolder + folder):
os.makedirs(predictionfolder + folder)
with open(predictionfolder + folder + fn, "w") as f:
json.dump(data, f, indent=1)
act_precision = sum(act_precision) / len(act_precision)
act_recall = sum(act_recall) / len(act_recall)
print("act", act_precision, act_recall, 2*act_precision*act_recall/(act_precision+act_recall))
print('bleu:', sum(bleu)/len(bleu)) #BLEU-4_{orig}
print('bleua:', sum(bleua)/len(bleua)) #BLEU-4_{aug}
#print('bleub:', sum(bleub)/len(bleub))
seen_act_precision = sum(seen_act_precision) / len(seen_act_precision)
seen_act_recall = sum(seen_act_recall) / len(seen_act_recall)
print("act (seen):", seen_act_precision, seen_act_recall, 2*seen_act_precision*seen_act_recall/(seen_act_precision+seen_act_recall))
unseen_act_precision = sum(unseen_act_precision) / len(unseen_act_precision)
unseen_act_recall = sum(unseen_act_recall) / len(unseen_act_recall)
print("act (unseen):", unseen_act_precision, unseen_act_recall, 2*unseen_act_precision*unseen_act_recall/(unseen_act_precision+unseen_act_recall))
print('bleu (seen):', sum(seenbleu)/len(seenbleu))
print('bleua (seen):', sum(seenbleua)/len(seenbleua))
#print('bleub (seen):', sum(seenbleub)/len(seenbleub))
print('bleu (unseen):', sum(unseenbleu)/len(unseenbleu))
print('bleua (unseen):', sum(unseenbleua)/len(unseenbleua))
#print('bleub (unseen):', sum(unseenbleub)/len(unseenbleub))
if __name__ == '__main__':
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import copy
import random
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--all", default=False, type=bool, required=False, help="use all dialogues rather than only augmented dialogues")
parser.add_argument("--delexlevel", default=2, type=int, required=False, help="0: no delex; 1: delex values in \"slots\"; 2: delex values in both \"slots\" and \"actions\"")
parser.add_argument("--data", default="./accentor-sgd/", type=str, required=False, help="path to SGD")
parser.add_argument("--target", default="./simpletod/", type=str, required=False, help="path to output")
args = parser.parse_args()
datafolder = args.data
targetfolder = args.target
for folder in ["train", "dev", "test"]:
if not os.path.exists(targetfolder + folder):
os.makedirs(targetfolder + folder)
inlm = []
inlme = []
inlma = []
inlmb = []
incc = []
inlmf = []
fns = os.listdir(datafolder + folder)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue"):
with open(datafolder + folder + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
with open(targetfolder + folder + "/" + fn, "w", encoding='utf8') as f:
json.dump(data, f, indent=1)
continue
with open(datafolder + folder + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
i = 0
while i < len(data):
dbs = []
slots = {}
canmap = {}
vmap = {}
for j in range(len(data[i]["turns"])):
if data[i]["turns"][j]["speaker"] != "SYSTEM":
continue
if "service_results" in data[i]["turns"][j]["frames"][0]:
dbs += data[i]["turns"][j]["frames"][0]["service_results"]
if len(data[i]["turns"][j]["frames"][0]["slots"]) != 0:
slots = {}
for k in range(len(data[i]["turns"][j]["frames"][0]["actions"])):
assert(len(data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"]) == len(data[i]["turns"][j]["frames"][0]["actions"][k]["values"]))
for l in range(len(data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"])):
canmap[data[i]["turns"][j]["frames"][0]["actions"][k]["values"][l]] = data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"][l]
vmap[data[i]["turns"][j]["frames"][0]["actions"][k]["canonical_values"][l]] = data[i]["turns"][j]["frames"][0]["actions"][k]["values"][l]
for k in range(len(data[i]["turns"][j]["frames"][0]["slots"])):
s = data[i]["turns"][j]["frames"][0]["slots"][k]["slot"]
slots[s] = data[i]["turns"][j]["utterance"][data[i]["turns"][j]["frames"][0]["slots"][k]["start"]:data[i]["turns"][j]["frames"][0]["slots"][k]["exclusive_end"]]
db = {}
for k in range(len(dbs)):
matched = True
for s in slots:
if s not in dbs[k]:
matched = False
break
if dbs[k][s] != canmap[slots[s]]:
matched = False
break
if matched:
db = copy.deepcopy(dbs[k])
for s in db:
if db[s] in vmap:
db[s] = vmap[db[s]]
break
data[i]["turns"][j]["frames"][0]["selecteddbslots"] = slots
data[i]["turns"][j]["frames"][0]["selecteddb"] = db
for j in range(1, len(data[i]["turns"]), 2):
domain = data[i]["turns"][j]["frames"][0]["service"].split("_")[0].lower()
assert(data[i]["turns"][j]["speaker"] == "SYSTEM")
assert(len(data[i]["turns"][j]["frames"]) == 1)
slots = copy.deepcopy(data[i]["turns"][j]["frames"][0]["slots"])
slots.sort(key = lambda x : -x["start"])
delex = data[i]["turns"][j]["utterance"]
delexed = set()
if args.delexlevel >= 1:
for k in range(1, len(slots)):
assert(slots[k-1]["start"] >= slots[k]["exclusive_end"])
for k in range(len(slots)):
domain_slot = domain + "_" + slots[k]["slot"]
delex = delex[:slots[k]["start"]] + "[" + domain_slot + "]" + delex[slots[k]["exclusive_end"]:]
delexed.add(domain_slot)
if args.delexlevel >= 2:
slots2 = copy.deepcopy(data[i]["turns"][j]["frames"][0]["actions"])
slots2 = [x for x in slots2 if len(x["values"]) > 0]
slots2.sort(key = lambda x : -len(x["values"][0]))
for k in range(len(slots2)):
domain_slot = domain + "_" + slots2[k]["slot"]
if domain_slot in delexed:
continue
for l in range(len(slots2[k]["values"])):
delex = delex.replace(slots2[k]["values"][l], "[" + domain_slot + "]")
delexed.add(domain_slot)
data[i]["turns"][j]["delex"] = delex
target = ''
belief = []
for k in range(len(data[i]["turns"][j-1]["frames"])):
for slot in data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"]:
belief += [[data[i]["turns"][j-1]["frames"][k]["service"].split("_")[0].lower(), slot, data[i]["turns"][j-1]["frames"][k]["state"]["slot_values"][slot]]]
belief.sort(key = lambda x : x[0] + " " + x[1])
for k in range(len(belief)):
belief[k][2].sort()
belief[k][2] = belief[k][2][0]
belief = [x[0] + " " + x[1] + " " + x[2] for x in belief]
target += '<|belief|> ' + ", ".join(belief) + ' <|endofbelief|> '
action = copy.deepcopy(data[i]["turns"][j]["frames"][0]["actions"])
action.sort(key = lambda x : x["act"])
action = [domain + " " + x["act"].lower() + " " + x["slot"] for x in action]
targetaug = []
delexaug = []
tcpos = []
tcneg = []
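                    # Augmented variants: for each accepted chitchat candidate (justification contains
                    # "social" or "useful"), targetaug/delexaug hold the response with the chitchat
                    # prepended (beginning) or appended (end); tcpos/tcneg hold accepted/rejected
                    # <|task|> + <|chitchat|> snippets used to build the free-form training data.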
for k in range(len(data[i]["turns"][j]["beginning"])):
if "social" in data[i]["turns"][j]["beginning"][k]["justification"] or "useful" in data[i]["turns"][j]["beginning"][k]["justification"]:
delexaug += [data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' ' + delex]
targetaug += [target + '<|action|> ' + "chitchat, " + ", ".join(action) + ' <|endofaction|> ' + '<|response|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' ' + delex + ' <|endofresponse|>']
tcpos += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' <|endofchitchat|> ']
else:
tcneg += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["beginning"][k]["candidate"].strip() + ' <|endofchitchat|> ']
for k in range(len(data[i]["turns"][j]["end"])):
if "social" in data[i]["turns"][j]["end"][k]["justification"] or "useful" in data[i]["turns"][j]["end"][k]["justification"]:
delexaug += [delex + ' ' + data[i]["turns"][j]["end"][k]["candidate"].strip()]
targetaug += [target + '<|action|> ' + ", ".join(action) + ", chitchat" + ' <|endofaction|> ' + '<|response|> ' + delex + ' ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofresponse|>']
tcpos += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofchitchat|> ']
else:
tcneg += [' <|task|> ' + delex + ' <|endoftask|> ' + '<|chitchat|> ' + data[i]["turns"][j]["end"][k]["candidate"].strip() + ' <|endofchitchat|> ']
target += '<|action|> ' + ", ".join(action) + ' <|endofaction|> '
target += '<|response|> ' + delex + ' <|endofresponse|>'
data[i]["turns"][j]["target"] = target
data[i]["turns"][j]["targetaug"] = targetaug
data[i]["turns"][j]["delexaug"] = delexaug
context = '<|context|> '
for k in range(j):
if k % 2 == 0:
context += '<|user|> '
else:
context += '<|system|> '
context += data[i]["turns"][k]["utterance"] + " "
context += '<|endofcontext|>'
data[i]["turns"][j]["context"] = context
inlm += [(context + target).replace("\n", " ").replace("\r", "")]
assert("\n" not in inlm[-1])
inlme += [(context).replace("\n", " ").replace("\r", "")]
if len(targetaug) != 0:
for k in range(len(targetaug)):
inlma += [(context + targetaug[k]).replace("\n", " ").replace("\r", "")]
inlmb += [(context + targetaug[k]).replace("\n", " ").replace("\r", "")]
inlmf += [(context + tcpos[k] + targetaug[k]).replace("\n", " ").replace("\r", "")]
for l in range(len(tcneg)):
inlmf += [(context + tcneg[l] + targetaug[k]).replace("\n", " ").replace("\r", "")]
else:
inlmb += [(context + target).replace("\n", " ").replace("\r", "")]
for k in range(len(tcneg)):
inlmf += [(context + tcneg[k] + target).replace("\n", " ").replace("\r", "")]
incc += [context.replace('<|context|>', '').replace('<|endofcontext|>', '').replace('<|user|>', 'user:').replace('<|system|>', 'system:').replace('\t', ' ').strip(), '[DONE]']
i += 1
with open(targetfolder + folder + "/" + fn, "w") as f:
json.dump(data, f, indent=1)
random.shuffle(inlm)
with open("lm.input."+folder+".txt", "w", encoding='utf8') as f: #SimpleTOD
f.write('\n'.join(inlm))
with open("lm.input."+folder+".eval.txt", "w", encoding='utf8') as f: #used as the input during evaluation of SimpleTOD and SimpleTOD extension
f.write('\n'.join(inlme))
with open("lm.input."+folder+".aug.txt", "w", encoding='utf8') as f: #SimpleTOD extension (augmented responses only)
f.write('\n'.join(inlma))
with open("lm.input."+folder+".both.txt", "w", encoding='utf8') as f: #SimpleTOD extension (all responses)
f.write('\n'.join(inlmb))
with open("lm.input."+folder+".cc.txt", "w", encoding='utf8') as f: #cc: chitchat
f.write('\n'.join(incc+['[EXIT]']))
with open("lm.input."+folder+".ff.txt", "w", encoding='utf8') as f: #ff: free-form
f.write('\n'.join(inlmf))
if __name__ == '__main__':
random.seed(42)
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import argparse
import numpy as np
import json
from tqdm import tqdm
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
parser = argparse.ArgumentParser()
parser.add_argument("--no_cuda", action="store_true", help="avoid using CUDA when available")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--model_name_or_path", type=str, default="output", help="path to pre-trained model or shortcut name")
parser.add_argument("--input", type=str, help="input text file, each line corresponding to one instance")
parser.add_argument("--output", type=str, help="output file")
parser.add_argument("--eos_token_id", type=int, default=None, help="eos token id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--jobid", type=int, default=0, help="jobid")
parser.add_argument("--jobnum", type=int, default=1, help="jobnum")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
set_seed(args)
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path, pad_token='<PAD>')
model.to(args.device)
with open(args.input, "r") as f:
prompts = f.read().strip().split("\n")
batch_size = args.batch_size
ret = []
for batch in tqdm(range(args.jobid * batch_size, len(prompts), batch_size * args.jobnum)):
prompt_text = prompts[batch: batch+batch_size]
encodings_dict = tokenizer.batch_encode_plus(prompt_text, max_length=None, pad_to_max_length=True)
input_ids = torch.tensor(encodings_dict['input_ids'])
attn_mask = torch.tensor(encodings_dict['attention_mask'])
seq_len = len(input_ids[0])
num_tokens_to_produce = 1024 - seq_len
pad_token_id = tokenizer.pad_token_id
eos_token_id = args.eos_token_id
if eos_token_id is None:
eos_token_id = tokenizer.eos_token_id
eos_not_in_sents = torch.ones(input_ids.shape[0]).long()
last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1
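    # On the first decoding step, logits are gathered at each sequence's last non-padded
    # position (start_idx), so right-padded batches continue from the true end of each
    # prompt; on later steps the last position is used directly.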
start_idx = inp_idx = (last_non_masked_idx).view(-1, 1).repeat(1, tokenizer.vocab_size + len(tokenizer.additional_special_tokens)).unsqueeze(1)
past = None
position_ids = torch.tensor([list(range(seq_len)) for i in range(input_ids.shape[0])])
for i, position_ids_slice in enumerate(position_ids):
position_ids_slice[last_non_masked_idx[i]:] = position_ids_slice[last_non_masked_idx[i]]
input_ids = input_ids.to(args.device)
attn_mask = attn_mask.to(args.device)
eos_not_in_sents = eos_not_in_sents.to(args.device)
start_idx = start_idx.to(args.device)
position_ids = position_ids.to(args.device)
for step in range(num_tokens_to_produce):
outputs = model(input_ids, attention_mask=attn_mask, position_ids=position_ids)
if step == 0:
next_token_logits = outputs[0].gather(1, start_idx).squeeze(1)
else:
next_token_logits = outputs[0][:, -1, :]
next_tokens = torch.argmax(next_token_logits, dim=-1)
eos_not_in_sents.mul_(next_tokens.ne(eos_token_id).long())
tokens_to_add = next_tokens * (eos_not_in_sents) + pad_token_id * (1 - eos_not_in_sents)
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
attn_mask = torch.cat([attn_mask, torch.ones((attn_mask.shape[0], 1)).long().to(args.device)], dim=1)
position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
if torch.max(eos_not_in_sents) == 0:
break
ret += [tokenizer.decode(output, skip_special_tokens=False, clean_up_tokenization_spaces=True).replace("<|endoftext|>", "") for output in input_ids]
with open(args.output, "w") as f:
json.dump(ret, f, indent=1)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import random
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data", default="./simpletod/", type=str, required=False, help="path to delexed & augmented SGD")
args = parser.parse_args()
def clean(x):
return x.replace("\n", "").replace("\r", "").replace("\t", " ").strip()
random.seed(42)
pairs = {}
pos = {}
tot = {}
for s in ["train", "dev", "test"]:
pairs[s] = []
pos[s] = 0
tot[s] = 0
fns = os.listdir(args.data + s)
fns.sort()
for fn in fns:
if not fn.startswith("dialogue") or not fn.endswith(".json"):
continue
with open(args.data + s + "/" + fn, "r", encoding='utf8') as f:
data = json.load(f)
for i in range(len(data)):
t = ''
for j in range(len(data[i]["turns"])):
for ps in ["beginning", "end"]:
if ps in data[i]["turns"][j]:
for k in range(len(data[i]["turns"][j][ps])):
tot[s] += 1
if data[i]["turns"][j][ps][k]["label"] == "good":
pair = [t, data[i]["turns"][j]["delex"], clean(data[i]["turns"][j][ps][k]["candidate"]), 1 if ps == "beginning" else 2]
pairs[s] += [pair]
pos[s] += 1
else:
pair = [t, data[i]["turns"][j]["delex"], clean(data[i]["turns"][j][ps][k]["candidate"]), 0]
pairs[s] += [pair]
if t != '':
t += ' '
if j % 2 == 0:
t += 'user: '
else:
t += 'system: '
t += clean(data[i]["turns"][j]["utterance"])
for s in pos:
print(s, pos[s], tot[s], pos[s]/tot[s])
for s in pairs:
print(s, len(pairs[s]))
random.shuffle(pairs["train"])
with open("arranger_input.json", "w", encoding='utf8') as f:
json.dump(pairs, f, indent=1)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import nltk
def bleuscorer(hyps, refs):
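    # Average sentence-level BLEU over (hypothesis, references) pairs, using NLTK's
    # default 4-gram weights and no smoothing.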
#print(hyps, refs)
bleu = []
for hyp, ref in zip(hyps, refs):
hyp = hyp.split()
ref = [a.split() for a in ref]
#hyp = nltk.word_tokenize(hyp)
#ref = [nltk.word_tokenize(a) for a in ref]
bleu += [nltk.translate.bleu_score.sentence_bleu(ref, hyp)]
return sum(bleu) / len(bleu)
if __name__ == '__main__':
print(bleuscorer(['the the the the the the the', 'there is a cat', 'it is'], [["the cat is on the mat", "there is a cat on the mat"], ["there is a cat on the mat"], ["it is true"]]))
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForMultipleChoice, BertTokenizer,
RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer)
from transformers import AdamW, get_linear_schedule_with_warmup
import torch.nn as nn
from utils_multiple_choice import (convert_examples_to_features, processors)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForMultipleChoice, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer),
}
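# Collect one field (e.g., 'input_ids') from every choice of every feature,
# preserving the [num_examples, num_choices] nesting expected by the model.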
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
best_dev_acc, best_dev_loss = 0.0, 99999999999.0
best_steps = 0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert'] else None,
'labels': batch[3]}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
if results["eval_acc"] > best_dev_acc:
best_dev_acc = results["eval_acc"]
best_dev_loss = results["eval_loss"]
best_steps = global_step
if args.do_test:
results_test = evaluate(args, model, tokenizer, test=True)
for key, value in results_test.items():
tb_writer.add_scalar('test_{}'.format(key), value, global_step)
logger.info("test acc: %s, loss: %s, global steps: %s", str(results_test['eval_acc']), str(results_test['eval_loss']), str(global_step))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
logger.info("Average loss: %s at global step: %s", str((tr_loss - logging_loss)/args.logging_steps), str(global_step))
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_vocabulary(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step, best_steps
def evaluate(args, model, tokenizer, prefix="", test=False):
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=not test, test=test)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert'] else None,
'labels': batch[3]}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
output_logits_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_logits.txt")
with open(output_logits_file, "w") as writer:
logits_list = list(preds)
for i in range(len(logits_list)):
for j in range(len(logits_list[i])):
writer.write(str(logits_list[i][j]))
if j == len(logits_list[i]) - 1:
writer.write("\n")
else:
writer.write(" ")
preds = np.argmax(preds, axis=1)
acc = simple_accuracy(preds, out_label_ids)
result = {"eval_acc": acc, "eval_loss": eval_loss}
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(str(prefix) + " is test:" + str(test)))
writer.write("model =%s\n" % str(args.model_name_or_path))
writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps *
(torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
writer.write("train num epochs=%d\n" % args.num_train_epochs)
writer.write("fp16 =%s\n" % args.fp16)
writer.write("max seq length =%d\n" % args.max_seq_length)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False):
if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task]()
# Load data features from cache or dataset file
if evaluate:
cached_mode = 'dev'
elif test:
cached_mode = 'test'
else:
cached_mode = 'train'
    assert not (evaluate and test)
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
cached_mode,
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if evaluate:
examples = processor.get_dev_examples(args.data_dir)
elif test:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_train_examples(args.data_dir)
logger.info("Training number: %s", str(len(examples)))
features = convert_examples_to_features(
examples,
label_list,
args.max_seq_length,
tokenizer,
pad_on_left=False,
pad_token_segment_id=0
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long)
all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true', help='Whether to run test on the test set')
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
best_steps = 0
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss, best_steps = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if not args.do_train:
args.output_dir = args.model_name_or_path
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if args.do_test and args.local_rank in [-1, 0]:
if not args.do_train:
args.output_dir = args.model_name_or_path
checkpoints = [args.output_dir]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, test=True)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
if best_steps:
logger.info("best steps of eval acc is the following checkpoints: %s", best_steps)
return results
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import os
import sys
from io import open
import json
import csv
import glob
import tqdm
from typing import List
from transformers import PreTrainedTokenizer
import random
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for multiple choice"""
def __init__(self, example_id, question, contexts, endings, label=None):
"""Constructs a InputExample.
Args:
example_id: Unique id for the example.
contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
question: string. The untokenized text of the second sequence (question).
            endings: list of str. The multiple-choice options; must have the same length as contexts.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.example_id = example_id
self.question = question
self.contexts = contexts
self.endings = endings
self.label = label
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for input_ids, input_mask, segment_ids in choices_features
]
self.label = label
class DataProcessor(object):
"""Base class for data converters for multiple choice data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
class ACCProcessor(DataProcessor):
def __init__(self):
self.D = [[], [], []]
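        # self.D[0]/self.D[1] hold train/dev examples loaded from arranger_input.json;
        # self.D[2] holds inference-time examples assembled from the dialogue contexts,
        # the chitchat generator outputs, and the SimpleTOD responses. Labels:
        # 0 = task response only, 1 = chitchat before the response, 2 = chitchat after it.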
datasetfile = "arranger_input.json"
with open(datasetfile, "r") as f:
data = json.load(f)
for sid in range(2):
dt = ["train", "dev"][sid]
for i in range(len(data[dt])):
d = [data[dt][i][0].lower(), data[dt][i][1].lower(), data[dt][i][2].lower(), data[dt][i][3]]
self.D[sid] += [d]
sid = 2
for fns in [["lm.input.dev.cc.txt", "lm.output.dev.cc.txt", "dev.inference.gpt2_10epoch_1e-3_fp16.json"],
["lm.input.test.cc.txt", "lm.output.test.cc.txt", "test.inference.gpt2_10epoch_1e-3_fp16.json"]]:
with open(fns[0], "r") as f:
data = f.read().split("\n")[0:-1:2]
data_d = data
with open(fns[1], "r") as f:
data = f.read()
data = data.split("[TransformerGenerator]:")[1:]
for i in range(len(data)):
data[i] = data[i].split("\n")[0].strip()
data_cc = data
with open(fns[2], "r") as f:
data = json.load(f)
for i in range(len(data)):
data[i] = data[i].split("<|response|>")
if len(data[i]) == 1:
data[i] += ['']
elif len(data[i]) > 2:
data[i] = ["<|response|>".join(data[i][:-2]), data[i][-1]]
self.D[2] += [[data_d[i].strip(), data[i][1], data_cc[i].strip(), 0]]
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return ["0", "1", "2"]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
acc_id = "%s-%d" % (set_type, i)
examples.append(
InputExample(
example_id=acc_id,
question="",
contexts=[data[i][0], data[i][0], data[i][0]],
endings=[data[i][1], data[i][2] + " " + data[i][1], data[i][1] + " " + data[i][2]],
label=str(data[i][3])))
return examples
def convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
pad_token_segment_id=0,
pad_on_left=False,
pad_token=0,
mask_padding_with_zero=True,
) -> List[InputFeatures]:
"""
Loads a data file into a list of `InputFeatures`
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
choices_features = []
for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
text_a = context
if example.question.find("_") != -1:
text_b = example.question.replace("_", ending)
else:
text_b = example.question + " " + ending
inputs = tokenizer.encode_plus(
text_a,
text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length
assert len(attention_mask) == max_length
assert len(token_type_ids) == max_length
choices_features.append((input_ids, attention_mask, token_type_ids))
label = label_map[example.label]
if ex_index < 2:
logger.info("*** Example ***")
logger.info("race_id: {}".format(example.example_id))
for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask))))
logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids))))
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id=example.example_id,
choices_features=choices_features,
label=label,
)
)
return features
processors = {
"acc": ACCProcessor,
}
MULTIPLE_CHOICE_TASKS_NUM_LABELS = {
"acc", 3
}
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
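# Build evaluation inputs for the free-form ("ff") model: each line pairs a dialogue
# context with the predicted task response and belief state (parsed from the SimpleTOD
# inference output) and a generated chitchat candidate.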
for fns in [["./lm.input.dev.eval.txt", "./lm.output.dev.cc.txt", "./dev.inference.gpt2_10epoch_1e-3_fp16.json", "lm.input.dev.eval.ff.txt"],
["./lm.input.test.eval.txt", "./lm.output.test.cc.txt", "./test.inference.gpt2_10epoch_1e-3_fp16.json", "lm.input.test.eval.ff.txt"]]:
with open(fns[0], "r", encoding='utf8') as f:
context = f.read().strip().split("\n")
with open(fns[1], "r", encoding='utf8') as f:
cc = f.read().strip()
cc = cc.split("[TransformerGenerator]:")[1:]
for i in range(len(cc)):
cc[i] = cc[i].split("\n")[0].strip()
with open(fns[2], "r", encoding='utf8') as f:
task = json.load(f)
print(len(context), len(cc), len(task))
assert(len(context) == len(cc))
assert(len(cc) == len(task))
with open(fns[3], "w", encoding='utf8') as f:
for i in range(len(cc)):
t = task[i].split("<|response|>")
if len(t) >= 2:
t = t[-1].strip()
else:
t = ""
b = task[i].split("<|belief|>")
if len(b) >= 2:
b = b[1].split("<|endofbelief|>")
if len(b) == 2:
b = b[0]
else:
b = ""
else:
b = ""
f.write(context[i] + " <|task|> " + t + " <|endoftask|> <|chitchat|> " + cc[i] + ' <|endofchitchat|> <|belief|>' + b + "<|endofbelief|>\n")
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
with open("./acc_arranger_roberta_base_3epoch/is_test_true_eval_logits.txt", "r") as f:
model_outputs = f.read().strip().split("\n")
for i in range(len(model_outputs)):
model_outputs[i] = model_outputs[i].split()
for j in range(len(model_outputs[i])):
model_outputs[i][j] = float(model_outputs[i][j])
assert(len(model_outputs[i]) == 3)
print(len(model_outputs))
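# For each turn, the arranger's argmax over the three logits decides how to combine the
# task response with the chitchat candidate: 0 = keep the response as is,
# 1 = prepend the chitchat, 2 = append the chitchat.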
for fns in [["./lm.input.dev.cc.txt", "./lm.output.dev.cc.txt", "./dev.inference.gpt2_10epoch_1e-3_fp16.json", "./dev.inference.arranger_3epoch.json"],
["./lm.input.test.cc.txt", "./lm.output.test.cc.txt", "./test.inference.gpt2_10epoch_1e-3_fp16.json", "./test.inference.arranger_3epoch.json"]]:
with open(fns[0], "r") as f:
data = f.read().split("\n")[0:-1:2]
print(len(data))
data_d = data
with open(fns[1], "r") as f:
data = f.read()
data = data.split("[TransformerGenerator]:")[1:]
for i in range(len(data)):
data[i] = data[i].split("\n")[0].strip()
print(len(data))
data_cc = data
with open(fns[2], "r") as f:
data = json.load(f)
print(len(data))
eval_data = []
for i in range(len(data)):
data[i] = data[i].split("<|response|>")
if len(data[i]) == 1:
data[i] += ['']
elif len(data[i]) > 2:
data[i] = ["<|response|>".join(data[i][:-2]), data[i][-1]]
eval_data += [[data_d[i].strip(), data[i][1], data_cc[i].strip(), 0]]
print(len(eval_data))
stats = {0:0, 1:0, 2:0}
for i in range(len(data)):
assert(len(model_outputs[i]) == 3)
o = 0
for j in range(1, 3):
if model_outputs[i][j] > model_outputs[i][o]:
o = j
stats[o] += 1
if o == 0:
data[i] = "<|response|>".join(data[i])
elif o == 1:
data[i] = data[i][0] + "<|response|> " + data_cc[i].strip() + " " + data[i][1].strip()
else:
data[i] = data[i][0] + "<|response|> " + data[i][1].strip() + " " + data_cc[i].strip()
print(len(data), len(model_outputs))
print(stats)
model_outputs = model_outputs[len(data):]
with open(fns[3], "w", encoding='utf8') as f:
json.dump(data, f, indent=1)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source", default="./MultiWOZ_2.1/data.json", type=str, required=False, help="Path to the MultiWOZ dataset.")
args = parser.parse_args()
with open("candidates-multiwoz.json", "r", encoding='utf8') as f:
augmentation = json.load(f)
with open(args.source, "r", encoding='utf8') as f:
data = json.load(f)
data = {x:data[x] for x in data if x in augmentation}
for x in data:
for i in range(1, len(data[x]["log"]), 2):
data[x]["log"][i]["beginning"] = []
data[x]["log"][i]["end"] = []
for cc in augmentation[x]:
data[x]["log"][cc[0]][cc[1]] += [{"candidate": cc[2], "label": cc[3], "justification": cc[4]}]
with open("accentor-multiwoz-1k.json", "w", encoding='utf8') as f:
json.dump(data, f, indent=1, ensure_ascii=False)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import argparse
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source", default="./dstc8-schema-guided-dialogue", type=str, required=False, help="Path to the SGD dataset.")
parser.add_argument("--target", default="./accentor-sgd", type=str, required=False, help="The target directory to store ACCENTOR-SGD.")
args = parser.parse_args()
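    # Merge the annotated chitchat candidates into every system turn of SGD, writing the
    # augmented dialogues (ACCENTOR-SGD) to the target directory with the same file layout.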
with open("candidates-sgd.json", "r", encoding='utf8') as f:
augmentation = json.load(f)
for subdir in ["train", "dev", "test"]:
targetdir = os.path.join(args.target, subdir)
sourcedir = os.path.join(args.source, subdir)
os.makedirs(targetdir, exist_ok=True)
fns = os.listdir(sourcedir)
for fn in fns:
if not fn.endswith(".json"):
continue
with open(os.path.join(sourcedir, fn), "r", encoding='utf8') as f:
data = json.load(f)
if fn.startswith("dialogue"):
for i in range(len(data)):
for j in range(1, len(data[i]["turns"]), 2):
data[i]["turns"][j]["beginning"] = []
data[i]["turns"][j]["end"] = []
for cc in augmentation[subdir + data[i]["dialogue_id"]]:
data[i]["turns"][cc[0]][cc[1]] += [{"candidate": cc[2], "label": cc[3], "justification": cc[4]}]
with open(os.path.join(targetdir, fn), "w", encoding='utf8') as f:
json.dump(data, f, indent=1, ensure_ascii=False)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from Slimmable - https://github.com/JiahuiYu/slimmable_networks
import torch
class CrossEntropyLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification """
def forward(self, output, target):
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss.mean()
class KLLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification
output: output logits of the student network
target: output logits of the teacher network
T: temperature
    KL(p||q) = E_p[log p] - E_p[log q]
"""
def forward(self, output, soft_logits, target=None, temperature=1., alpha=0.9):
output, soft_logits = output / temperature, soft_logits / temperature
soft_target_prob = torch.nn.functional.softmax(soft_logits, dim=1)
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
kd_loss = -torch.sum(soft_target_prob * output_log_prob, dim=1)
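        # If hard labels are given, mix the distillation term with the standard cross-entropy;
        # the T^2 factor keeps the soft-target gradients on the same scale as the hard-target ones.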
if target is not None:
n_class = output.size(1)
target = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
ce_loss = -torch.bmm(target, output_log_prob).squeeze()
            loss = alpha * temperature * temperature * kd_loss + (1.0 - alpha) * ce_loss
else:
loss = kd_loss
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
class CrossEntropyLossSmooth(torch.nn.modules.loss._Loss):
def __init__(self, label_smoothing=0.1):
super(CrossEntropyLossSmooth, self).__init__()
self.eps = label_smoothing
""" label smooth """
def forward(self, output, target):
n_class = output.size(1)
one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = one_hot * (1 - self.eps) + self.eps / n_class
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
loss = -torch.bmm(target, output_log_prob)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
def f_divergence(q_logits, p_logits, alpha, iw_clip=1e3):
assert isinstance(alpha, float)
q_prob = torch.nn.functional.softmax(q_logits, dim=1).detach()
p_prob = torch.nn.functional.softmax(p_logits, dim=1).detach()
q_log_prob = torch.nn.functional.log_softmax(q_logits, dim=1) #gradient is only backpropagated here
importance_ratio = p_prob / q_prob
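    # Alpha-divergence between q and p with importance ratio r = p/q:
    # alpha -> 0 recovers KL(q||p) (f = -log r), alpha -> 1 recovers KL(p||q) (f = r log r),
    # and other alphas use the generic generator r^alpha / (alpha * (alpha - 1)),
    # with the importance weights clipped at iw_clip for numerical stability.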
if abs(alpha) < 1e-3:
importance_ratio = importance_ratio.clamp(0, iw_clip)
f = -importance_ratio.log()
f_base = 0
rho_f = importance_ratio.log() - 1.0
elif abs(alpha - 1.0) < 1e-3:
f = importance_ratio * importance_ratio.log()
f_base = 0
rho_f = importance_ratio
else:
iw_alpha = torch.pow(importance_ratio, alpha)
iw_alpha = iw_alpha.clamp(0, iw_clip)
f = iw_alpha / alpha / (alpha - 1.0)
f_base = 1.0 / alpha / (alpha - 1.0)
rho_f = iw_alpha / alpha + f_base
loss = torch.sum(q_prob * (f - f_base), dim=1)
grad_loss = -torch.sum(q_prob * rho_f * q_log_prob, dim=1)
return loss, grad_loss
"""
It's often necessary to clip the maximum
gradient value (e.g., 1.0) when using this adaptive KD loss
"""
class AdaptiveLossSoft(torch.nn.modules.loss._Loss):
def __init__(self, alpha_min=-1.0, alpha_max=1.0, iw_clip=5.0):
super(AdaptiveLossSoft, self).__init__()
self.alpha_min = alpha_min
self.alpha_max = alpha_max
self.iw_clip = iw_clip
def forward(self, output, target, alpha_min=None, alpha_max=None):
        alpha_min = alpha_min if alpha_min is not None else self.alpha_min
        alpha_max = alpha_max if alpha_max is not None else self.alpha_max
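        # Evaluate the divergence at both ends of the alpha range and, element-wise, follow the
        # gradient surrogate of whichever side currently yields the larger loss.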
loss_left, grad_loss_left = f_divergence(output, target, alpha_min, iw_clip=self.iw_clip)
loss_right, grad_loss_right = f_divergence(output, target, alpha_max, iw_clip=self.iw_clip)
ind = torch.gt(loss_left, loss_right).float()
loss = ind * grad_loss_left + (1.0 - ind) * grad_loss_right
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
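if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): random logits stand in for real
    # student/teacher outputs; the actual training loop lives elsewhere in the repo.
    torch.manual_seed(0)
    student_logits = torch.randn(8, 1000)
    teacher_logits = torch.randn(8, 1000)
    kd = AdaptiveLossSoft(alpha_min=-1.0, alpha_max=1.0, iw_clip=5.0)
    print("adaptive KD loss:", kd(student_logits, teacher_logits).item())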
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import random
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import models
from utils.config import setup
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from evaluate import attentive_nas_eval as attentive_nas_eval
import utils.logging as logging
import argparse
"""
using multiple nodes to run evolutionary search:
1) each GPU will evaluate its own sub-networks
2) all evaluation results will be aggregated on GPU 0
"""
parser = argparse.ArgumentParser(description='Test AlphaNet Models')
parser.add_argument('--config-file', default='./configs/parallel_supernet_evo_search.yml')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
parser.add_argument('--seed', default=1, type=int,
help='default random seed')
run_args = parser.parse_args()
logger = logging.get_logger(__name__)
def eval_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging("stdout.log", 'w')
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
torch.cuda.set_device(args.gpu)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# build the supernet
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
model = comm.get_parallel_model(model, args.gpu) #local rank
# define loss function (criterion)
criterion = nn.CrossEntropyLoss().cuda()
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
assert args.resume
#reloading model
model.module.load_weights_from_pretrained_models(args.resume)
if train_sampler:
train_sampler.set_epoch(0)
targeted_min_flops = args.evo_search.targeted_min_flops
targeted_max_flops = args.evo_search.targeted_max_flops
# run evolutionary search
parent_popu = []
for idx in range(args.evo_search.parent_popu_size):
if idx == 0:
cfg = model.module.sample_min_subnet()
else:
cfg = model.module.sample_active_subnet_within_range(
targeted_min_flops, targeted_max_flops
)
cfg['net_id'] = f'net_{idx % args.world_size}_evo_0_{idx}'
parent_popu.append(cfg)
pareto_global = {}
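    # pareto_global maps a FLOPs bucket (rounded to evo_search.step) to the best-accuracy
    # subnet config seen so far; mutation and crossover sample parents from this frontier.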
for evo in range(args.evo_search.evo_iter):
# partition the set of candidate sub-networks
# and send them to each GPU for parallel evaluation
# sub-networks to be evaluated on GPU {args.rank}
my_subnets_to_be_evaluated = {}
n_evaluated = len(parent_popu) // args.world_size * args.world_size
for cfg in parent_popu[:n_evaluated]:
if cfg['net_id'].startswith(f'net_{args.rank}_'):
my_subnets_to_be_evaluated[cfg['net_id']] = cfg
# aggregating all evaluation results
eval_results = attentive_nas_eval.validate(
my_subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
)
# update the Pareto frontier
# in this case, we search the best FLOPs vs. accuracy trade-offs
for cfg in eval_results:
f = round(cfg['flops'] / args.evo_search.step) * args.evo_search.step
if f not in pareto_global or pareto_global[f]['acc1'] < cfg['acc1']:
pareto_global[f] = cfg
# next batch of sub-networks to be evaluated
parent_popu = []
# mutate
for idx in range(args.evo_search.mutate_size):
while True:
old_cfg = random.choice(list(pareto_global.values()))
cfg = model.module.mutate_and_reset(old_cfg, prob=args.evo_search.mutate_prob)
flops = model.module.compute_active_subnet_flops()
if flops >= targeted_min_flops and flops <= targeted_max_flops:
break
cfg['net_id'] = f'net_{idx % args.world_size}_evo_{evo}_mutate_{idx}'
parent_popu.append(cfg)
# cross over
for idx in range(args.evo_search.crossover_size):
while True:
cfg1 = random.choice(list(pareto_global.values()))
cfg2 = random.choice(list(pareto_global.values()))
cfg = model.module.crossover_and_reset(cfg1, cfg2)
flops = model.module.compute_active_subnet_flops()
if flops >= targeted_min_flops and flops <= targeted_max_flops:
break
cfg['net_id'] = f'net_{idx % args.world_size}_evo_{evo}_crossover_{idx}'
parent_popu.append(cfg)
if __name__ == '__main__':
# setup environments
args = setup(run_args.config_file)
args.dist_url = run_args.dist_url
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
assert args.world_size > 1, "only support DDP settings"
# Use torch.multiprocessing.spawn to launch distributed processes: the
# eval_worker process function
mp.spawn(eval_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from AttentiveNAS (https://github.com/facebookresearch/AttentiveNAS)
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
import operator
from datetime import date
import torch
import torch.nn as nn
#from torch.utils.tensorboard import SummaryWriter
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from data.data_loader import build_data_loader
from utils.config import setup
import utils.saver as saver
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
import utils.logging as logging
from evaluate import attentive_nas_eval as attentive_nas_eval
from solver import build_optimizer, build_lr_scheduler
import models
from copy import deepcopy
import numpy as np
import loss_ops as loss_ops
parser = argparse.ArgumentParser(description='AlphaNet Training')
parser.add_argument('--config-file', default=None, type=str,
help='training configuration')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
logger = logging.get_logger(__name__)
def build_args_and_env(run_args):
assert run_args.config_file and os.path.isfile(run_args.config_file), 'cannot locate config file'
args = setup(run_args.config_file)
args.config_file = run_args.config_file
#load config
assert args.distributed and args.multiprocessing_distributed, 'only support DDP training'
args.distributed = True
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
args.dist_url = run_args.dist_url
args.models_save_dir = os.path.join(args.models_save_dir, args.exp_name)
if not os.path.exists(args.models_save_dir):
os.makedirs(args.models_save_dir)
#backup config file
saver.copy_file(args.config_file, '{}/{}'.format(args.models_save_dir, os.path.basename(args.config_file)))
args.checkpoint_save_path = os.path.join(
args.models_save_dir, 'alphanet.pth.tar'
)
args.logging_save_path = os.path.join(
args.models_save_dir, f'stdout.log'
)
return args
def main():
run_args = parser.parse_args()
args = build_args_and_env(run_args)
random.seed(args.seed)
torch.manual_seed(args.seed)
#cudnn.deterministic = True
#warnings.warn('You have chosen to seed training. '
# 'This will turn on the CUDNN deterministic setting, '
# 'which can slow down your training considerably! '
# 'You may see unexpected behavior when restarting '
# 'from checkpoints.')
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
assert args.world_size > 1, 'only support ddp training'
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
args.batch_size_total = args.batch_size * args.world_size
#rescale base lr
args.lr_scheduler.base_lr = args.lr_scheduler.base_lr * (max(1, args.batch_size_total // 256))
# set random seed, make sure all random subgraph generated would be the same
random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu:
torch.cuda.manual_seed(args.seed)
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging(args.logging_save_path, 'w')
logger.info(f"Use GPU: {args.gpu}, machine rank {args.machine_rank}, num_nodes {args.num_nodes}, \
gpu per node {ngpus_per_node}, world size {args.world_size}")
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
args.local_rank = args.gpu
torch.cuda.set_device(args.gpu)
# build model
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
# use sync batchnorm
if getattr(args, 'sync_bn', False):
model.apply(
lambda m: setattr(m, 'need_sync', True))
model = comm.get_parallel_model(model, args.gpu) #local rank
logger.info(model)
criterion = loss_ops.CrossEntropyLossSmooth(args.label_smoothing).cuda(args.gpu)
soft_criterion = loss_ops.AdaptiveLossSoft(args.alpha_min, args.alpha_max, args.iw_clip).cuda(args.gpu)
if not getattr(args, 'inplace_distill', True):
soft_criterion = None
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
args.n_iters_per_epoch = len(train_loader)
logger.info( f'building optimizer and lr scheduler, \
local rank {args.gpu}, global rank {args.rank}, world_size {args.world_size}')
optimizer = build_optimizer(args, model)
lr_scheduler = build_lr_scheduler(args, optimizer)
# optionally resume from a checkpoint
if args.resume:
saver.load_checkpoints(args, model, optimizer, lr_scheduler, logger)
logger.info(args)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
args.curr_epoch = epoch
logger.info('Training lr {}'.format(lr_scheduler.get_lr()[0]))
# train for one epoch
acc1, acc5 = train_epoch(epoch, model, train_loader, optimizer, criterion, args, \
soft_criterion=soft_criterion, lr_scheduler=lr_scheduler)
if comm.is_master_process() or args.distributed:
# validate supernet model
validate(
train_loader, val_loader, model, criterion, args
)
if comm.is_master_process():
# save checkpoints
saver.save_checkpoint(
args.checkpoint_save_path,
model,
optimizer,
lr_scheduler,
args,
epoch,
)
def train_epoch(
epoch,
model,
train_loader,
optimizer,
criterion,
args,
soft_criterion=None,
lr_scheduler=None,
):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
model.train()
end = time.time()
num_updates = epoch * len(train_loader)
for batch_idx, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# total subnets to be sampled
num_subnet_training = max(2, getattr(args, 'num_arch_training', 2))
optimizer.zero_grad()
### compute gradients using sandwich rule ###
# step 1 sample the largest network, apply regularization to only the largest network
drop_connect_only_last_two_stages = getattr(args, 'drop_connect_only_last_two_stages', True)
model.module.sample_max_subnet()
model.module.set_dropout_rate(args.dropout, args.drop_connect, drop_connect_only_last_two_stages) #dropout for supernet
output = model(images)
loss = criterion(output, target)
loss.backward()
with torch.no_grad():
soft_logits = output.clone().detach()
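# the largest sub-network's logits are kept as soft targets: the smaller sub-networks
# sampled below are trained against them via soft_criterion (inplace distillation)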
#step 2. sample the smallest network and several random networks
sandwich_rule = getattr(args, 'sandwich_rule', True)
model.module.set_dropout_rate(0, 0, drop_connect_only_last_two_stages) #reset dropout rate
for arch_id in range(1, num_subnet_training):
if arch_id == num_subnet_training-1 and sandwich_rule:
model.module.sample_min_subnet()
else:
model.module.sample_active_subnet()
# calculating loss
output = model(images)
if soft_criterion:
loss = soft_criterion(output, soft_logits)
else:
assert not args.inplace_distill
loss = criterion(output, target)
loss.backward()
# clip gradients if specified
if getattr(args, 'grad_clip_value', None):
torch.nn.utils.clip_grad_value_(model.parameters(), args.grad_clip_value)
optimizer.step()
#accuracy measured on the local batch
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
corr1, corr5, loss = acc1*args.batch_size, acc5*args.batch_size, loss.item()*args.batch_size #just in case the batch size is different on different nodes
stats = torch.tensor([corr1, corr5, loss, args.batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1/batch_size, corr5/batch_size, loss/batch_size
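# metrics are all-reduced as weighted sums together with the total batch size and only then
# normalized, so the global averages stay correct even if per-rank batch sizes differ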
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
else:
losses.update(loss.item(), images.size(0))
top1.update(acc1, images.size(0))
top5.update(acc5, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
num_updates += 1
if lr_scheduler is not None:
lr_scheduler.step()
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
return top1.avg, top5.avg
def validate(
train_loader,
val_loader,
model,
criterion,
args,
distributed = True,
):
subnets_to_be_evaluated = {
'attentive_nas_min_net': {},
'attentive_nas_max_net': {},
}
acc1_list, acc5_list = attentive_nas_eval.validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration = True,
)
if __name__ == '__main__':
main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from AttentiveNAS (https://github.com/facebookresearch/AttentiveNAS)
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
from datetime import date
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models
from utils.config import setup
from utils.flops_counter import count_net_flops_and_params
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from utils.progress import AverageMeter, ProgressMeter, accuracy
parser = argparse.ArgumentParser(description='Test AlphaNet Models')
parser.add_argument('--config-file', default='./configs/eval_alphanet_models.yml')
parser.add_argument('--model', default='a0', type=str, choices=['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a5_1', 'a6'])
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
run_args = parser.parse_args()
if __name__ == '__main__':
args = setup(run_args.config_file)
args.model = run_args.model
args.gpu = run_args.gpu
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
args.__dict__['active_subnet'] = args.__dict__['pareto_models'][args.model]
print(args.active_subnet)
train_loader, val_loader, train_sampler = build_data_loader(args)
## init static attentivenas model with weights inherited from the supernet
model = models.model_factory.create_model(args)
model.to(args.gpu)
model.eval()
# bn running stats calibration following Slimmable (https://arxiv.org/abs/1903.05134)
# please consider trying a different random seed if you see a small accuracy drop
with torch.no_grad():
model.reset_running_stats_for_calibration()
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
images = images.cuda(args.gpu, non_blocking=True)
model(images) #forward only
model.eval()
with torch.no_grad():
criterion = nn.CrossEntropyLoss().cuda()
from evaluate.imagenet_eval import validate_one_subnet
acc1, acc5, loss, flops, params = validate_one_subnet(val_loader, model, criterion, args)
print(acc1, acc5, flops, params)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
from scipy.interpolate import UnivariateSpline, splrep, BSpline, splev
import torch
n = 500
#G = torch.tensor([1-i/(n+1) for i in range(n)])
G = torch.tensor([1.0 for i in range(n)])
# CIFAR10 approx pattern
#G = torch.concatenate((1.0*torch.ones(7*n//8), 0.5*torch.ones(n//8)))
# Imagenet like
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
#G = torch.tensor([1.0 - 0.5*i/n for i in range(n)])
#G = torch.tensor([min(0.1, 1.0/math.sqrt(i+1)) for i in range(n)])
#G = torch.concatenate((10.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 1.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 0.1*torch.ones(n//2)))
G = torch.concatenate((
torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
torch.tensor([1.0 for i in range(9*n//10)])))
# This one gives very promising shapes!
# It gives a learning rate warmup at the beginning,
# with a fall-off that's more gradual and cosine-like.
# G = torch.concatenate((
# torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 + (i/(9*n//10)) for i in range(9*n//10)])))
# No warmup version
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
# G = torch.concatenate((
# torch.tensor([((i+1)/(n//100+1)) for i in range(n//100)]),
# torch.tensor([1.0 + (i/((99*n)//100)) for i in range((99*n)//100)])))
# G = torch.concatenate((
# torch.tensor([max(1, 2*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 - 0.3*(i/(9*n//10)) for i in range(9*n//10)])))
# spl = splrep(x=[0, n//10, n], y=[10, 1, 2], k=2)
# spl(range(n))
G = torch.tensor(scipy.ndimage.gaussian_filter1d(G, sigma=30))
constrain_decreasing = False
D = 1.0
Dsq = D**2
Gsq = G**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
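# Parameterization used below: lamb[0] = x[0] and, for k >= 1,
# lamb[k] = x[0] * exp(-(x[1] + ... + x[k])), so the sequence is strictly positive and,
# when the increments x[k] are constrained to be non-negative (constrain_decreasing),
# monotonically decreasing.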
def lamb_from_increments_torch(x):
xmod = x.sub(x*mask) # Set first entry to 0
v = torch.exp(-xmod)
cexp = torch.cumprod(v, dim=0)
cexp_shift = cexp * x[0]
#pdb.set_trace()
return cexp_shift
def lamb_from_increments(xraw):
if not torch.is_tensor(xraw):
x = torch.tensor(xraw, dtype=torch.float64)
else:
x = xraw
result = lamb_from_increments_torch(x)
if torch.is_tensor(xraw):
return result
else:
return result.numpy()
def lamb_to_increments(yraw):
if not torch.is_tensor(yraw):
y = torch.tensor(yraw, dtype=torch.float64)
else:
y = yraw
def inv_cum_prod(v):
return torch.exp(torch.diff(torch.log(v)))
log_incs = -torch.log(inv_cum_prod(y))
result = torch.concatenate(
(torch.tensor([y[0]]), log_incs))
if torch.is_tensor(yraw):
return result
else:
return result.numpy()
y0 = np.flip(np.cumsum(np.abs(numpy.random.normal(size=n))))/n
x0 = lamb_to_increments(y0)
assert np.all(np.isclose(lamb_from_increments(x0), y0))
def func(x_raw):
if torch.is_tensor(x_raw):
x = x_raw
else:
x = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
# Convert to cumulative value
lamb = lamb_from_increments_torch(x)
lamb_sq = lamb*lamb
lamb_flip = lamb.flip(dims=(0,))
lamb_sum = torch.sum(lamb)
lamb_sq_flip = lamb_flip*lamb_flip
Gsq_flip = Gsq.flip(dims=(0,))
t1 = 0.5*Dsq/lamb_sum # Distance error term
t2 = 0.5/lamb_sum # Gradient error term
t2 *= torch.sum(Gsq*lamb_sq)
inner_cumsum = torch.cumsum(Gsq_flip*lamb_sq_flip, dim=0)
denom_cumsum = torch.cumsum(lamb_flip, dim=0)
eval_terms = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
t3 = 0.5*torch.sum(eval_terms)
fval = (t1+t2+t3) #/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(x.grad.numpy()))
return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
if constrain_decreasing:
bounds = [(1e-12, np.inf)] + [(0, 10) for _ in range(n-1)]
else:
bounds = [(1e-12, np.inf)] + [(-10, 10) for _ in range(n-1)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = lamb_from_increments(xopt_inc)
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
cosine_curve = [D/(math.sqrt(n)) * 0.5 * (1 + math.cos((i/n) * math.pi)) for i in range(n)]
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 5))
ax = fig.add_subplot(3, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence (final={xopt[-1]})")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 3)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('G')
ax.set_title(f"Gradient norm sequence")
ax.plot(range(1, n+1), G, 'k')
plt.tight_layout()
fname = "lamb_lbfgs_seq.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all') |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
from scipy.interpolate import UnivariateSpline, splrep, BSpline, splev
import torch
n = 500
#G = torch.tensor([1-i/(n+1) for i in range(n)])
G = torch.tensor([1.0 for i in range(n)])
# CIFAR10 approx pattern
#G = torch.concatenate((1.0*torch.ones(7*n//8), 0.5*torch.ones(n//8)))
# Imagenet like
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
#G = torch.tensor([1.0 - 0.5*i/n for i in range(n)])
#G = torch.tensor([min(0.1, 1.0/math.sqrt(i+1)) for i in range(n)])
#G = torch.concatenate((10.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 1.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 0.1*torch.ones(n//2)))
G = torch.concatenate((
torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
torch.tensor([1.0 for i in range(9*n//10)])))
# This one gives very promising shapes!
# It gives a learning rate warmup at the beginning,
# with a fall-off that's more gradual and cosine-like.
# G = torch.concatenate((
# torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 + (i/(9*n//10)) for i in range(9*n//10)])))
# No warmup version
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
# G = torch.concatenate((
# torch.tensor([((i+1)/(n//100+1)) for i in range(n//100)]),
# torch.tensor([1.0 + (i/((99*n)//100)) for i in range((99*n)//100)])))
# G = torch.concatenate((
# torch.tensor([max(1, 2*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 - 0.3*(i/(9*n//10)) for i in range(9*n//10)])))
# spl = splrep(x=[0, n//10, n], y=[10, 1, 2], k=2)
# spl(range(n))
#G = torch.tensor(scipy.ndimage.gaussian_filter1d(G, sigma=30))
D = 1.0
Dsq = D**2
Gsq = G**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
x0 = np.array([D/(math.sqrt(n)) for _ in range(n)])
def func(x_raw):
if torch.is_tensor(x_raw):
x = x_raw
else:
x = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
# Convert to cumulative value
lamb = x
lamb_sq = lamb*lamb
lamb_flip = lamb.flip(dims=(0,))
lamb_sum = torch.sum(lamb)
lamb_sq_flip = lamb_flip*lamb_flip
Gsq_flip = Gsq.flip(dims=(0,))
t1 = 0.5*Dsq/lamb_sum # Distance error term
t2 = 0.5/lamb_sum # Gradient error term
t2 *= torch.sum(Gsq*lamb_sq)
inner_cumsum = torch.cumsum(Gsq_flip*lamb_sq_flip, dim=0)
denom_cumsum = torch.cumsum(lamb_flip, dim=0)
eval_terms = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
t3 = 0.5*torch.sum(eval_terms)
fval = (t1+t2+t3) #/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(x.grad.numpy()))
return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
bounds = [(1e-12, np.inf) for _ in range(n)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = xopt_inc
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
cosine_curve = [D/(math.sqrt(n)) * 0.5 * (1 + math.cos((i/n) * math.pi)) for i in range(n)]
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 5))
ax = fig.add_subplot(3, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence (final={xopt[-1]})")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 3)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('G')
ax.set_title(f"Gradient norm sequence")
ax.plot(range(1, n+1), G, 'k')
plt.tight_layout()
fname = "lamb_lbfgs_seq.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all') |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
import torch
n = 1000
G = 1.0
D = 1.0
Gsq = G**2
Dsq = D**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
def lamb_from_increments_torch(x):
xmod = x.sub(x*mask) # Set first entry to 0
v = torch.exp(-xmod)
cexp = torch.cumprod(v, dim=0)
cexp_shift = cexp * x[0]
#pdb.set_trace()
return cexp_shift
def lamb_from_increments(xraw):
if not torch.is_tensor(xraw):
x = torch.tensor(xraw, dtype=torch.float64)
else:
x = xraw
result = lamb_from_increments_torch(x)
if torch.is_tensor(xraw):
return result
else:
return result.numpy()
def lamb_to_increments(yraw):
if not torch.is_tensor(yraw):
y = torch.tensor(yraw, dtype=torch.float64)
else:
y = yraw
def inv_cum_prod(v):
return torch.exp(torch.diff(torch.log(v)))
log_incs = -torch.log(inv_cum_prod(y))
result = torch.concatenate(
(torch.tensor([y[0]]), log_incs))
if torch.is_tensor(yraw):
return result
else:
return result.numpy()
y0 = np.flip(np.cumsum(np.abs(numpy.random.normal(size=n))))/n
x0 = lamb_to_increments(y0)
assert np.all(np.isclose(lamb_from_increments(x0), y0))
def func(x_raw):
if torch.is_tensor(x_raw):
x = x_raw
else:
x = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
lamb = lamb_from_increments_torch(x)
lamb_flip = lamb.flip(dims=(0,))
lamb_sum = torch.sum(lamb)
lamb_sq_flip = lamb_flip*lamb_flip
t1 = 0.5*Dsq/lamb_sum # Distance error term
t2 = 0.5*Gsq/lamb_sum # Gradient error term
t2 *= torch.sum(lamb_sq_flip)
inner_cumsum = torch.cumsum(lamb_sq_flip, dim=0)
denom_cumsum = torch.cumsum(lamb_flip, dim=0)
eval_terms = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
t3 = 0.5*Gsq*torch.sum(eval_terms)
fval = (t1+t2+t3) #/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(x.grad.numpy()))
return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
bounds = [(1e-12, np.inf)] + [(0, 10) for _ in range(n-1)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = lamb_from_increments(xopt_inc)
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
def func1d(x_raw):
eta = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
t1 = Dsq/(2*n*eta)
t2 = Gsq*eta/2
t3 = (Gsq*eta/2)*torch.sum(1/torch.arange(1, n))
fval = (t1+t2+t3)#/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(eta.grad.numpy()))
return (fval.item(), g)
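# For the flat-step objective above, f(eta) = D^2/(2*n*eta) + (G^2*eta/2)*(1 + H_{n-1}),
# where H_{n-1} = sum_{k=1}^{n-1} 1/k. The minimizer is eta* = D/(G*sqrt(n*(1 + H_{n-1})))
# with optimal value D*G*sqrt((1 + H_{n-1})/n); the "theory" quantities printed below use
# the approximation 1 + H_{n-1} ~ 2 + log(n-1).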
xopt_1d, fopt_1d, dopt_1d = scipy.optimize.fmin_l_bfgs_b(
func1d, np.array([y0[0]]), bounds = [(1e-8, 100)],
iprint = 0
)
assert dopt_1d['warnflag'] == 0
xopt_1d = xopt_1d[0]
print(f"1D grad: {dopt_1d['grad']}")
print(f"1D Steps to convergence: {dopt_1d['funcalls']}")
#print(f"grad: {dopt_1d['grad']}")
print(f"eta 1d: {xopt_1d}")
print(f"1D fval: {fopt_1d}")
theory_eta = D/(G*math.sqrt(n*(2+math.log(n-1))))
theory1d = (D*G*math.sqrt(2+math.log(n-1))/math.sqrt(n))#/max(G/D,D/G)
print(f"Theory eta: {theory_eta}")
print(f"theory 1d fval: {theory1d}")
print(f"1d/full ratio: {fopt_1d/fopt}")
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 3))
ax = fig.add_subplot(2, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence v.s. optimal flat Dsq={D} Gsq={G}")
ax.plot(range(1, n+1), xopt, 'k')
ax.hlines(y=xopt_1d, xmin=1, xmax=n, color='r')
ax.hlines(y=D/(G*math.sqrt(n)), xmin=1, xmax=n, color='b')
#ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(2, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence v.s. optimal flat D={D} G={G}")
ax.plot(range(1, n+1), xopt, 'k')
ax.hlines(y=xopt_1d, xmin=1, xmax=n, color='r')
ax.hlines(y=D/(G*math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.set_yscale('log')
plt.tight_layout()
fname = "lamb_lbfgs.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all') |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import logging
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
from fairseq.optim import FairseqOptimizer, register_optimizer
logger = logging.getLogger(__name__)
def gmean(input_x):
log_x = torch.log(input_x.flatten())
return torch.exp(torch.mean(log_x))
class AdaGradFlex(torch.optim.Optimizer):
"""
Adagrad with coordinate-wise flex statistics.
"""
def __init__(
self, params: _params_t,
lr: float = 1.0,
momentum: float = 0,
log_every: int = 0,
weight_decay: float = 0.0,
eps: float = 1e-20,
decouple: bool = True,
):
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if momentum < 0:
raise ValueError(f"Momentum {momentum} must be non-negative")
print(f"Weight decay: {weight_decay}")
defaults = dict(lr=lr,
momentum=momentum,
eps=eps,
weight_decay=weight_decay,
log_every=log_every,
k = 0,
numerator_weighted=0.0,
decouple=decouple)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
group = self.param_groups[0]
momentum = group['momentum']
ck = 1 - momentum
log_every = group['log_every']
for group in self.param_groups:
eps = group["eps"]
k = group['k']
decay = group['weight_decay']
decouple = group['decouple']
lr = group['lr']
below_one = 0
total = 0
######
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if "alphak" not in state:
state["alphak"] = torch.zeros_like(p.data).detach()
#state["gsq"] = torch.zeros_like(p.data).detach()
state["gmax"] = torch.zeros_like(p.data).detach()
state['sk'] = torch.zeros_like(p.data).detach()
if momentum > 0:
state["z"] = torch.clone(p.data).detach()
state['flex'] = torch.zeros_like(p.data).detach()
sk = state['sk']
#gsq = state['gsq']
alphak = state['alphak']
gmax = state['gmax']
flex = state['flex']
if grad.is_sparse:
grad = grad.to_dense()
if decay != 0 and not decouple:
grad.add_(p.data, alpha=decay)
flex.add_(grad*grad).sub_(grad * sk)
alphak.copy_(alphak.fmax(flex))
gmax.copy_(gmax.fmax(grad.abs()))
sk.add_(grad)
if decay != 0 and decouple:
p_old = p.data.clone()
if momentum > 0:
z = state['z']
z.sub_(grad.div(torch.sqrt(gmax*gmax + alphak) + eps), alpha=lr)
p.data.mul_(1-ck).add_(z, alpha=ck)
if decay != 0 and decouple:
z.add_(p_old, alpha=-decay * lr)
else:
p.data.sub_(grad.div(torch.sqrt(gmax*gmax + alphak) + eps), alpha=lr)
if decay != 0 and decouple:
p.data.add_(p_old, alpha=-decay * lr)
### Logging
# below_one += ((alphak+eps)/(gmax*gmax + eps) < 1).sum().item()
# total += grad.numel()
# if k % 50 == 0 and k > 0:
# print(f"fraction below 1: {below_one/total}")
# ratio = (alphak+eps)/(gmax*gmax + eps)
# print(f"mean: {ratio.mean()} gmean: {gmean(ratio)} std: {ratio.std()}")
# qs = [0.0, 0.05, 0.10, 0.25, 0.50, 0.75, 0.90, 0.95, 1.0]
# quantiles = torch.quantile(ratio, q=torch.tensor(qs).cuda())
# print(f"quantiles: {list(zip(qs, quantiles))}")
group['k'] = k + 1
return loss
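# Minimal usage sketch (illustrative only, not part of the fairseq integration above):
# any plain PyTorch module can be optimized with AdaGradFlex like a standard torch.optim optimizer.
if __name__ == "__main__":
    demo_model = torch.nn.Linear(10, 2)
    demo_opt = AdaGradFlex(demo_model.parameters(), lr=1.0, momentum=0.9, weight_decay=0.0)
    for _ in range(3):
        demo_opt.zero_grad()
        demo_loss = demo_model(torch.randn(8, 10)).pow(2).mean()
        demo_loss.backward()
        demo_opt.step()
    print(f"final demo loss: {demo_loss.item():.4f}")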
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import matplotlib.pyplot as plt
from datasets import transformations
import torch
import numpy as np
def plot_x2_reconstructions(
pairs, model, indices, train_set, save_name,
):
"""
Plots sample x2 reconstructions based on indices
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable returning x2_reconstruction, taking (x1, x2, params) or (x1, params)
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
title = "Training Reconstruction" if train_set else "Test Reconstruction"
fig, axs = plt.subplots(len(indices), 3, figsize=(6, 12))
fig.suptitle(title, fontsize=16)
for i, sample_idx in enumerate(indices):
x1, x2, params = pairs[sample_idx]
n_pixels = x1.shape[1]
try:
# for weakly supervised autoencoder
x2_reconstruction = model(x1.unsqueeze(0), x2.unsqueeze(0), params)
except TypeError:
# for real autoencoder
x2_reconstruction = model(x1.unsqueeze(0), params)
axs[i][0].imshow(x1.squeeze())
axs[i][0].set_title("x1")
axs[i][1].imshow(x2.squeeze())
axs[i][1].set_title("x2")
axs[i][2].imshow(
x2_reconstruction.cpu().detach().numpy().reshape(n_pixels, n_pixels)
)
axs[i][2].set_title("x2 from transformed z1")
if save_name:
plt.savefig(f"{save_name}.png", dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
def plot_x1_reconstructions(pairs, model, indices, train_set, save_name):
"""
Plots sample x1 reconstructions based on indices
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
title = "Training Reconstructions" if train_set else "Test Reconstructions"
fig, axs = plt.subplots(len(indices), 2, figsize=(5, 12))
fig.suptitle(title, fontsize=16)
for i, sample_idx in enumerate(indices):
x1, x2, params = pairs[sample_idx]
n_pixels = x1.shape[1]
x1_reconstruction = model(x1.unsqueeze(0)).cpu().detach().numpy()
axs[i][0].imshow(x1.squeeze())
axs[i][0].set_title("x1")
axs[i][1].imshow(x1_reconstruction.reshape(n_pixels, n_pixels))
axs[i][1].set_title("x1 reconstruction")
if save_name:
plt.savefig(f"{save_name}.png", dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
def plot_rotations(
X,
model,
n_transformations,
title,
save_name=None,
param_name="angle",
use_latent_op=True,
):
"""Plots all rotated reconstructions for given samples"""
font_size = 18
degree_sign = "\N{DEGREE SIGN}"
n_samples = X.shape[0]
fig, axs = plt.subplots(n_samples, n_transformations + 2, figsize=(16, 12))
fig.suptitle(title, fontsize=16)
for sample_i, x1 in enumerate(X):
axs[sample_i, 0].imshow(x1.squeeze())
axs[sample_i, 0].set_title("original", fontsize=font_size)
axs[sample_i, 0].set_xticks([])
axs[sample_i, 0].set_yticks([])
transformation_params = get_all_transformations(param_name, n_transformations)
for i, param in enumerate(transformation_params):
if use_latent_op:
x2_reconstruction = model.reconstruct_x2(x1.unsqueeze(1), param)
else:
x2_reconstruction = model.reconstruct_transformed_x1(
x1.unsqueeze(1), param
)
axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze())
if param_name == "angle":
axs[sample_i, i + 1].set_title(
f"{param.angle:0.0f}{degree_sign}", fontsize=font_size
)
axs[sample_i, i + 1].set_xticks([])
axs[sample_i, i + 1].set_yticks([])
if save_name:
plt.savefig(save_name, bbox_inches="tight", dpi=300)
plt.close()
else:
plt.show()
def plot_transformations_complex(
X,
model,
title,
save_name=None,
param_name="angle",
supervised=False,
):
"""Plots all rotated reconstructions for given samples"""
font_size = 18
degree_sign = "\N{DEGREE SIGN}"
n_samples = X.shape[0]
transformation_params = transformations.get_transform_params(model.data.n_rotations,
model.data.n_x_translations, model.data.n_y_translations, (1.0, ))
n_transformations = len([i for i in transformation_params])
fig, axs = plt.subplots(n_samples, n_transformations + 1, figsize=(16, int(12/5.*len(X))))
for sample_i, x1 in enumerate(X):
axs[sample_i, 0].imshow(x1.squeeze())
axs[sample_i, 0].set_title("original", fontsize=font_size)
axs[sample_i, 0].set_xticks([])
axs[sample_i, 0].set_yticks([])
x1 = x1.to(model.device)
z1 = model.encoder(x1)
transformation_params = transformations.get_transform_params(model.data.n_rotations,
model.data.n_x_translations, model.data.n_y_translations, (1.0, ))
for i, param in enumerate(transformation_params):
shifts = torch.LongTensor([[i]])
if supervised:
z_transformed = model.transform(z1, [shifts])
else:
z_transformed = model.transform(z1, torch.LongTensor([[i]]))
x2_reconstruction = model.decoder(z_transformed).detach().cpu().numpy()
axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze())
if param_name == "angle":
axs[sample_i, i + 1].set_title(
f"{param.angle:0.0f}{degree_sign}", fontsize=font_size
)
elif param_name == "tx":
axs[sample_i, i + 1].set_title(f"{param.shift_x:0.0f}", fontsize=font_size)
elif param_name == 'ty':
axs[sample_i, i + 1].set_title(f"{param.shift_y:0.0f}", fontsize=font_size)
else:
axs[sample_i, i + 1].set_title(f"{param.shift_x:0.0f},{param.shift_y:0.0f}",
fontsize=font_size)
axs[sample_i, i + 1].set_xticks([])
axs[sample_i, i + 1].set_yticks([])
if save_name:
plt.savefig(save_name, bbox_inches="tight", dpi=300)
plt.close()
else:
plt.show()
def get_all_transformations(param_name, n_transformations):
if param_name == "angle":
return transformations.get_transform_params(n_transformations, 0, 0, (1.0,))
elif param_name == "shift_x":
return transformations.get_transform_params(0, n_transformations, 0, (1.0,))
elif param_name == "shift_y":
return transformations.get_transform_params(0, 0, n_transformations, (1.0,))
def plot_rotations_translations(X, model, n_transformations, n_rot, n_x, n_y, save_name=None):
degree_sign = "\N{DEGREE SIGN}"
n_samples = X.shape[0]
fig, axs = plt.subplots(n_samples, n_transformations + 2, figsize=(16, int(12/5.*len(X))))
for sample_i, x1 in enumerate(X):
axs[sample_i, 0].imshow(x1.squeeze())
axs[sample_i, 0].set_title("original", fontsize=16)
axs[sample_i, 0].set_xticks([])
axs[sample_i, 0].set_yticks([])
x1 = x1.to(model.device)
transformation_params = [t for t in transformations.get_transform_params(n_rot, n_x, n_y, (1.0, ))]
z = model.encoder(x1)
angle = None
shift_x = None
shift_y = None
t_list = []
i = 0
for _, t in enumerate(range(n_transformations+1)):
j = np.random.randint(len(transformation_params))
param = transformation_params[j]
if t not in t_list:
shifts = model.return_shifts([param])
z_transformed = model.transform(z, shifts)
x2_reconstruction = model.decoder(z_transformed).detach().cpu().numpy()
axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze())
axs[sample_i, i + 1].set_title(f"{param.angle:0.0f}{degree_sign}\n{param.shift_x:0.0f},{param.shift_y:0.0f}", fontsize=16)
axs[sample_i, i + 1].set_xticks([])
axs[sample_i, i + 1].set_yticks([])
angle = param.angle
shift_x = param.shift_x
shift_y = param.shift_y
i += 1
if i+1 >= n_transformations + 2:
break
if save_name:
plt.savefig(save_name, bbox_inches="tight", dpi=300)
plt.close()
else:
plt.show() |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""
Launches experiments locally or on the cluster
python run_experiments.py [name] --cluster
OPTIONS:
python run_experiments.py linear-mnist-test --data mnist
python run_experiments.py cci-autoencoder-shapes --architecture CCI
"""
import argparse
import autoencoder
import cci_variational_autoencoder
import os
import itertools
from datasets import datasets
from functools import partial
import torch
import shutil
import submitit
BASE_PARAMS = {
"seed": [0, 10, 20, 30, 40],
"n_epochs": [30],
"learning_rate": [0.001, 0.0005],
}
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"running on {device}")
def run_cci_vae_shapes(
beta=1000.0,
c_max=36.0,
z_dim=30,
batch_size=16,
n_epochs=10,
learning_rate=0.0005,
seed=0,
folder=None,
n_classes=300,
architecture=None,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution="gaussian",
):
"""Runs CCI VAE and variants on Simple Shapes. Note architecture kwarg is not used"""
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
shapes = datasets.SimpleShapes(
batch_size,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
n_classes=n_classes,
seed=seed,
pairs=False,
)
train_cci_vae_variants(
shapes, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
)
def run_cci_vae_mnist(
beta=1000.0,
c_max=36.0,
z_dim=30,
batch_size=16,
n_epochs=10,
learning_rate=0.0005,
seed=0,
folder=None,
n_classes=300,
proportion=0.01,
architecture=None,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution="gaussian",
):
"""Runs CCI VAE and variants on MNIST. Note architecture kwarg is not used"""
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
mnist = datasets.ProjectiveMNIST(
batch_size,
seed=seed,
train_set_proportion=proportion,
test_set_proportion=1.0,
valid_set_proportion=proportion,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
pairs=False,
)
train_cci_vae_variants(
mnist, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
)
def run_cci_vae_single_digit_mnist(
beta=1000.0,
c_max=36.0,
z_dim=30,
batch_size=16,
n_epochs=10,
learning_rate=0.0005,
seed=0,
folder=None,
n_classes=300,
proportion=0.01,
architecture=None,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution="gaussian",
):
"""Runs CCI VAE and variants on MNIST. Note architecture kwarg is not used"""
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
mnist = datasets.ProjectiveSingleDigitMNIST(
batch_size,
seed=seed,
train_set_proportion=proportion,
test_set_proportion=1.0,
valid_set_proportion=proportion,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
pairs=False,
)
train_cci_vae_variants(
mnist, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
)
def train_cci_vae_variants(
data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
):
"""Trains CCI, Beta, and standard VAE"""
print("Training CCI VAE")
cci_vae_folder = os.path.join(folder, "cci_vae")
train_cci_vae(
data,
beta,
c_max,
z_dim,
n_epochs,
learning_rate,
distribution,
seed,
cci_vae_folder,
)
print("Training Beta VAE")
beta_vae_folder = os.path.join(folder, "beta_vae")
train_cci_vae(
data,
beta,
0.0,
z_dim,
n_epochs,
learning_rate,
distribution,
seed,
beta_vae_folder,
)
print("Training VAE")
vae_folder = os.path.join(folder, "vae")
train_cci_vae(
data, 1.0, 0.0, z_dim, n_epochs, learning_rate, distribution, seed, vae_folder
)
def run_autoencoder_shapes(
z_dim=1000,
batch_size=16,
n_epochs=30,
learning_rate=0.0005,
seed=0,
folder=None,
architecture="Linear",
n_classes=300,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution=None,
use_latent_op=True,
):
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
shapes = datasets.SimpleShapes(
batch_size,
n_classes=n_classes,
seed=seed,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
)
if use_latent_op:
train_autoencoder(
shapes, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
else:
train_standard_autoencoder(
shapes, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
def run_autoencoder_mnist(
z_dim=1000,
batch_size=16,
n_epochs=2,
learning_rate=0.0005,
seed=0,
folder=None,
architecture="Linear",
proportion=0.01,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution=None,
use_latent_op=True,
):
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
mnist = datasets.ProjectiveMNIST(
batch_size,
seed=seed,
train_set_proportion=proportion,
test_set_proportion=1.0,
valid_set_proportion=proportion,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
)
if use_latent_op:
print("using latent_op")
train_autoencoder(
mnist, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
else:
train_standard_autoencoder(
mnist, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
def train_standard_autoencoder(
data, z_dim, n_epochs, learning_rate, seed, folder, architecture
):
model = autoencoder.AutoEncoder(
data,
z_dim=z_dim,
n_epochs=n_epochs,
learning_rate=learning_rate,
encoder_type=architecture,
decoder_type=architecture,
device=device,
seed=seed,
)
model.run()
model.save_best_validation(os.path.join(folder, "standard-autoencoder"))
def train_autoencoder(data, z_dim, n_epochs, learning_rate, seed, folder, architecture):
model_disentangled_rotation = autoencoder.AutoEncoder(
data,
z_dim=z_dim,
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="DisentangledRotation",
encoder_type=architecture,
decoder_type=architecture,
device=device,
seed=seed,
)
model_disentangled_rotation.run()
model_disentangled_rotation.save_best_validation(
os.path.join(folder, "disentangled-operator")
)
model_shift_operator = autoencoder.AutoEncoder(
data,
z_dim=z_dim,
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="ShiftOperator",
encoder_type=architecture,
decoder_type=architecture,
device=device,
seed=seed,
)
model_shift_operator.run()
model_shift_operator.save_best_validation(os.path.join(folder, "shift-operator"))
def train_cci_vae(
data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
):
cci_vae = cci_variational_autoencoder.CCIVariationalAutoEncoder(
data,
beta=beta,
c_max=c_max,
z_dim=z_dim,
seed=seed,
learning_rate=learning_rate,
n_epochs=n_epochs,
distribution=distribution,
)
cci_vae.train()
cci_vae.save_best_validation(folder)
def launch_single_job(experiment, base_dir, results_dir, **kwargs):
log_folder = base_dir + "%j"
executor = submitit.AutoExecutor(folder=log_folder)
# executor.update_parameters(timeout_min=600, gpus_per_node=1)
executor.update_parameters(
timeout_min=240, gpus_per_node=1,
)
job = executor.submit(experiment, folder=results_dir, **kwargs)
print("job id", job.job_id)
print(f"logging to: {base_dir + job.job_id}")
print(f"results stored at: {results_dir}")
result = job.result()
print(f"job result: {result}")
def launch_sweep(experiment, params, base_dir, experiment_dir):
log_folder = base_dir + "%j"
executor = submitit.AutoExecutor(folder=log_folder)
# executor.update_parameters(timeout_min=600, gpus_per_node=1)
executor.update_parameters(
timeout_min=600, gpus_per_node=1,
)
jobs = []
with executor.batch():
for i, param in enumerate(params):
print("running with param ", param)
param["folder"] = os.path.join(experiment_dir, f"{i}")
job = executor.submit(experiment, **param)
jobs.append(job)
print(f"launched {len(params)} jobs")
print("sweep id", jobs[0].job_id)
print(f"logging to: {base_dir}{jobs[0].job_id}")
results = [job.result() for job in jobs]
print(f"job results: {results}")
def get_params(args):
params = BASE_PARAMS
if args.data == "mnist":
params["batch_size"] = [8, 16, 32, 64]
elif args.data == "shapes":
params["batch_size"] = [4, 8, 16, 32]
if args.model == "cci_vae":
params["n_epochs"] = [10, 20, 50]
params["beta"] = [4.0, 10.0, 100.0, 1000.0]
params["z_dim"] = [10, 30]
return params
def get_param_combinations(params):
"""Returns a list of dictionaries with all combinations"""
keys, values = zip(*params.items())
params_combinations = [dict(zip(keys, v)) for v in itertools.product(*values)]
return params_combinations
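# Example (illustrative): get_param_combinations({"seed": [0, 10], "n_epochs": [30]})
# -> [{"seed": 0, "n_epochs": 30}, {"seed": 10, "n_epochs": 30}]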
def get_directories(args, cluster=False):
user = os.environ["USER"]
if cluster:
RESULTS_DIR = f"/checkpoint/{user}/Equivariance/"
base_dir = f"/checkpoint/{user}/jobs/{args.name}/"
else:
RESULTS_DIR = os.path.expanduser(
"~/Dropbox/FAIR/Projects/Equivariance/experiments/results"
)
base_dir = os.path.expanduser(
"~/Dropbox/FAIR/Projects/Equivariance/experiments/jobs/{args.name}/"
)
experiment_dir = os.path.join(RESULTS_DIR, args.name)
# clean experimental directory
if os.path.exists(experiment_dir):
shutil.rmtree(experiment_dir)
return base_dir, experiment_dir
def get_experiment_function(args):
experiments = {
"run_autoencoder_shapes": run_autoencoder_shapes,
"run_autoencoder_mnist": run_autoencoder_mnist,
"run_cci_vae_shapes": run_cci_vae_shapes,
"run_cci_vae_mnist": run_cci_vae_mnist,
"run_cci_vae_single_digit_mnist": run_cci_vae_mnist,
}
experiment = experiments[f"run_{args.model}_{args.data}"]
print(f"run_{args.model}_{args.data}")
if args.data == "shapes":
experiment = partial(experiment, n_classes=args.n_classes)
elif args.data in {"mnist", "single_digit_mnist"}:
experiment = partial(experiment, proportion=args.mnist_proportion)
else:
raise ValueError(f"dataset {args.data} not supported")
# standard autoencoder
if "autoencoder" == args.model and args.no_latent_op:
experiment = partial(experiment, use_latent_op=False)
n_rotations, n_x_translations, n_y_translations = get_n_transformations(args)
experiment = partial(
experiment,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
architecture=args.architecture,
z_dim=args.z_dim,
distribution=args.distribution,
)
return experiment
def get_n_transformations(args):
n_rotations, n_x_translations, n_y_translations = 0, 0, 0
n_transformations = 9
if args.transformation == "rotation":
n_rotations = n_transformations
if args.transformation == "shift_x":
n_x_translations = n_transformations
if args.transformation == "shift_y":
n_y_translations = n_transformations
return n_rotations, n_x_translations, n_y_translations
def init_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
usage="python run_experiments --cluster",
description="runs experiments with specified parameters",
)
parser.add_argument("name", help="name of experiment")
parser.add_argument(
"--model",
help="model for experiments. Example: autoencoder, cci_vae",
default="autoencoder",
)
parser.add_argument(
"--architecture", help="name of autoencoder architecture", default="Linear",
)
parser.add_argument(
"--data",
help="dataset used for training: mnist, single_digit_mnist",
default="shapes",
)
parser.add_argument(
"--mnist_proportion",
help="proportion of mnist to use",
default=0.01,
type=float,
)
parser.add_argument(
"--n_classes",
help="number of classes to use for simple shapes",
default=300,
type=int,
)
parser.add_argument(
"--z_dim", help="dataset used for training", default=1000, type=int
)
parser.add_argument(
"--transformation",
choices=["rotation", "shift_x", "shift_y"],
type=str.lower,
default="rotation",
)
parser.add_argument(
"--distribution",
help="likelihood distribution used for computing loss in CCI VAE",
choices=["gaussian", "bernoulli"],
type=str.lower,
default="gaussian",
)
parser.add_argument("--beta", help="beta used for CCI VAE", default=1000, type=int)
parser.add_argument(
"--no_latent_op",
help="use standard autoencoder without latent operators",
action="store_true",
)
parser.add_argument("--cluster", action="store_true")
parser.add_argument("--sweep", action="store_true")
return parser
if __name__ == "__main__":
parser = init_argparse()
args = parser.parse_args()
experiment = get_experiment_function(args)
base_dir, experiment_dir = get_directories(args, cluster=args.cluster)
if args.cluster and args.sweep:
params = get_params(args)
params_combinations = get_param_combinations(params)
launch_sweep(experiment, params_combinations, base_dir, experiment_dir)
elif args.cluster:
launch_single_job(
experiment, base_dir, experiment_dir,
)
else:
print("running single local job")
experiment(folder=experiment_dir)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from collections import OrderedDict
from abc import ABC
class ResNetExplorer(nn.Module):
"""
Loads a pre-trained model and hook on one of its layer
"""
def __init__(self, path_to_model="pytorch/vision:v0.6.0", model="resnet152"):
super().__init__()
self.pretrained_model = torch.hub.load(path_to_model, model, pretrained=True)
def create_full_model(self, layer_to_explore, layer_to_explore_size, image_size):
all_layers = dict(list(self.pretrained_model.named_children()))
all_keys = list(
all_layers.keys()
) # named_children() yields modules in registration order and dicts preserve insertion order (Python 3.7+), so the order is preserved
max_index = all_keys.index(layer_to_explore)
##### ENCODER
# take all layers up to the one we want to explore for the encoder
encoder_layers = [
(all_keys[i], all_layers[layer])
for i, layer in enumerate(all_layers)
if i <= max_index
]
layers = OrderedDict()
for layer in encoder_layers:
name = layer[0]
layers[name] = layer[1]
# create a new model with it (saves time during the forward pass when hooking on a layer other than the last one)
self.fixed_encoder = nn.Sequential(layers)
##### Linear layer to learn the mapping
self.linear = nn.Linear(layer_to_explore_size, layer_to_explore_size)
##### DECODER
self.decoder = nn.Linear(layer_to_explore_size, image_size)
def forward(self, x):
z = self.fixed_encoder(x)
# feed flattened z to linear
z_prime = self.linear(z.view(x.size(0), -1))
x_dec = self.decoder(z_prime)
# sigmoid to have something between 0 and 1
x_dec = torch.sigmoid(x_dec)
# map to image shape
return x_dec.view(x.size())
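# Usage sketch (illustrative; the layer name and sizes below are assumptions, not values taken
# from this file): for torchvision's resnet152, hooking on "avgpool" gives a 2048-d feature,
# and a 3x224x224 input flattens to 3 * 224 * 224 pixels.
# explorer = ResNetExplorer(model="resnet152")
# explorer.create_full_model(layer_to_explore="avgpool", layer_to_explore_size=2048, image_size=3 * 224 * 224)
# x_dec = explorer(torch.randn(1, 3, 224, 224))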
class LinearEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.fc1 = nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False)
def forward(self, x):
out = x.flatten(start_dim=1)
out = self.fc1(out)
return out
class LinearDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.n_pixels = n_pixels
self.n_channels = n_channels
self.fc1 = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False)
def forward(self, x):
out = self.fc1(x)
out = out.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels)
return out
class ComplexLinearEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.fc1r = torch.nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False)
self.fc1i = torch.nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False)
def forward(self, x):
out = x.flatten(start_dim=1)
outr = self.fc1r(out)
outi = self.fc1i(out)
return (outr, outi)
class ComplexLinearDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.n_pixels = n_pixels
self.n_channels = n_channels
self.fc1r = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False)
self.fc1i = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False)
def forward(self, x):
r1 = self.fc1r(x[0])
r2 = -self.fc1i(x[1])
out_r = r1 + r2
out_r = out_r.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels)
return out_r
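def _complex_linear_demo():
    """Minimal sketch, not part of the original file: the complex encoder maps an
    image to a (real, imaginary) latent pair; the decoder maps the pair back to
    image shape using the real part of the complex product."""
    enc = ComplexLinearEncoder(n_pixels=28, n_channels=1, z_dim=16)
    dec = ComplexLinearDecoder(n_pixels=28, n_channels=1, z_dim=16)
    z = enc(torch.rand(2, 1, 28, 28))  # tuple (z_real, z_imag), each of shape (2, 16)
    return dec(z).shape  # torch.Size([2, 1, 28, 28])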
class CCIEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.encoder = nn.Sequential(
nn.Conv2d(n_channels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, 256, kernel_size=1, stride=1),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.Linear(256, z_dim),
)
def forward(self, x):
out = self.encoder(x)
return out
class CCIDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.decoder = nn.Sequential(
nn.Linear(z_dim, 256),
nn.ReLU(),
Lambda(lambda x: x.view(-1, 256, 1, 1)),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(64, n_pixels, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(n_pixels, n_channels, 4, 2, 1),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.Linear(32 * 32, n_pixels * n_pixels),
Lambda(lambda x: x.view(x.size(0), 1, n_pixels, n_pixels)),
)
def forward(self, x):
out = self.decoder(x)
return out
class NonLinearEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.fc1 = nn.Linear(n_pixels ** 2, n_pixels // 2)
self.batch_norm = nn.BatchNorm1d(n_pixels // 2)
self.fc2 = nn.Linear(n_pixels // 2, z_dim)
def forward(self, x):
out = x.flatten(start_dim=1)
out = self.fc1(out)
out = self.batch_norm(out)
out = torch.relu(out)
out = self.fc2(out)
out = torch.relu(out)
return out
class NonLinearDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.n_channels = n_channels
self.n_pixels = n_pixels
self.fc1 = nn.Linear(z_dim, (n_pixels ** 2) // 2)
self.batch_norm = nn.BatchNorm1d((n_pixels ** 2) // 2)
self.fc2 = nn.Linear((n_pixels ** 2) // 2, n_pixels ** 2)
def forward(self, x):
out = self.fc1(x)
out = self.batch_norm(out)
out = torch.relu(out)
# reshape
out = self.fc2(out)
out = torch.relu(out)
out = out.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels)
return out
class VAEBase(ABC):
@staticmethod
def reparameterize(mu, log_var):
"""Returns z_sample from mu, var"""
std = torch.exp(log_var / 2)
# z_sample = torch.normal(mu, std)
# eps = Variable(torch.randn_like(std))
eps = torch.randn_like(std)
z_sample = mu + eps.mul(std)
return z_sample
@staticmethod
def latent_sample(mu, log_var, num_std):
"""Generates sample based on mu, var that's num_std away from mean"""
std = torch.exp(log_var / 2)
z_sample = (num_std * std).add(mu)
return z_sample
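# Note (added): reparameterize implements the standard reparameterization trick,
# z = mu + eps * exp(log_var / 2) with eps ~ N(0, I), so gradients flow through mu and
# log_var; latent_sample instead moves deterministically num_std standard deviations
# away from the mean, e.g. VAEBase.latent_sample(mu, log_var, num_std=2).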
class LinearCCIVAE(nn.Module, VAEBase):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.z_dim = z_dim
self.encoder = LinearEncoder(n_pixels, n_channels, 2 * z_dim)
self.decoder = LinearDecoder(n_pixels, n_channels, z_dim)
def forward(self, x):
z_dist = self.encoder(x)
mu, log_var = z_dist[:, : self.z_dim], z_dist[:, self.z_dim :]
# reparameterize
z_sample = LinearCCIVAE.reparameterize(mu, log_var)
out = self.decoder(z_sample)
return out, mu, log_var
class Lambda(nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class CCIVAE(nn.Module, VAEBase):
"""Model Architecture from CCI-VAE paper
https://arxiv.org/abs/1804.03599
Encoder:
4 convolutional layers, each with 32 channels, 4x4 kernels, and a stride of 2.
Followed by 2 fully connected layers, each of 256 units
Latent Space: 20 units (10 for mean, 10 for variance)
Decoder:
transpose of encoder with ReLU activations
"""
def __init__(self, n_pixels, n_channels, z_dim, distribution="gaussian"):
super().__init__()
self.z_dim = z_dim
self.n_channels = n_channels
self.distribution = distribution
self.encoder = nn.Sequential(
nn.Conv2d(n_channels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, 256, kernel_size=1, stride=1),
nn.ReLU(),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.Linear(256, 2 * z_dim),
)
self.decoder = nn.Sequential(
nn.Linear(z_dim, 256),
nn.ReLU(),
Lambda(lambda x: x.view(-1, 256, 1, 1)),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(64, n_pixels, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(n_pixels, n_channels, 4, 2, 1),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.ReLU(),
nn.Linear(32 * 32, n_pixels * n_pixels),
Lambda(lambda x: x.view(x.size(0), 1, n_pixels, n_pixels)),
nn.Sigmoid(),
)
def forward(self, x):
z_dist = self.encoder(x)
mu, log_var = z_dist[:, : self.z_dim], z_dist[:, self.z_dim :]
# tanh log_var didn't seem to help
# log_var = torch.tanh(log_var)
z_sample = CCIVAE.reparameterize(mu, log_var)
out = self.decoder(z_sample)
return out, mu, log_var
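def _cci_vae_shape_check():
    """Minimal sketch, not part of the original file. Assumes 28x28 single-channel
    inputs (e.g. MNIST-sized), for which the three stride-2 convolutions reduce the
    spatial size to 1x1 so the flattened feature has exactly 256 entries."""
    model = CCIVAE(n_pixels=28, n_channels=1, z_dim=10)
    x = torch.rand(2, 1, 28, 28)
    out, mu, log_var = model(x)
    return out.shape, mu.shape, log_var.shape  # (2, 1, 28, 28), (2, 10), (2, 10)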
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import numpy as np
import functools
import pdb
class ShiftOperator:
"""Performs discrete shift based on n_rotations."""
def __init__(self, n_rotations, device):
self.n_rotations = n_rotations
self.device = device
self.translation_matrices = self.generate_shift_operator_matrices(
n_rotations + 1
)
def __call__(self, z_batch, angles):
"""Interface for Autoencoder"""
return self.translate_batch(z_batch, angles)
def translate_batch(self, z_batch, angles):
"""Applies shift operator to batch
Args:
angles (array of floats): counter-clockwise rotation in degrees.
"""
smallest_angle = 360 / (self.n_rotations + 1)
if angles.dim() > 1:
shifts = angles[:, 0] / smallest_angle
else:
shifts = angles / smallest_angle
try:
translated_batch = [
self.translate(z, shifts[i].long()) for i, z in enumerate(z_batch)
]
except IndexError as e:
print("===ANGLES ARE", angles)
raise e
return torch.stack(translated_batch)
def translate(self, z, shift):
"""Translate latent
Args:
z (1-dim tensor): latent vector
shift (int): amount by which to shift.
shift of 0 corresponds to the identity.
"""
# reshape into 2D tensor
z_2d = z.reshape(self.n_rotations + 1, -1)
translation_matrix = self.translation_matrices[shift]
# move to cpu if tensor is cpu. Used for eval
if not z_2d.is_cuda:
translation_matrix = translation_matrix.cpu()
# translation
z_2d_shifted = translation_matrix.matmul(z_2d)
# reshape back
z_shifted = z_2d_shifted.reshape(z.shape)
return z_shifted
def generate_shift_operator_matrices(self, n_rotations):
"""Generates family of shift operator matrices"""
translation_matrix = np.zeros((n_rotations, n_rotations))
for i in range(n_rotations):
translation_matrix[i, (i + 1) % n_rotations] = 1
translation_matrices = [np.eye(n_rotations, n_rotations)]
T = np.eye(n_rotations, n_rotations)
for i in range(n_rotations - 1):
T = np.dot(translation_matrix, T)
translation_matrices.append(T)
translation_matrices = np.array(translation_matrices)
_translation_matrices = torch.tensor(
translation_matrices, dtype=torch.float32, device=self.device,
)
return _translation_matrices
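def _shift_operator_demo():
    """Minimal sketch, not part of the original file. With n_rotations=3 there are 4
    discrete positions, so one 90-degree step cyclically permutes the rows of the
    latent viewed as a 4 x (z_dim / 4) grid."""
    op = ShiftOperator(n_rotations=3, device="cpu")
    z_batch = torch.arange(8, dtype=torch.float32).unsqueeze(0)  # one latent of dim 8
    return op(z_batch, torch.tensor([90.0]))  # rows of the 4 x 2 grid shifted by one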
class ComplexShiftOperator:
"""Performs discrete shift based on n_rotations"""
def __init__(self, cardinals, z_dim, device, unique_transfo=False, index=None):
self.cardinals = cardinals
self.z_dim = z_dim
self.device = device
self.translation_matrices = self.generate_translation_matrices(
self.cardinals, self.z_dim
)
if unique_transfo:
if (np.array(cardinals)>1).sum()==1:
self.index = int((np.array(cardinals)>1).nonzero()[0])
elif (np.array(cardinals)>1).sum()>1:
if index is None:
print("Must provide the index of the operator !")
else:
self.index = index
self.translate_batch = self.translate_batch_unique
else:
self.translate_batch = self.translate_batch_multiple
def __call__(self, z_batch, shifts):
"""Interface for Autoencoder"""
z_batch_r, z_batch_i = z_batch
return self.translate_batch(z_batch_r, z_batch_i, shifts)
def translate_batch_unique(self, z_batch_r, z_batch_i, shifts):
"""Translates batch in the case of a unique transformations (Faster)"""
tr = self.translation_matrices[self.index][0][shifts[:, 0]]
ti = self.translation_matrices[self.index][1][shifts[:, 0]]
z_batch_r_shifted = tr * z_batch_r - ti * z_batch_i
z_batch_i_shifted = tr * z_batch_i + ti * z_batch_r
return (
z_batch_r_shifted,
z_batch_i_shifted,
)
def translate_batch_multiple(self, z_batch_r, z_batch_i, shifts):
"""Translates batch in the case of multiple transformations"""
(Mr, Mi) = self.build_multipliers(shifts)
z_batch_r_shifted = Mr * z_batch_r - Mi * z_batch_i
z_batch_i_shifted = Mr * z_batch_i + Mi * z_batch_r
return (
z_batch_r_shifted,
z_batch_i_shifted,
)
def build_multipliers(self, shifts):
size_batch, n_transfo = shifts.shape
Mr = torch.ones((size_batch, self.z_dim), device=self.device)
Mi = torch.zeros((size_batch, self.z_dim), device=self.device)
for i in range(n_transfo):
tr = self.translation_matrices[i][0][shifts[:, i]]
ti = self.translation_matrices[i][1][shifts[:, i]]
# complex multiplication: update Mr and Mi jointly so the pre-update Mr is used for Mi
Mr, Mi = Mr * tr - Mi * ti, Mr * ti + Mi * tr
return (Mr, Mi)
def translate(self, zr, zi, shift):
"""Translate latent
Args:
z (1-dim tensor): latent vector
shift (int): amount by which to shift
"""
for i in range(len(shift)):
tr = self.translation_matrices[i][0][shift[i]]
ti = self.translation_matrices[i][1][shift[i]]
# complex multiplication: update zr and zi jointly so the pre-update zr is used for zi
zr, zi = zr * tr - zi * ti, zi * tr + zr * ti
return (zr, zi)
def generate_translation_matrices(self, cardinals, z_dim):
"""Generates family of translation matrices"""
def DFT_matrix(cardinal, z_dim):
i, j = np.meshgrid(np.arange(cardinal), np.arange(cardinal))
omega = np.exp(2 * np.pi * 1j / cardinal)
W = np.power(omega, i * j)
return W
# Loop over all transformations that can happen to the sample
XYZ = []
for i, t in enumerate(cardinals):
K = self.cardinals[i]
X_i = np.arange(K)
if z_dim % K: # creates in shift operator an unfinished cycle
second_dim = (
int(np.floor(z_dim / K)) + 1
) # TODO: not sure this is the right way
else: # creates in shift operator a finished cycle
second_dim = int(z_dim / K)
X_i = np.tile(X_i.flatten(), (second_dim))[:z_dim]
XYZ.append(X_i)
_all_translation_matrices = list()
for i in range(len(cardinals)):
translation_matrices = DFT_matrix(cardinals[i], z_dim)
translation_matrices = translation_matrices[:, XYZ[i]]
translation_matrices_r = np.real(translation_matrices)
translation_matrices_i = np.imag(translation_matrices)
_translation_matrices_r = torch.tensor(
translation_matrices_r, dtype=torch.float32, device=self.device,
)
_translation_matrices_i = torch.tensor(
translation_matrices_i, dtype=torch.float32, device=self.device,
)
_all_translation_matrices.append(
(_translation_matrices_r, _translation_matrices_i,)
)
return _all_translation_matrices
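def _complex_shift_operator_demo():
    """Minimal sketch, not part of the original file. A single cyclic transformation
    with 4 positions acting on a 4-dimensional complex latent: a shift of 1 multiplies
    each latent frequency by the corresponding DFT phase."""
    op = ComplexShiftOperator(cardinals=[4, 1, 1, 1], z_dim=4, device="cpu", unique_transfo=True)
    z_r, z_i = torch.ones(1, 4), torch.zeros(1, 4)
    shifts = torch.tensor([[1]])  # shift each sample by one position
    return op((z_r, z_i), shifts)  # (shifted real part, shifted imaginary part)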
class DisentangledRotation:
"""Performs rotation using rotation matrix of the form:
[cos, -sin], [sin, cos]
Args:
n_rotations (int): discrete rotations needed before identity is reached
"""
def __init__(self, n_rotations, device):
self.n_rotations = n_rotations
self.device = device
def __call__(self, z, angles):
"""Interface for Autoencoder"""
return self.rotate_batch(z, angles)
def rotate_batch(self, x_batch, angles):
"""Rotates batch"""
rotated_batch = []
if angles.dim() > 1:
angles = angles[:, 0]
else:
angles = angles
for i, x in enumerate(x_batch):
x_rotated = self.rotate(x, angles[i])
rotated_batch.append(x_rotated)
return torch.stack(rotated_batch)
def rotate(self, x, angle):
"""Clockwise rotation or translation
Args:
x (1D tensor): representing latent vector
angle (float): rotation angle in degrees
Returns: rotated tensor of same shape as x
"""
if x.dim() != 1:
raise ValueError(f"x must be a flattened 1D vector. Got shape {x.shape}")
rotation_matrix = self.get_rotation_matrix(angle, x.shape[0])
if not x.is_cuda:
rotation_matrix = rotation_matrix.cpu()
x_rotated = rotation_matrix.matmul(x)
return x_rotated
@functools.lru_cache()
def get_rotation_matrix(self, angle, dim):
"""Angle is the rotation angle in degrees.
Returns rotation matrix that operates on first two dimensions
"""
rotation_matrix = torch.diag(torch.ones(dim, device=self.device))
if angle == 0.0:
return rotation_matrix
radians = (angle / 360) * torch.tensor(2 * np.pi)
matrix_2d = torch.tensor(
[
[torch.cos(radians), -torch.sin(radians)],
[torch.sin(radians), torch.cos(radians)],
]
)
rotation_matrix[0][0] = matrix_2d[0][0]
rotation_matrix[0][1] = matrix_2d[0][1]
rotation_matrix[1][0] = matrix_2d[1][0]
rotation_matrix[1][1] = matrix_2d[1][1]
return rotation_matrix.to(device=self.device)
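def _disentangled_rotation_demo():
    """Minimal sketch, not part of the original file: a 90-degree rotation only mixes
    the first two latent dimensions and leaves the remaining ones unchanged."""
    rot = DisentangledRotation(n_rotations=3, device="cpu")
    x = torch.tensor([1.0, 0.0, 5.0, 5.0])
    return rot.rotate(x, 90.0)  # approximately [0, 1, 5, 5]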
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import models
import latent_operators
from datasets import datasets
from datasets.data_utils import x_to_image
import plot
import pdb
import os
import shutil
eps = 1e-20
class WeaklyComplexAutoEncoder:
"""Trains a weakly supervised shift operator.
Args:
data (AbstractDataset): contains train and test loaders with angles
z_dim (int): dimension of latent space
seed (int): for random number generation
translation (bool): if true, uses an offset identity matrix for rotation
"""
def __init__(
self,
data,
z_dim=405,
seed=0,
encoder_type="ComplexLinear",
decoder_type="ComplexLinear",
transformation_type=None,
device="cpu",
temperature=1.0,
output_directory="output",
save_name="",
use_softmax=1,
n_rotations=0,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1,)
):
self.z_dim = z_dim
self.seed = seed
self.set_seed()
self.data = data
self.device = device
self.encoder = getattr(models, encoder_type + "Encoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.decoder = getattr(models, decoder_type + "Decoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
cardinals = [
n_rotations + 1,
n_x_translations + 1,
n_y_translations + 1,
len(scaling_factors),
]
self.cardinals = cardinals
# So far, this model only works with a single transformation;
# the assert and loop below determine which one it is.
assert (np.array(cardinals) > 1).sum()==1
for i, cardinal in enumerate(cardinals):
if cardinal > 1:
self.K = cardinal
self.transfo_index = i
# function used for transformation
self.use_softmax = use_softmax
self.transform = self.get_transformation(transformation_type)
self.temperature = temperature
self.output_dir = output_directory
self.save_name = save_name
self.best_epoch = 0
self.best_mse = 0
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Generate Dataset
torch.autograd.set_detect_anomaly(True)
def get_transformation(self, name):
"""Returns function to performance transformation based name"""
if name is None:
return None
transformation = getattr(latent_operators, name)
return transformation(self.cardinals, self.z_dim, self.device, unique_transfo = True)
def train(self, loss_func, learning_rate, n_epochs, log_frequency):
self.encoder.train()
self.decoder.train()
params = list(self.encoder.parameters()) + list(self.decoder.parameters())
optimizer = torch.optim.Adam(params, lr=learning_rate)
train_losses = torch.FloatTensor(n_epochs)
valid_losses = torch.FloatTensor(n_epochs)
best_mse = np.inf
N_pairs = len(self.data.train_loader.dataset)
for epoch in range(n_epochs):
epoch_loss = 0
for i, (x1, x2, angles) in enumerate(self.data.train_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
optimizer.zero_grad()
loss = loss_func(x1, x2, angles)
loss.backward()
optimizer.step()
epoch_loss += loss.item() * x1.size(0)
epoch_loss = epoch_loss / N_pairs
print(f"Epoch {epoch} Train loss: {epoch_loss:0.3e}")
valid_mse = (
self.compute_mean_loss(loss_func, self.data.valid_loader)
.detach()
.item()
)
# train_mse = (
# self.compute_mean_loss(loss_func, self.data.train_loader)
# .detach()
# .item()
# )
# train_losses[epoch] = train_mse
train_losses[epoch] = epoch_loss
if valid_mse < best_mse:
self.update_state(mse=valid_mse, epoch=epoch)
best_mse = valid_mse
file_name = "checkpoint_{}.pth.tar".format(self.save_name)
self.save_best_checkpoint(
out_dir=self.output_dir,
file_name=file_name,
optimizer_state_dict=optimizer.state_dict(),
)
print(f"Epoch {epoch} validation loss: {valid_mse:0.3e}")
valid_losses[epoch] = valid_mse
return train_losses.detach().numpy(), valid_losses.detach().numpy()
def ifft(self, cps):
second_dim = cps.size(1)
K = len(self.transform.translation_matrices[self.transfo_index][0])
cps_r = cps[..., 0].to(device=self.device)
cps_i = cps[..., 1].to(device=self.device)
tr_r = self.transform.translation_matrices[self.transfo_index][0]
tr_i = self.transform.translation_matrices[self.transfo_index][1]
alternative = cps_r[:, None, ...] * tr_r - cps_i[:, None, ...] * tr_i
alternative = alternative.mean(2) # mean over frequencies
return alternative
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
with torch.no_grad():
z1 = self.encoder(x1)
x1_reconstruction_r = self.decoder(z1)
return x1_reconstruction_r
def reconstruct_x2(self, x1, x2, param=None):
"""Reconstructs x2 using model and latent transformation"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
batch_size = x1.size(0)
with torch.no_grad():
z1 = self.encoder(x1)
z2 = self.encoder(x2)
angles_probas = self.compute_angles_probas(x1, x2, z1, z2)
predicted_angle = angles_probas.detach().argmax(-1, keepdims=True)
z_transformed = self.transform(z1, predicted_angle)
x2_reconstruction_r = self.decoder(z_transformed)
return x2_reconstruction_r
def plot_multiple_transformations(self, param_name='angle', indices=None, train_set=False, save_name=None):
"""Plots all rotated reconstructions for given samples"""
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
title = (
"Translations" if param_name=='angle' != "angle" else "Rotations"
)
plot.plot_transformations_complex(
X,
self,
title,
save_name=save_name,
param_name=param_name,
supervised=False,
)
def plot_x1_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x1_reconstructions(
pairs, self.reconstruct_x1, indices, train_set, save_name
)
def plot_x2_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x2_reconstructions(
pairs, self.reconstruct_x2, indices, train_set, save_name
)
def compute_angles_probas(self, x1, x2, z1, z2):
cps = self.computes_cross_power_spectrum(z1[0], z1[1], z2[0], z2[1])
invfs_alter = self.ifft(cps)
angles_probas = invfs_alter
return angles_probas
def reconstruction_mse_transformed_z1_weak(self, x1, x2, angles, use_argmax=False):
"""Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1), not using ground-truth angles"""
criterion = torch.nn.MSELoss(reduction="none")
batch_size = x1.size(0)
z1 = self.encoder(x1)
z2 = self.encoder(x2)
prod_size = np.prod(x1.size())
x1_reconstruction_r = self.decoder(z1)
x1_reconstruction_loss = criterion(x1_reconstruction_r, x1)
x1_reconstruction_loss = x1_reconstruction_loss.mean()
# TODO this is not adapted to product of shift operators, it's looking only at the 1st cardinal
# Transform according to all possible angles, weighted
angles_probas = self.compute_angles_probas(x1, x2, z1, z2)
if use_argmax:
predicted_angle = angles_probas.detach().argmax(
-1, keepdims=True
)
z_transformed = self.transform(z1, predicted_angle)
x2_reconstruction_r = self.decoder(z_transformed)
x2_reconstruction_loss = criterion(x2_reconstruction_r, x2)
x2_reconstruction_loss = x2_reconstruction_loss.mean()
else:
all_angles = torch.arange(self.K).repeat(1, batch_size).view(-1, 1)
temp = self.temperature
mask = torch.softmax(angles_probas / temp, dim=-1)
repeat_z1 = (
z1[0][:, None, :].repeat(1, self.K, 1).view(batch_size * self.K, -1),
z1[1][:, None, :].repeat(1, self.K, 1).view(batch_size * self.K, -1),
)
x2_repeat = (
x2[:, None, ...]
.repeat(1, self.K, 1, 1, 1)
.view(batch_size * self.K, x2.size(1), x2.size(2), x2.size(3))
)
z_transformed = self.transform(repeat_z1, all_angles)
x2_reconstruction_r = self.decoder(z_transformed)
x2_reconstruction_transformed_loss = (
criterion(x2_reconstruction_r, x2_repeat)
.sum((1, 2, 3)) # sums over image dim
.view(batch_size, -1)
)
x2_reconstruction_loss = (mask * x2_reconstruction_transformed_loss).sum() / prod_size
loss = x1_reconstruction_loss + x2_reconstruction_loss
return loss
def computes_cross_power_spectrum(
self, z_batch_r1, z_batch_i1, z_batch_r2, z_batch_i2
):
"""Computes Cross Power spectrum (no FFT) """
batch_size = z_batch_r1.size(0)
z1z2_batch_r = (
z_batch_r1 * z_batch_r2 + z_batch_i1 * z_batch_i2
) # recall we use the conjugate of z_batch_2, hence the + here
z1z2_batch_i = (
-z_batch_r1 * z_batch_i2 + z_batch_i1 * z_batch_r2
) # recall we use the conjugate of z_batch_2, hence the - in front here
norm_z1z2_batch = ((z1z2_batch_r ** 2 + z1z2_batch_i ** 2)) ** 0.5
cps_r = z1z2_batch_r / norm_z1z2_batch
cps_i = z1z2_batch_i / norm_z1z2_batch
cps = torch.cat([cps_r[..., None], cps_i[..., None]], -1)
return cps
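# Note (added): z1 * conj(z2) normalized to unit magnitude keeps only the relative
# phase between the two latents; ifft() above projects it back onto the K possible
# shifts, so its argmax recovers the transformation relating x1 and x2 without
# using the ground-truth angles.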
def compute_test_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
N = 0
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
bs = x1.size(0)
loss_batch = loss_func(x1, x2, angles, True)*bs
N += bs
losses.append(loss_batch)
test_loss = torch.stack(losses).sum() / float(N)
self.encoder.train()
self.decoder.train()
return test_loss
def compute_mean_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
loss_batch = loss_func(x1, x2, angles, True)
losses.append(loss_batch)
mean_loss = torch.stack(losses).mean()
self.encoder.train()
self.decoder.train()
return mean_loss
def run(
self, learning_rate=0.0005, n_epochs=10, log_frequency=50
):
"""Runs experiment for autoencoder reconstruction."""
loss_func = self.reconstruction_mse_transformed_z1_weak
train_loss, valid_loss = self.train(
loss_func, learning_rate, n_epochs, log_frequency
)
train_mse = self.compute_mean_loss(loss_func, self.data.train_loader)
print(f"Train MSE: {train_mse}")
valid_mse = self.compute_mean_loss(loss_func, self.data.valid_loader)
print(f"Valid MSE: {valid_mse}")
test_mse = self.compute_test_loss(loss_func, self.data.test_loader_batch_100)
print(f"Test MSE: {test_mse}")
return train_loss, valid_loss, train_mse, valid_mse, test_mse
def update_state(self, mse, epoch):
self.best_mse = mse
self.best_epoch = epoch
def load_model(self, path_to_checkpoint):
checkpoint = torch.load(path_to_checkpoint)
self.best_epoch = checkpoint["best_epoch"]
self.encoder.load_state_dict(checkpoint["encoder_state_dict"])
self.decoder.load_state_dict(checkpoint["decoder_state_dict"])
self.best_mse = checkpoint["best_mse"]
return checkpoint["best_mse"], checkpoint["best_epoch"]
def get_current_state(self):
return {
"encoder_state_dict": self.encoder.state_dict(),
"decoder_state_dict": self.decoder.state_dict(),
"best_epoch": self.best_epoch,
"best_mse": self.best_mse,
}
def save_best_checkpoint(self, out_dir, file_name, optimizer_state_dict):
"""
:param file_name: filename to save checkpoint in.
:param optimizer_state_dict: state of the optimizer.
:return: str to path where the model is saved.
"""
state = self.get_current_state()
state["optimizer_state_dict"] = optimizer_state_dict
best_path = os.path.join(out_dir, "best_" + file_name)
torch.save(state, best_path)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import torch
import json
import os
import random
import numpy as np
import models
import latent_operators
import plot
from datasets import datasets, transformations
class AutoEncoder:
"""Trains an autoencoder on rotated shapes.
Args:
data (AbstractDataset): contains train and test loaders with transformation params
z_dim (int): dimension of latent space
seed (int): for random number generation
translation (bool): if true, uses an offset identity matrix for rotation
shift_x (bool): use shift values instead of angles in supervision.
"""
def __init__(
self,
data,
z_dim=700,
seed=0,
encoder_type="Linear",
decoder_type="Linear",
latent_operator_name=None,
device="cpu",
learning_rate=0.0005,
n_epochs=5,
):
self.z_dim = z_dim
self.seed = seed
self.set_seed()
self.data = data
self.device = device
self.encoder_type = encoder_type
self.decoder_type = decoder_type
self.encoder = getattr(models, encoder_type + "Encoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.decoder = getattr(models, decoder_type + "Decoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.encoder_best_valid = self.encoder
self.decoder_best_valid = self.decoder
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.transformation_param_name = self.get_transformation_param_name()
# function used for latent transformation
self.use_latent_op = False if latent_operator_name is None else True
self.latent_operator_name = latent_operator_name
self.latent_operator = self.get_latent_operator(latent_operator_name)
self.train_losses = []
self.valid_losses = []
self.final_test_loss = None
def __repr__(self):
model = {
"encoder_type": self.encoder_type,
"decoder_type": self.decoder_type,
"z_dim": self.z_dim,
"latent_operator": self.latent_operator_name,
"batch_size": self.data.batch_size,
"learning_rate": self.learning_rate,
"n_epochs": self.n_epochs,
"data": str(self.data),
}
return json.dumps(model)
def save(self, path, indices=None):
os.makedirs(path, exist_ok=True)
self.save_model_configs(path)
self.save_models(path)
self.save_losses(path)
self.save_plots(path)
def save_model_configs(self, path):
model_configs_str = self.__repr__()
model_configs = json.loads(model_configs_str)
file_path = os.path.join(path, "model_configs.json")
with open(file_path, "w") as outfile:
json.dump(model_configs, outfile)
def save_models(self, path):
encoder_path = os.path.join(path, "encoder.pt")
torch.save(self.encoder.state_dict(), encoder_path)
decoder_path = os.path.join(path, "decoder.pt")
torch.save(self.decoder.state_dict(), decoder_path)
def load_models(self, path, device="cpu"):
self.encoder.load_state_dict(
torch.load(os.path.join(path, "encoder.pt"), map_location=device)
)
self.decoder.load_state_dict(
torch.load(os.path.join(path, "decoder.pt"), map_location=device)
)
def save_losses(self, path):
file_path = os.path.join(path, "train_losses.npy")
np.save(file_path, self.train_losses)
file_path = os.path.join(path, "valid_losses.npy")
np.save(file_path, self.valid_losses)
file_path = os.path.join(path, "test_loss.npy")
np.save(file_path, self.final_test_loss)
def save_plots(self, path):
for train_set in [True, False]:
set_name = "train" if train_set else "test"
x1_plot_path = os.path.join(path, f"x1_{set_name}_reconstructions")
self.plot_x1_reconstructions(save_name=x1_plot_path, train_set=train_set)
# store x2 reconstructions only when using supervised latent operator
if self.use_latent_op:
x2_plot_path = os.path.join(path, f"x2_{set_name}_reconstructions")
self.plot_x2_reconstructions(
save_name=x2_plot_path, train_set=train_set
)
transformation_name = (
"translations"
if self.transformation_param_name != "angle"
else "rotations"
)
multiple_rotations_path = os.path.join(
path, f"x_{set_name}_{transformation_name}"
)
self.plot_multiple_rotations(
save_name=multiple_rotations_path, train_set=train_set
)
def save_best_validation(self, path, indices=None):
self.encoder = self.encoder_best_valid
self.decoder = self.decoder_best_valid
self.save(path, indices=indices)
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Generate Dataset
torch.autograd.set_detect_anomaly(True)
def get_transformation_param_name(self):
"""Returns the parameter used for transformation"""
if self.data.n_rotations > 1:
return "angle"
elif self.data.n_x_translations > 1:
return "shift_x"
elif self.data.n_y_translations > 1:
return "shift_y"
else:
raise ValueError("No transformation found")
def get_latent_operator(self, name):
"""Returns function to performance transformation based name"""
if name is None:
return None
latent_operator = getattr(latent_operators, name)
return latent_operator(self.n_transformations, self.device)
@property
def n_transformations(self):
if self.data.n_rotations > 1:
return self.data.n_rotations
elif self.data.n_x_translations > 1:
return self.data.n_x_translations
elif self.data.n_y_translations > 1:
return self.data.n_y_translations
else:
raise ValueError("No transformation found")
def train(self, loss_func, stop_early=False, log_frequency=None):
self.encoder.train().to(self.device)
self.decoder.train().to(self.device)
params = list(self.encoder.parameters()) + list(self.decoder.parameters())
optimizer = torch.optim.Adam(params, lr=self.learning_rate)
if log_frequency is None:
log_frequency = self.set_log_frequency()
for epoch in range(self.n_epochs):
running_loss = 0.0
print(f"Epoch {epoch}")
self.log_train_val_loss(loss_func)
for i, (x1, x2, params) in enumerate(self.data.train_loader):
print(f"Training batch {i}", end="\r")
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
angles = self.get_angles(params)
angles = angles.to(device=self.device)
optimizer.zero_grad()
loss = loss_func(x1, x2, angles)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % log_frequency == (log_frequency - 1):
print(f"Running loss: {running_loss / log_frequency:0.3e}")
running_loss = 0.0
if stop_early:
return None
train_loss, valid_loss = self.log_train_val_loss(loss_func)
self.copy_models_validation(valid_loss)
# test loss per sample (using batch size 1)
self.final_test_loss = self.compute_total_loss(
self.data.test_loader_batch_1, loss_func
)
print(f"Test Loss: {self.final_test_loss:0.3e}")
def set_log_frequency(self):
frequency = len(self.data.train_loader) // 10
return frequency
def copy_models_validation(self, valid_loss):
"""Copies models with best validation"""
if valid_loss < np.min(self.valid_losses):
self.encoder_best_valid = copy.deepcopy(self.encoder)
self.decoder_best_valid = copy.deepcopy(self.decoder)
def log_train_val_loss(self, loss_func, show_print=True):
train_loss = self.compute_total_loss(self.data.train_loader, loss_func)
valid_loss = self.compute_total_loss(self.data.valid_loader, loss_func)
self.train_losses.append(train_loss)
self.valid_losses.append(valid_loss)
if show_print:
print(f"Total loss train: {train_loss:0.3e} validation: {valid_loss:0.3e}")
return train_loss, valid_loss
def compute_total_loss(self, loader, loss_func):
self.encoder.eval()
self.decoder.eval()
losses = []
with torch.no_grad():
for x1, x2, params in loader:
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
angles = self.get_angles(params)
angles = angles.to(device=self.device)
losses.append(loss_func(x1, x2, angles).cpu())
mean_loss = torch.stack(losses).mean()
self.encoder.train()
self.decoder.train()
return mean_loss
def reconstruction_mse_x1(self, x1, x2, angles):
"""Computes MSE x1 reconstruction loss"""
criterion = torch.nn.MSELoss()
z = self.encoder(x1)
x1_reconstruction = self.decoder(z)
loss = criterion(x1_reconstruction, x1)
return loss
def reconstruction_mse_transformed_z1(self, x1, x2, angles):
"""Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1)"""
criterion = torch.nn.MSELoss()
z = self.encoder(x1)
x1_reconstruction = self.decoder(z)
x1_reconstruction_loss = criterion(x1_reconstruction, x1)
z_transformed = self.latent_operator(z, angles)
x2_reconstruction_loss = criterion(self.decoder(z_transformed), x2)
loss = x1_reconstruction_loss + x2_reconstruction_loss
return loss
def reconstruction_mse_frozen_z1(self, x1, x2, angles):
"""Reconstruction loss of x2 from x1 without transformations"""
criterion = torch.nn.MSELoss()
z = self.encoder(x1)
x2_reconstruction = self.decoder(z)
loss = criterion(x2_reconstruction, x2)
return loss
def compute_mean_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
losses = []
for x1, x2, params in data_loader:
angles = self.get_angles(params)
losses.append(loss_func(x1, x2, angles).cpu())
mean_loss = torch.stack(losses).mean()
return mean_loss
def get_angles(self, params):
"""Returns tensor of angles for translations in x or rotations."""
param_name = self.transformation_param_name
if param_name in ("shift_x", "shift_y"):
angles = torch.tensor(
[
transformations.shift_to_angle(
getattr(p, param_name), self.n_transformations,
)
for p in params
]
)
else:
angles = torch.tensor([p.angle for p in params])
return angles
def run(self, log_frequency=None, stop_early=False):
"""Runs experiment for autoencoder reconstruction.
Args:
log_frequency (int): number of batches after which to print loss
stop_early (bool): stop after a single log_frequency number of batches.
Useful for testing without waiting for long training.
"""
if self.latent_operator_name is None:
loss_func = self.reconstruction_mse_x1
elif self.latent_operator_name in ["ShiftOperator", "DisentangledRotation"]:
loss_func = self.reconstruction_mse_transformed_z1
# TODO: what is frozen_rotation?
elif self.latent_operator_name == "frozen_rotation":
loss_func = self.reconstruction_mse_frozen_z1
else:
raise ValueError(
f"transformation type {self.transformation_type} not supported"
)
self.train(
loss_func, log_frequency=log_frequency, stop_early=stop_early,
)
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
with torch.no_grad():
z = self.encoder(x1)
y = self.decoder(z)
return y
def reconstruct_transformed_x1(self, x1, param):
"""Reconstructs x1 transformed using model"""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
with torch.no_grad():
x_transformed = transformations.transform(x1.squeeze(0), param)
z = self.encoder(x_transformed.unsqueeze(0))
y = self.decoder(z)
return y
def reconstruct_x2(self, x1, param):
"""Reconstructs x2 using model and latent transformation"""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
with torch.no_grad():
z = self.encoder(x1)
angle = self.get_angles([param]).unsqueeze(0)
z_transformed = self.latent_operator(z, angle)
x2 = self.decoder(z_transformed)
return x2
def plot_x1_reconstructions(self, indices=None, train_set=False, save_name=None):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=4)
plot.plot_x1_reconstructions(
pairs, self.reconstruct_x1, indices, train_set, save_name
)
def plot_x2_reconstructions(self, indices=None, train_set=False, save_name=None):
"""Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=4)
plot.plot_x2_reconstructions(
pairs, self.reconstruct_x2, indices, train_set, save_name
)
def plot_multiple_rotations(self, indices=None, train_set=False, save_name=None):
"""Plots all rotated reconstructions for given samples"""
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
title = (
"Translations" if self.transformation_param_name != "angle" else "Rotations"
)
plot.plot_rotations(
X,
self,
self.n_transformations,
title,
save_name=save_name,
param_name=self.transformation_param_name,
use_latent_op=self.use_latent_op,
)
def load_data(configs, path):
data_configs = json.loads(configs["data"])
if "shapes" and "2k-classes" in path:
data = datasets.SimpleShapes(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
n_classes=2000,
seed=0,
)
elif "mnist" in path:
data = datasets.ProjectiveMNIST(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
train_set_proportion=0.01,
valid_set_proportion=0.01,
test_set_proportion=1.0,
seed=0,
)
else:
raise ValueError("data not found")
return data
def load(path):
with open(os.path.join(path, "model_configs.json")) as f:
configs = json.load(f)
data = load_data(configs, path)
model_type = "CCI" if "cci" in path else "Linear"
model = AutoEncoder(
data,
z_dim=configs["z_dim"],
latent_operator_name=configs["latent_operator"],
encoder_type=model_type,
decoder_type=model_type,
)
model.load_models(path)
return model
if __name__ == "__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"running on {device}")
n_epochs = 2
simple_shapes = datasets.SimpleShapes(16)
print("Training Autoencder")
model = AutoEncoder(simple_shapes, device=device, n_epochs=n_epochs)
model.run()
print("Training Autoencder with Latent Translation")
model_with_rotation = AutoEncoder(
simple_shapes,
latent_operator_name="ShiftOperator",
device=device,
n_epochs=n_epochs,
)
model_with_rotation.run()
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""Implements CCI VAE
https://arxiv.org/abs/1804.03599
"""
import torch
import os
import numpy as np
import models
import json
import plot
import copy
import random
from datasets import datasets, transformations
from datasets.data_utils import x_to_image
from sklearn.decomposition import PCA
import matplotlib
import matplotlib.pyplot as plt
class CCIVariationalAutoEncoder:
"""Trains an autoencoder on rotated shapes.
Args:
data (AbstractDataset): contains train and test loaders with angles
model (CCIVAE model): contains forward function with encoder / decoder
beta (float): beta in beta-VAE model
c_max (float): maximum value for controlled capacity parameter in CCI VAE.
z_dim (int): dimension of latent space
seed (int): for random number generation
translation (bool): if true, uses an offset identity matrix for rotation
"""
def __init__(
self,
data,
model=models.CCIVAE,
beta=1000.0,
c_max=36.0,
z_dim=30,
seed=0,
device="cpu",
learning_rate=0.0005,
n_epochs=5,
distribution="gaussian",
):
self.beta, self.c_max = beta, c_max
self.c = 0.0
self.z_dim = z_dim
self.data = data
self.device = device
self.model_cls = model
self.model = model(
self.data.n_pixels, self.data.n_channels, z_dim, distribution=distribution
)
self.model.to(device=device)
self.model_best_valid = self.model
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.distribution = distribution
self.seed = seed
self.set_seed()
self.train_losses = []
self.kl_losses = []
self.reconstruction_losses = []
self.valid_losses = []
self.final_test_loss = None
def __repr__(self):
model = {
"model_class": str(self.model_cls),
"beta": self.beta,
"c_max": self.c_max,
"distribution": self.distribution,
"z_dim": self.z_dim,
"batch_size": self.data.batch_size,
"learning_rate": self.learning_rate,
"n_epochs": self.n_epochs,
"data": str(self.data),
}
return json.dumps(model)
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Generate Dataset
torch.autograd.set_detect_anomaly(True)
def compute_loss(self, x1):
"""Loss for controlled capacity beta vae (CCI VAE)
https://arxiv.org/abs/1804.03599
"""
if self.distribution == "gaussian":
criterion = torch.nn.MSELoss(reduction="sum")
elif self.distribution == "bernoulli":
criterion = torch.nn.BCELoss(reduction="sum")
else:
raise ValueError(f"distribution {self.distribution} not supported")
# assuming a Gaussian Distribution
out, mu, log_var = self.model(x1)
reconstruction_loss = criterion(out, x1)
# https://arxiv.org/abs/1312.6114
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
kl_divergence = (
-0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).mean(dim=0)
).sum()
return reconstruction_loss, kl_divergence
def train(self, stop_early=False, log_frequency=None, track_losses=True):
"""Trains controlled capacity beta vae (CCI VAE)
https://arxiv.org/abs/1804.03599
Learning rate used in the paper is 5e-4
If verbose is False, previous loss print is overridden
If stop_early is True, training stops after first logged loss.
This is useful for testing.
"""
self.model.train().to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
c_step_size = (self.c_max - self.c) / self.n_epochs
if log_frequency is None:
log_frequency = self.set_log_frequency()
for epoch in range(self.n_epochs):
running_loss = 0.0
print(f"Epoch {epoch}")
if track_losses:
self.log_train_val_loss()
running_loss = 0.0
running_reconstruction_loss, running_kl_divergence = 0.0, 0.0
# update controlled capacity parameter
self.c += c_step_size
for i, (x1, _, _) in enumerate(self.data.train_loader):
x1 = x1.to(device=self.device)
optimizer.zero_grad()
reconstruction_loss, kl_divergence = self.compute_loss(x1)
loss = reconstruction_loss + self.beta * (kl_divergence - self.c).abs()
loss.backward()
optimizer.step()
running_loss += loss.item()
running_reconstruction_loss += (
reconstruction_loss.cpu().detach().numpy()
)
running_kl_divergence += kl_divergence.cpu().detach().numpy()
if i % log_frequency == (log_frequency - 1):
normalized_loss = running_loss / log_frequency
normalized_reconstruction_loss = (
running_reconstruction_loss / log_frequency
)
normalized_kl_divergence = running_kl_divergence / log_frequency
print(f"Running Total Loss: {normalized_loss:0.3e}")
print(
f"Running Reconstruction Loss: {normalized_reconstruction_loss:0.3e}"
f" KL Divergence: {normalized_kl_divergence:0.3e}"
)
self.kl_losses.append(normalized_kl_divergence)
self.reconstruction_losses.append(normalized_reconstruction_loss)
running_loss = 0.0
running_reconstruction_loss = 0.0
running_kl_divergence = 0.0
if stop_early:
return None
if track_losses:
train_loss, valid_loss = self.log_train_val_loss()
self.copy_models_validation(valid_loss)
# compute test loss per sample
self.final_test_loss = self.compute_total_loss(
self.data.test_loader_batch_1
)
print(f"Test Loss: {self.final_test_loss:0.3e}")
def set_log_frequency(self):
frequency = len(self.data.train_loader) // 10
return frequency
def copy_models_validation(self, valid_loss):
"""Copies models with best validation"""
if valid_loss < np.min(self.valid_losses):
self.model_best_valid = copy.deepcopy(self.model)
def log_train_val_loss(self, show_print=True):
train_loss = self.compute_total_loss(self.data.train_loader)
valid_loss = self.compute_total_loss(self.data.valid_loader)
self.train_losses.append(train_loss)
self.valid_losses.append(valid_loss)
if show_print:
print(f"Total loss train: {train_loss:0.3e} validation: {valid_loss:0.3e}")
return train_loss, valid_loss
def compute_total_loss(self, loader):
"""Computes total average loss on given loader"""
self.model.eval()
losses = []
with torch.no_grad():
for x1, x2, params in loader:
x1 = x1.to(device=self.device)
reconstruction_loss, kl_divergence = self.compute_loss(x1)
loss = reconstruction_loss + self.beta * (kl_divergence - self.c).abs()
losses.append(loss.item())
mean_loss = np.mean(losses)
self.model.train()
return mean_loss
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.model.eval().cpu()
with torch.no_grad():
y, _, _ = self.model(x1)
return y
def reconstruct_mean(self, x1):
self.model.eval().cpu()
with torch.no_grad():
_, mu, _ = self.model(x1)
out = self.model.decoder(mu)
return out
def save_best_validation(self, path, indices=None):
"""Saves results best for model with best validation loss"""
self.model = self.model_best_valid
self.save(path, indices=indices)
def save(self, path, indices=None):
os.makedirs(path, exist_ok=True)
self.save_model_configs(path)
self.save_model(path)
self.save_losses(path)
self.save_plots(path)
def save_model_configs(self, path):
model_configs_str = self.__repr__()
model_configs = json.loads(model_configs_str)
file_path = os.path.join(path, "model_configs.json")
with open(file_path, "w") as outfile:
json.dump(model_configs, outfile)
def load_model(self, path):
device = torch.device("cpu")
model = self.model_cls(self.data.n_pixels, self.data.n_channels, self.z_dim)
model.load_state_dict(torch.load(path, map_location=device))
self.model = model
self.model.to(device=device)
def save_model(self, path):
full_path = os.path.join(path, "model.pt")
torch.save(self.model.state_dict(), full_path)
def save_losses(self, path):
file_path = os.path.join(path, "kl_divergence.npy")
np.save(file_path, self.kl_losses)
file_path = os.path.join(path, "reconstruction_losses.npy")
np.save(file_path, self.reconstruction_losses)
file_path = os.path.join(path, "train_losses.npy")
np.save(file_path, self.train_losses)
file_path = os.path.join(path, "valid_losses.npy")
np.save(file_path, self.valid_losses)
file_path = os.path.join(path, "test_loss.npy")
np.save(file_path, self.final_test_loss)
def save_plots(self, path):
matplotlib.use("Agg")
for train_set in [True, False]:
set_name = "train" if train_set else "test"
x1_plot_path = os.path.join(path, f"x1_{set_name}_reconstructions")
self.plot_x1_reconstructions(save_name=x1_plot_path, train_set=train_set)
latent_traversal_path = os.path.join(path, f"x_{set_name}_latent_traversal")
self.plot_latent_traversal(
save_name=latent_traversal_path, train_set=train_set
)
def plot_x1_reconstructions(self, indices=None, train_set=False, save_name=None):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=4)
plot.plot_x1_reconstructions(
pairs, self.reconstruct_mean, indices, train_set, save_name
)
def plot_latent_traversal(
self,
indices=None,
num_std=6.0,
train_set=True,
save_name=None,
fixed_range=True,
):
"""Traverses latent space from [mu - 3 * std, mu + 3 * std] for given indices.
If fixed_range is True, then [-num_std, num_std] is the interval.
"""
self.model.eval().cpu()
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=3)
for index in indices:
sample_save_name = save_name
if save_name is not None:
sample_save_name = save_name + "_sample_" + str(index)
self._plot_latent_traversal_helper(
pairs, index, num_std, train_set, sample_save_name, fixed_range
)
def plot_single_latent_traversal(
self, index=3, train_set=True, latent_dim=0, save_name=None, num_std=6.0,
):
self.model.eval().cpu()
pairs = self.data.X_train if train_set else self.data.X_test
sample_save_name = save_name
if save_name is not None:
sample_save_name = save_name + "_sample_" + str(index)
x1, x2, p = pairs[index]
title = "Training" if train_set else "Test"
traversal_path = CCIVariationalAutoEncoder.get_std_path(num_std)
num_subplots = len(traversal_path) + 1
fig, axs = plt.subplots(1, num_subplots, figsize=(12, 16))
axs[0].imshow(x1.squeeze())
axs[0].set_title(f"{title}: x1, latent {latent_dim}")
axs[0].set_xticks([])
axs[0].set_yticks([])
with torch.no_grad():
_, mu, log_var = self.model(x1.unsqueeze(0))
z = mu
for i, step in enumerate(traversal_path):
z_shifted = z.clone().cpu().detach()
z_shifted[0][latent_dim] = step
with torch.no_grad():
reconstruction = self.model.decoder(z_shifted)
axs[i + 1].imshow(reconstruction.squeeze().detach().numpy())
axs[i + 1].set_xticks([])
axs[i + 1].set_yticks([])
fig.tight_layout()
if save_name:
# close figure to speed up saving
plt.savefig(sample_save_name, bbox_inches="tight", dpi=100)
plt.close(fig)
@staticmethod
def get_std_path(num_std):
"""Returns list of std steps.
e.g. [-3, -2, -1, 0, 1, 2, 3] for num_std=3
"""
step_size = num_std / 3.0
positive_steps = [i * step_size for i in range(1, 4)]
negative_steps = sorted(list(-1 * np.array(positive_steps)))
path = negative_steps + [0] + positive_steps
return path
def _plot_latent_traversal_helper(
self, X, index, num_std, train_set, save_name, fixed_range
):
title = "Training" if train_set else "Test"
traversal_path = CCIVariationalAutoEncoder.get_std_path(num_std)
num_subplots = len(traversal_path) + 1
x1, x2, p = X[index]
fig, axs = plt.subplots(self.z_dim, num_subplots, figsize=(20, 60))
for dim in range(self.z_dim):
axs[dim, 0].imshow(x1.squeeze())
axs[dim, 0].set_title(f"{title}: x1, latent {dim}")
axs[dim, 0].set_xticks([])
axs[dim, 0].set_yticks([])
with torch.no_grad():
_, mu, log_var = self.model(x1.unsqueeze(0))
z = mu
for i, step in enumerate(traversal_path):
if not fixed_range:
z_shifted = CCIVariationalAutoEncoder.shift_latent(
z, dim, step, log_var
)
else:
z_shifted = z.clone().cpu().detach()
z_shifted[0][dim] = step
with torch.no_grad():
reconstruction = self.model.decoder(z_shifted)
axs[dim, i + 1].imshow(reconstruction.squeeze().detach().numpy())
if not fixed_range:
axs[dim, i + 1].set_title(f"std {step:.1f}")
else:
axs[dim, i + 1].set_title(f"{step:.1f}")
axs[dim, i + 1].set_xticks([])
axs[dim, i + 1].set_yticks([])
fig.tight_layout()
if save_name:
# close figure to speed up saving
plt.savefig(save_name, bbox_inches="tight", dpi=100)
plt.close(fig)
@staticmethod
def shift_latent(z, dim, num_std, log_var):
"""Shifts latent by num_std along index of latent dimension"""
std = torch.exp(log_var / 2.0)
z_shifted = z.clone().cpu().detach()
z_shifted[0][dim] += num_std * std[0][dim]
return z_shifted
def get_latents(self, train_set=False, num_batches=1000):
"""Returns latent representation for random indices"""
self.model.eval().cpu()
loader = self.data.train_loader if train_set else self.data.test_loader
Z = []
for i, (x1, x2, p) in enumerate(loader):
z = self.get_latent(x1)
Z.append(z)
if i == num_batches:
break
Z = torch.cat(Z)
return Z
def get_latent(self, x):
with torch.no_grad():
_, mu, var = self.model(x)
z = self.model.reparameterize(mu, var)
return z
def compute_latent_variances(self, n_samples=None):
"""Computes variance of latents across transformations of a sample"""
if n_samples is None:
n_samples = len(self.data.X_orig_test)
variances = []
for i in range(n_samples):
x1 = self.data.X_orig_test[i]
self.model.eval().cpu()
with torch.no_grad():
sample_latents = []
for param in self.data.transform_params:
x_transformed = transformations.transform(x1, param)
_, mu, log_var = self.model(x_transformed.unsqueeze(0))
# use mean of latent
z = mu
sample_latents.append(z)
sample_latents = torch.cat(sample_latents)
sample_var = sample_latents.var(dim=0)
variances.append(sample_var)
variances = torch.stack(variances).numpy()
return variances
def compute_latents_per_shape(self, n_samples=None):
"""Computes variance of latents across transformations of a sample"""
if n_samples is None:
n_samples = len(self.data.X_orig_test)
latents = []
for i in range(n_samples):
x1 = self.data.X_orig_test[i]
self.model.eval().cpu()
with torch.no_grad():
sample_latents = []
for param in self.data.transform_params:
x_transformed = transformations.transform(x1, param)
_, mu, log_var = self.model(x_transformed.unsqueeze(0))
# use mean of latent
z = mu
sample_latents.append(z)
sample_latents = torch.cat(sample_latents)
latents.append(sample_latents)
latents = torch.stack(latents).numpy()
return latents
def pca_ranked_eigenvalues(self, n_samples=None):
"""Returns average of ranked normalized eigenvalues for latents"""
latents = self.compute_latents_per_shape(n_samples=n_samples)
n_components = self.data.n_rotations + 1
aggregate_ranked_normalized_eigenvalues = []
for latent in latents:
pca = PCA(n_components=n_components)
pca.fit(latent)
ranked_normalized_eigenvalues = np.sort(pca.explained_variance_ratio_)[::-1]
aggregate_ranked_normalized_eigenvalues.append(
ranked_normalized_eigenvalues
)
aggregate_ranked_normalized_eigenvalues = np.stack(
aggregate_ranked_normalized_eigenvalues
)
average_var_explained = np.mean(aggregate_ranked_normalized_eigenvalues, axis=0)
return average_var_explained
def compute_mutual_info(variances):
"""Variances is a numpy array with shape (n_samples, z_dim)"""
n = variances.shape[0]
m_info = np.log(2 * np.pi * variances).sum(0) / (2.0 * n)
return m_info
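# Worked example (added): for variances = np.ones((5, 3)) every entry is 1, so each of
# the 3 latent dimensions gets 5 * log(2 * pi) / (2 * 5) = 0.5 * log(2 * pi),
# approximately 0.919.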
def load_data(configs, path):
data_configs = json.loads(configs["data"])
if "shapes" and "2k-classes" in path:
data = datasets.SimpleShapes(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
n_classes=2000,
seed=0,
)
elif "mnist" in path:
data = datasets.ProjectiveSingleDigitMNIST(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
train_set_proportion=0.1,
valid_set_proportion=0.1,
test_set_proportion=1.0,
seed=0,
)
else:
raise ValueError("data not found")
return data
def load(path):
with open(os.path.join(path, "model_configs.json")) as f:
configs = json.load(f)
data = load_data(configs, path)
model = CCIVariationalAutoEncoder(
data,
z_dim=configs["z_dim"],
beta=configs["beta"],
c_max=configs["c_max"],
distribution=configs["distribution"],
learning_rate=configs["learning_rate"],
n_epochs=configs["n_epochs"],
)
model.load_model(os.path.join(path, "model.pt"))
return model
if __name__ == "__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"running on {device}")
n_epochs = 2
batch_size = 16
simple_shapes = datasets.SimpleShapes(batch_size)
vae = CCIVariationalAutoEncoder(
simple_shapes, beta=0.0, c_max=0.0, device=device, n_epochs=n_epochs
)
vae.train()
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import models
import latent_operators
from datasets import datasets
from datasets.data_utils import x_to_image
import plot
import pdb
import os
import shutil
import numpy as np
eps = 1e-20
class ComplexAutoEncoder:
"""Trains a shift operator.
Args:
data (AbstractDataset): contains train and test loaders with angles
z_dim (int): dimension of latent space
seed (int): for random number generation
translation (bool): if true, uses an offset identity matrix for rotation
"""
def __init__(
self,
data,
z_dim=405,
seed=0,
encoder_type="ComplexLinear",
decoder_type="ComplexLinear",
transformation_types=None,
indexes=None,
device="cpu",
output_directory="output",
save_name="",
        n_rotations=0,
        n_x_translations=0,
        n_y_translations=0,
        scaling_factors=(1,),
):
self.z_dim = z_dim
self.seed = seed
self.set_seed()
self.data = data
self.device = device
self.encoder = getattr(models, encoder_type + "Encoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.decoder = getattr(models, decoder_type + "Decoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.transformation_types = transformation_types
self.W_r = torch.nn.ModuleList()
self.W_i = torch.nn.ModuleList()
for i in range(len(self.transformation_types)-1):
self.W_r.append(torch.nn.Linear(z_dim, z_dim, bias=False).to(self.device))
self.W_i.append(torch.nn.Linear(z_dim, z_dim, bias=False).to(self.device))
cardinals = [
n_rotations + 1,
n_x_translations + 1,
n_y_translations + 1,
len(scaling_factors),
]
self.cardinals = cardinals
# function used for transformation
# indexes 0, 1, 2
self.transforms = []
for i in range(len(transformation_types)):
self.transforms.append(self.get_transformation(transformation_types[i], indexes[i]))
self.output_dir = output_directory
self.save_name = save_name
self.best_epoch = 0
self.best_mse = 0
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
        # enable autograd anomaly detection to surface backward-pass errors
        torch.autograd.set_detect_anomaly(True)
def get_transformation(self, name, index):
"""Returns function to performance transformation based name"""
if name is None:
return None
transformation = getattr(latent_operators, name)
return transformation(self.cardinals, self.z_dim, self.device, unique_transfo = True, index=index)
def return_shifts(self, params):
smallest_angle = 360 / (self.data.n_rotations + 1)
int_x = round(self.data.n_pixels / (self.data.n_x_translations + 1))
int_y = round(self.data.n_pixels / (self.data.n_y_translations + 1))
shifts_x = torch.LongTensor([[param.shift_x/int_x for param in params]]).t()
shifts_y = torch.LongTensor([[param.shift_y/int_y for param in params]]).t()
shifts_r = torch.LongTensor([[int(param.angle/smallest_angle) for param in params]]).t()
shifts = []
if self.data.n_rotations > 0:
shifts.append(shifts_r)
if self.data.n_x_translations > 0:
shifts.append(shifts_x)
if self.data.n_y_translations > 0:
shifts.append(shifts_y)
return shifts
def transform(self, z1, shifts):
N_transfo = len(self.transforms)
# shifts is now a tuple
z_r = z1[0]
z_i = z1[1]
        for i in range(N_transfo - 1):
z_transformed = self.transforms[i]((z_r,z_i), shifts[i])
z_r = z_transformed[0]
z_i = z_transformed[1]
            # apply the complex weight matrix using the pre-update (z_r, z_i) pair
            z_r_new = self.W_r[i](z_r) - self.W_i[i](z_i)
            z_i_new = self.W_r[i](z_i) + self.W_i[i](z_r)
            z_r, z_i = z_r_new, z_i_new
z_transformed = self.transforms[N_transfo-1]((z_r,z_i), shifts[N_transfo-1])
return z_transformed
def train(self, loss_func, learning_rate, n_epochs, log_frequency):
self.encoder.train()
self.decoder.train()
params = list(self.encoder.parameters()) + list(self.decoder.parameters()) + \
list(self.W_r.parameters()) + list(self.W_i.parameters())
optimizer = torch.optim.Adam(params, lr=learning_rate)
train_losses = torch.FloatTensor(n_epochs)
valid_losses = torch.FloatTensor(n_epochs)
best_mse = np.inf
N_pairs = len(self.data.train_loader.dataset)
for epoch in range(n_epochs):
epoch_loss = 0
for i, (x1, x2, angles) in enumerate(self.data.train_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
optimizer.zero_grad()
loss = loss_func(x1, x2, angles)
loss.backward()
optimizer.step()
epoch_loss += loss.item() * x1.size(0)
epoch_loss = epoch_loss / N_pairs
print(f"Epoch {epoch} Train loss: {epoch_loss:0.3e}")
valid_mse = (
self.compute_mean_loss(loss_func, self.data.valid_loader)
.detach()
.item()
)
train_losses[epoch] = epoch_loss
if valid_mse < best_mse:
self.update_state(mse=valid_mse, epoch=epoch)
best_mse = valid_mse
file_name = "checkpoint_{}.pth.tar".format(self.save_name)
self.save_best_checkpoint(
out_dir=self.output_dir,
file_name=file_name,
optimizer_state_dict=optimizer.state_dict(),
)
print(f"Epoch {epoch} validation loss: {valid_mse:0.3e}")
valid_losses[epoch] = valid_mse
return train_losses.detach().numpy(), valid_losses.detach().numpy()
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
with torch.no_grad():
z1 = self.encoder(x1)
x1_reconstruction_r = self.decoder(z1)
return x1_reconstruction_r
def reconstruct_x2(self, x1, param):
"""Reconstructs x2 using model and latent transformation"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
batch_size = x1.size(0)
with torch.no_grad():
z1 = self.encoder(x1)
shifts = self.return_shifts([param])
z_transformed = self.transform(z1, shifts)
x2_reconstruction_r = self.decoder(z_transformed)
return x2_reconstruction_r
def plot_x1_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x1_reconstructions(
pairs, self.reconstruct_x1, indices, train_set, save_name
)
def plot_x2_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x2_reconstructions(
pairs, self.reconstruct_x2, indices, train_set, save_name
)
def reconstruction_mse_transformed_z1(self, x1, x2, params):
"""Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1), not using ground-truth angles"""
criterion = torch.nn.MSELoss(reduction="none")
batch_size = x1.size(0)
z1 = self.encoder(x1)
x1_reconstruction_r = self.decoder(z1)
x1_reconstruction_loss = criterion(x1_reconstruction_r, x1)
x1_reconstruction_loss = x1_reconstruction_loss.mean()
shifts = self.return_shifts(params)
z_transformed = self.transform(z1, shifts)
x2_reconstruction_r = self.decoder(z_transformed)
x2_reconstruction_loss = criterion(x2_reconstruction_r, x2)
x2_reconstruction_loss = x2_reconstruction_loss.mean()
loss = x1_reconstruction_loss + x2_reconstruction_loss
return loss
def compute_test_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
N = 0
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
bs = x1.size(0)
loss_batch = loss_func(x1, x2, angles)*bs
N += bs
losses.append(loss_batch)
test_loss = torch.stack(losses).sum() / float(N)
self.encoder.train()
self.decoder.train()
return test_loss
def compute_mean_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
loss_batch = loss_func(x1, x2, angles)
losses.append(loss_batch)
mean_loss = torch.stack(losses).mean()
self.encoder.train()
self.decoder.train()
return mean_loss
def run(
self, learning_rate=0.0005, n_epochs=10, log_frequency=50
):
"""Runs experiment for autoencoder reconstruction."""
loss_func = self.reconstruction_mse_transformed_z1
train_loss, valid_loss = self.train(
loss_func, learning_rate, n_epochs, log_frequency
)
train_mse = self.compute_mean_loss(loss_func, self.data.train_loader)
print(f"Train MSE: {train_mse}")
valid_mse = self.compute_mean_loss(loss_func, self.data.valid_loader)
print(f"Valid MSE: {valid_mse}")
test_mse = self.compute_test_loss(loss_func, self.data.test_loader_batch_100)
print(f"Test MSE: {test_mse}")
return train_loss, valid_loss, train_mse, valid_mse, test_mse
def update_state(self, mse, epoch):
self.best_mse = mse
self.best_epoch = epoch
def load_model(self, path_to_checkpoint):
checkpoint = torch.load(path_to_checkpoint)
self.best_epoch = checkpoint["best_epoch"]
self.encoder.load_state_dict(checkpoint["encoder_state_dict"])
self.decoder.load_state_dict(checkpoint["decoder_state_dict"])
for t in range(len(self.transformation_types) - 1):
self.W_r[t].load_state_dict(checkpoint["W_r"][t])
self.W_i[t].load_state_dict(checkpoint["W_i"][t])
self.best_mse = checkpoint["best_mse"]
return checkpoint["best_mse"], checkpoint["best_epoch"]
def get_current_state(self):
W_r = {}
W_i = {}
for t in range(len(self.transformation_types)-1):
W_r[t] = self.W_r[t].state_dict()
W_i[t] = self.W_i[t].state_dict()
return {
"encoder_state_dict": self.encoder.state_dict(),
"decoder_state_dict": self.decoder.state_dict(),
"W_r": W_r,
"W_i": W_i,
"best_epoch": self.best_epoch,
"best_mse": self.best_mse,
}
def save_best_checkpoint(self, out_dir, file_name, optimizer_state_dict):
"""
:param file_name: filename to save checkpoint in.
:param optimizer_state_dict: state of the optimizer.
:return: str to path where the model is saved.
"""
state = self.get_current_state()
state["optimizer_state_dict"] = optimizer_state_dict
best_path = os.path.join(out_dir, "best_" + file_name)
torch.save(state, best_path)
def plot_multiple_transformations_stacked(self, indices, n_plots, train_set=False, save_name=None):
degree_sign = "\N{DEGREE SIGN}"
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
plot.plot_rotations_translations(
X,
self,
n_plots,
self.data.n_rotations,
self.data.n_x_translations,
self.data.n_y_translations,
save_name=save_name
)
def plot_multiple_transformations(self, param_name='angle', indices=None, train_set=False, save_name=None):
"""Plots all rotated reconstructions for given samples"""
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
title = (
"Translations" if param_name=='angle' != "angle" else "Rotations"
)
plot.plot_transformations_complex(
X,
self,
title,
save_name=save_name,
param_name=param_name,
supervised=True,
) |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
---
Saves model/plots for best validation MSE
"""
import math
import numpy as np
import os
from distutils.dir_util import copy_tree
def save_best_validation_helper(folder, operator):
min_valid_loss = math.inf
for sweep in os.listdir(folder):
if sweep.startswith("best") or sweep.startswith(".DS_Store"):
continue
path = os.path.join(folder, sweep, operator)
try:
valid_loss = np.min(np.load(os.path.join(path, "valid_losses.npy")))
except FileNotFoundError:
print(f"run {sweep} missing for {operator}")
continue
if min_valid_loss >= valid_loss:
min_valid_loss = valid_loss
destination = os.path.join(folder, "best-validation", operator)
copy_tree(path, destination)
def save_all_best_validation(parent_folder):
for experiment in os.listdir(parent_folder):
experiment_path = os.path.join(parent_folder, experiment)
if experiment.endswith("-sweep") and "autoencoder" in experiment and "standard" not in experiment:
save_best_validation_helper(experiment_path, "disentangled-operator")
save_best_validation_helper(experiment_path, "shift-operator")
elif experiment.endswith("-sweep") and "standard-autoencoder" in experiment:
save_best_validation_helper(experiment_path, "standard-autoencoder")
elif experiment.endswith("-sweep") and "cci-vae" in experiment:
save_best_validation_helper(experiment_path, "cci_vae")
save_best_validation_helper(experiment_path, "beta_vae")
save_best_validation_helper(experiment_path, "vae")
if __name__ == "__main__":
user = os.environ["USER"]
parent_folder = f"/checkpoint/{user}/Equivariance/"
save_all_best_validation(parent_folder)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""
Transformations applied to the input images
"""
import torch
import itertools
import numpy as np
import skimage.transform
from dataclasses import dataclass
# TODO: set automatically based on n_pixels
TRANSLATION_INTERVAL = [0, 28]
@dataclass
class Params:
"""
angle (float): counter-clockwise rotation angle in degrees
shift_x (float): shift value to the right
    shift_y (float): upward shift value
scale (float): scaling factor
"""
angle: float = 0.0
shift_x: float = 0.0
shift_y: float = 0.0
scale: float = 1.0
def transform(image, params):
"""
Applies transformations on a single image based on params.
Order of transformation is: rotate, translate, scale
Args:
image (np.array or torch.tensor): of shape [n_pixels, n_pixels]
params (Params): contains parameters for rotations, scaling etc.
Returns: image with transformations applied
"""
assert (
image.ndim == 3
), f"image must be of shape [n_channels, n_pixels, n_pixels] not {image.shape}"
image_transformed = image.squeeze()
# Rotate
if params.angle not in (0.0, 360.0):
# cval is the fill value.
image_transformed = skimage.transform.rotate(
image_transformed, params.angle, cval=image_transformed.min()
)
# Translate
# if edge is reached cut-off portion appears on other side
if params.shift_x != 0.0:
image_transformed = np.roll(image_transformed, int(params.shift_x), axis=1)
if params.shift_y != 0.0:
image_transformed = np.roll(image_transformed, -int(params.shift_y), axis=0)
# Scale
if params.scale != 1.0:
image_transformed = rescale(image_transformed, params.scale)
image_transformed = to_torch(image, image_transformed)
return image_transformed
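# Hedged usage sketch (illustration only): applies one rotation plus a horizontal
# shift to a random single-channel image, mirroring how the datasets call transform().
def _example_transform():
    image = torch.rand(1, 28, 28)
    params = Params(angle=90.0, shift_x=7.0)
    transformed = transform(image, params)
    return transformed.shape  # same shape as the input, torch.Size([1, 28, 28])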
def rescale(image, scale):
"""Rescales images based on given scale factor"""
scale_transform = skimage.transform.SimilarityTransform(scale=scale)
image = skimage.transform.warp(
image, scale_transform.inverse, mode="constant", cval=image.min(),
)
return image
def to_torch(image, image_transformed):
"""Converts numpy matrix to torch tensor with correct shape"""
image_transformed = image_transformed.reshape(image.shape)
if torch.is_tensor(image_transformed):
return image_transformed.float()
if torch.is_tensor(image):
image_transformed = torch.from_numpy(image_transformed).float()
return image_transformed
def get_transform_params(
n_rotations, n_x_translations, n_y_translations, scaling_factors,
):
"""Returns transform params corresponding given values.
Translations subdivide translation interval.
Args:
n_rotations (int): number of subdivisions of 360 to apply.
n_x_translations (int): number of shifts along x-axis
n_y_translations (int): number of shifts along y-axis
scaling_factors (list or tuple floats): representing the scaling factors to use
Returns: Params object
"""
shifts_x = get_shifts(n_x_translations, TRANSLATION_INTERVAL)
shifts_y = get_shifts(n_y_translations, TRANSLATION_INTERVAL)
for angle in get_rotation_angles(n_rotations):
for shift_x, shift_y in itertools.product(shifts_x, shifts_y):
for scale in scaling_factors:
params = Params(
angle=angle, shift_x=shift_x, shift_y=shift_y, scale=scale
)
yield params
def get_shifts(n_translations, interval):
"""Returns shifts along given axis by dividing interval.
Args:
interval (list of ints): [0, n_pixels]
        n_translations (int): number of shifts; interval[1] should be divisible by n_translations + 1
"""
if n_translations == 0:
return [0]
elif n_translations == 1:
return [0, interval[1] // 2]
min_shift = round(interval[1] / (n_translations + 1))
steps = [n * min_shift for n in range(n_translations + 1)]
return steps
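# Hedged example (illustration only): with the default 28-pixel interval,
# get_shifts subdivides [0, 28] into evenly spaced pixel offsets.
def _example_get_shifts():
    assert get_shifts(0, TRANSLATION_INTERVAL) == [0]
    assert get_shifts(1, TRANSLATION_INTERVAL) == [0, 14]
    assert get_shifts(3, TRANSLATION_INTERVAL) == [0, 7, 14, 21]  # 28 / (3 + 1) = 7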
def get_rotation_angles(n_rotations):
"""Yields rotation angles based on subdivisions given.
Example:
>>> get_rotation_angles(2) => [0.0, 120.0, 240.0]
"""
min_angle = 360.0 / (n_rotations + 1)
for n in range(n_rotations + 1):
yield min_angle * n
def shift_to_angle(shift_val, n_transformations):
"""Returns the angle corresponding to the shift_val.
Example: [0, 32], shift_val = 4, we should get 4 / 32 * 360
"""
if shift_val == TRANSLATION_INTERVAL[1]:
return 0.0
shift_ratio = float(shift_val) / TRANSLATION_INTERVAL[1]
angle = 360.0 * shift_ratio
return angle
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from sklearn.model_selection import StratifiedShuffleSplit
import torchvision
from . import data_utils
from abc import ABC, abstractmethod
from datasets import transformations
import numpy as np
import random
import json
class AbstractDataset(ABC):
"""
Defines common fields needed for datasets
Attributes:
batch_size (int): batch size used for dataloaders
        train_loader (torch.utils.data.DataLoader): yields (X1, X2, params) batches
        test_loader (torch.utils.data.DataLoader): yields (X1, X2, params) batches
pairs (bool): indicates whether to use Pairs dataset where both x1 and x2 are transformed.
Otherwise, Single dataset is used where only x1 is transformed.
"""
def __init__(
self,
batch_size,
n_rotations=0,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
seed=0,
pairs=True,
):
AbstractDataset.set_seed(seed)
self.batch_size = batch_size
self.n_x_translations, self.n_y_translations = (
n_x_translations,
n_y_translations,
)
self.n_rotations, self.scaling_factors = n_rotations, scaling_factors
self.X_orig_train, self.X_orig_valid, self.X_orig_test = self.get_original()
self.transform_params = list(
transformations.get_transform_params(
n_rotations=self.n_rotations,
n_x_translations=self.n_x_translations,
n_y_translations=self.n_y_translations,
scaling_factors=self.scaling_factors,
)
)
data_cls = Pairs if pairs else Single
self.X_train = data_cls(self.X_orig_train, self.transform_params)
self.train_loader = torch.utils.data.DataLoader(
self.X_train,
batch_size=self.batch_size,
shuffle=True,
collate_fn=Pairs.collate,
)
# For validation and test, use shuffle = False to have SequentialSampler(dataset) by default
# (see https://github.com/pytorch/pytorch/blob/bfa94487b968ccb570ef8cd9547029b967e76ed0/torch/utils/data/dataloader.py#L257)
self.X_valid = data_cls(self.X_orig_valid, self.transform_params)
self.valid_loader = torch.utils.data.DataLoader(
self.X_valid,
batch_size=self.batch_size,
shuffle=False,
collate_fn=Pairs.collate,
)
self.X_test = data_cls(self.X_orig_test, self.transform_params)
self.test_loader = torch.utils.data.DataLoader(
self.X_test,
batch_size=self.batch_size,
shuffle=False,
collate_fn=Pairs.collate,
)
self.test_loader_batch_1 = torch.utils.data.DataLoader(
self.X_test, batch_size=1, shuffle=False, collate_fn=Pairs.collate,
)
self.test_loader_batch_100 = torch.utils.data.DataLoader(
self.X_test, batch_size=100, shuffle=False, collate_fn=Pairs.collate,
)
def __repr__(self):
attributes = {
"n_rotations": self.n_rotations,
"n_x_translations": self.n_x_translations,
"n_y_translations": self.n_y_translations,
"scaling_factors": self.scaling_factors,
}
return json.dumps(attributes)
@abstractmethod
def get_original(self):
"""Sets X_train and X_test to images in original dataset"""
pass
@property
def total_n_transformations(self):
"""Computes the total number of transformations"""
n_translations = (1 + self.n_x_translations) * (1 + self.n_y_translations)
n = n_translations * (1 + self.n_rotations) * len(self.scaling_factors)
return n
@staticmethod
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
@classmethod
def __subclasshook__(cls, C):
"""Verifies dataset has loader of correct type"""
for loader in ["train_loader", "test_loader"]:
is_valid = hasattr(cls, loader) and isinstance(
(getattr(cls, loader)), Dataset
)
if not is_valid:
return False
return True
class ProjectiveMNIST(AbstractDataset):
"""Builds MNIST dataset with transformations applied lazly.
Loader contains: (digit, rotated_digit, angle)
Shape of Data: (batch_size, 1, 28, 28)
Args:
batch_size (int): batch size to user for dataloaders
n_rotations (int): number discrete rotations per image
train_set_proportion (float): proportion of training set to keep
valid_set_proportion (float): proportion of training set to keep
test_set_proportion (float): proportion of training set to keep
"""
def __init__(
self,
batch_size,
n_rotations=4,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
train_set_proportion=0.1,
valid_set_proportion=1.0,
test_set_proportion=1.0,
seed=0,
pairs=True,
):
self.train_set_proportion = train_set_proportion
self.valid_set_proportion = valid_set_proportion
self.test_set_proportion = test_set_proportion
super().__init__(
batch_size,
n_rotations,
n_x_translations,
n_y_translations,
scaling_factors,
seed,
pairs,
)
self.n_pixels = self.X_orig_train[0].shape[1]
self.n_channels = 1
def get_original(self):
"""Returns original training and test images"""
mnist_train, mnist_val, mnist_test = self.download_mnist()
# normalize MNIST so values are between [0, 1]
x_train = mnist_train.data.unsqueeze(1) / 255.0
x_val = mnist_val.data.unsqueeze(1) / 255.0
x_test = mnist_test.data.unsqueeze(1) / 255.0
return x_train, x_val, x_test
@staticmethod
def stratified_sample(X, y, size):
"""Returns a stratified sample"""
if size == 1.0:
return X
test_size = 1 - size
sampler = StratifiedShuffleSplit(
n_splits=1, test_size=test_size, random_state=0
)
indices, _ = next(sampler.split(X, y))
X_sample = X[indices]
return X_sample
@staticmethod
def split_train_valid(train_set, split=10000):
num_train = len(train_set)
indices = list(range(num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_data = train_set.data[train_idx]
valid_data = train_set.data[valid_idx]
train_targets = train_set.targets[train_idx]
valid_targets = train_set.targets[valid_idx]
return train_data, train_targets, valid_data, valid_targets
def download_mnist(self):
"""Skips download if cache is available"""
train_set = torchvision.datasets.MNIST(
"/tmp/",
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
test_set = torchvision.datasets.MNIST(
"/tmp/",
train=False,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
(
train_data,
train_targets,
valid_data,
valid_targets,
) = ProjectiveMNIST.split_train_valid(train_set)
# stratified samples
train_data = ProjectiveMNIST.stratified_sample(
train_data, train_targets, self.train_set_proportion
)
valid_data = ProjectiveMNIST.stratified_sample(
valid_data, valid_targets, self.valid_set_proportion
)
test_data = ProjectiveMNIST.stratified_sample(
test_set.data, test_set.targets, self.test_set_proportion
)
return train_data, valid_data, test_data
class ProjectiveSingleDigitMNIST(AbstractDataset):
"""Builds MNIST dataset with transformations applied lazly.
Loader contains: (digit, rotated_digit, angle)
Shape of Data: (batch_size, 1, 28, 28)
Args:
batch_size (int): batch size to user for dataloaders
n_rotations (int): number discrete rotations per image
train_set_proportion (float): proportion of training set to keep
valid_set_proportion (float): proportion of training set to keep
test_set_proportion (float): proportion of training set to keep
"""
def __init__(
self,
batch_size,
n_rotations=4,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
train_set_proportion=0.1,
valid_set_proportion=1.0,
test_set_proportion=1.0,
seed=0,
pairs=True,
digit=4,
):
self.train_set_proportion = train_set_proportion
self.valid_set_proportion = valid_set_proportion
self.test_set_proportion = test_set_proportion
self.digit = digit
super().__init__(
batch_size,
n_rotations,
n_x_translations,
n_y_translations,
scaling_factors,
seed,
pairs,
)
self.n_pixels = self.X_orig_train[0].shape[1]
self.n_channels = 1
def get_original(self):
"""Returns original training and test images"""
mnist_train, mnist_val, mnist_test = self.download_mnist()
# normalize MNIST so values are between [0, 1]
x_train = mnist_train.data.unsqueeze(1) / 255.0
x_val = mnist_val.data.unsqueeze(1) / 255.0
x_test = mnist_test.data.unsqueeze(1) / 255.0
return x_train, x_val, x_test
@staticmethod
def split_train_valid(train_set, split=10000):
num_train = len(train_set)
indices = list(range(num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_data = train_set.data[train_idx]
valid_data = train_set.data[valid_idx]
train_targets = train_set.targets[train_idx]
valid_targets = train_set.targets[valid_idx]
return train_data, train_targets, valid_data, valid_targets
def sample_single_digit(self, x, targets, proportion):
idx = targets == self.digit
x_digit = x[idx]
sample_size = int(len(idx) * proportion)
return x_digit[:sample_size]
def download_mnist(self):
"""Skips download if cache is available"""
train_set = torchvision.datasets.MNIST(
"/tmp/",
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
test_set = torchvision.datasets.MNIST(
"/tmp/",
train=False,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
(
train_data,
train_targets,
valid_data,
valid_targets,
) = ProjectiveMNIST.split_train_valid(train_set)
# stratified samples
train_data = self.sample_single_digit(
train_data, train_targets, self.train_set_proportion
)
valid_data = self.sample_single_digit(
valid_data, valid_targets, self.valid_set_proportion
)
test_data = self.sample_single_digit(
test_set.data, test_set.targets, self.test_set_proportion
)
return train_data, valid_data, test_data
class SimpleShapes(AbstractDataset):
def __init__(
self,
batch_size,
n_pixels=28,
n_classes=300,
n_points=5,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
n_channels=1,
seed=0,
pairs=True,
):
self.n_pixels, self.n_classes = n_pixels, n_classes
self.n_points, self.n_channels = n_points, n_channels
super().__init__(
batch_size,
n_rotations,
n_x_translations,
n_y_translations,
scaling_factors,
seed,
pairs,
)
@staticmethod
def normalize(X):
return torch.clamp(X + 1, 0.0, 1.0)
def get_original(self):
np.random.seed(1) # Sets seed
data = data_utils.generate_dataset(self.n_pixels, self.n_classes, self.n_points)
(X_train, _), (X_test, _) = data
X_trainvalid = torch.from_numpy(X_train).unsqueeze(1).float()
N = X_trainvalid.size(0)
Nvalid = int(N * 0.2) # Keeps 20% for validation
X_valid = SimpleShapes.normalize(X_trainvalid[:Nvalid, ...])
X_train = SimpleShapes.normalize(X_trainvalid[Nvalid:, ...])
X_test = SimpleShapes.normalize(torch.from_numpy(X_test).unsqueeze(1).float())
return X_train, X_valid, X_test
class Single(Dataset):
"""Contains x1 transformed with parameters.
    Total number of samples == n_originals * n_params (each original transformed once per param).
"""
def __init__(self, X, params):
self.X = X
self.params = params
def __len__(self):
return self.X.shape[0] * len(self.params)
@staticmethod
def collate(batch):
"""Used for dataloader"""
X1 = torch.stack([item[0] for item in batch])
X2 = torch.stack([item[1] for item in batch])
params = [item[2] for item in batch]
return X1, X2, params
def get_x_idx(self, idx):
"""Returns the idx of the original image x."""
return idx // len(self.params)
def get_x1(self, idx, x_idx):
x = self.X[x_idx]
p = len(self.params)
x1_params_idx = idx % p
x1_params = self.params[x1_params_idx]
x1 = transformations.transform(x, x1_params)
return x1, x1_params
def __getitem__(self, idx):
x_idx = self.get_x_idx(idx)
x1, x1_params = self.get_x1(idx, x_idx)
x2 = self.X[x_idx]
return x1, x2, x1_params
class Pairs(Dataset):
"""Contains x1, x2, and transformation params.
    Total of n_samples * num_params^2 pairs; for each combination (t_i, t_j):
        x1 = transform(x0, t_i)
        x2 = transform(x1, t_j)
Args:
X (original images): [n_samples, n_pixels, n_pixels]
params (list of transformations.Params): parameters for transformations
"""
def __init__(self, X, params):
self.X = X
self.params = params
def __len__(self):
return self.X.shape[0] * (len(self.params) ** 2)
@staticmethod
def collate(batch):
"""Used for dataloader"""
X1 = torch.stack([item[0] for item in batch])
X2 = torch.stack([item[1] for item in batch])
params = [item[2] for item in batch]
return X1, X2, params
def get_x_idx(self, idx):
"""Returns the idx of the original image x."""
return idx // (len(self.params) ** 2)
def get_x1(self, idx, x_idx):
x = self.X[x_idx]
p = len(self.params)
x1_params_idx = (idx - (x_idx) * p * p) // p
x1_params = self.params[x1_params_idx]
x1 = transformations.transform(x, x1_params)
return x1
def get_x2_params(self, idx, x_idx):
p = len(self.params)
x1_params_idx = (idx - (x_idx) * p * p) // p
x2_params_idx = idx - ((x_idx * p * p) + (x1_params_idx * p))
return self.params[x2_params_idx]
def __getitem__(self, idx):
x_idx = self.get_x_idx(idx)
x1 = self.get_x1(idx, x_idx)
x2_params = self.get_x2_params(idx, x_idx)
x2 = transformations.transform(x1, x2_params)
return x1, x2, x2_params
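# Hedged sketch (illustration only): reproduces the index arithmetic used by Pairs
# to map a flat dataset index onto (original image, x1 params, x2 params) indices.
def _example_pairs_index(idx, n_params):
    x_idx = idx // (n_params ** 2)
    x1_params_idx = (idx - x_idx * n_params * n_params) // n_params
    x2_params_idx = idx - (x_idx * n_params * n_params + x1_params_idx * n_params)
    return x_idx, x1_params_idx, x2_params_idx  # e.g. (7, 3) -> (0, 2, 1)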
class ShapeNet(AbstractDataset):
pass
class ShapeNetIterator(Dataset):
"""ShapeNet Iterator"""
def __init__(self, V, transform=None):
self.V = V
self.preprocess = transforms.Compose(
[
# transforms.Resize(256),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
def __len__(self):
return len(self.V[0])
def __getitem__(self, idx):
return tuple([self.preprocess(self.V[v][idx]) for v in range(len(self.V))])
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""Script demonstrating drawing of anti-aliased lines using Xiaolin Wu's line
algorithm
usage: python xiaolinwu.py [output-file]
"""
from __future__ import division
import sys
from PIL import Image
def _fpart(x):
return x - int(x)
def _rfpart(x):
return 1 - _fpart(x)
def putpixel(img, xy, color, alpha=1):
"""Paints color over the background at the point xy in img.
Use alpha for blending. alpha=1 means a completely opaque foreground.
"""
c = tuple(map(lambda bg, fg: int(round(alpha * fg + (1-alpha) * bg)),
img.getpixel(xy), color))
img.putpixel(xy, c)
def draw_line(img, p1, p2, color):
"""Draws an anti-aliased line in img from p1 to p2 with the given color."""
x1, y1 = p1
x2, y2 = p2
dx, dy = x2-x1, y2-y1
steep = abs(dx) < abs(dy)
p = lambda px, py: ((px,py), (py,px))[steep]
if steep:
x1, y1, x2, y2, dx, dy = y1, x1, y2, x2, dy, dx
if x2 < x1:
x1, x2, y1, y2 = x2, x1, y2, y1
grad = dy/dx
intery = y1 + _rfpart(x1) * grad
def draw_endpoint(pt):
x, y = pt
xend = round(x)
yend = y + grad * (xend - x)
xgap = _rfpart(x + 0.5)
px, py = int(xend), int(yend)
putpixel(img, p(px, py), color, _rfpart(yend) * xgap)
putpixel(img, p(px, py+1), color, _fpart(yend) * xgap)
return px
xstart = draw_endpoint(p(*p1)) + 1
xend = draw_endpoint(p(*p2))
for x in range(xstart, xend):
y = int(intery)
putpixel(img, p(x, y), color, _rfpart(intery))
putpixel(img, p(x, y+1), color, _fpart(intery))
intery += grad |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
from .xiaolinwu import draw_line
blue = (0, 0, 255)
yellow = (255, 255, 0)
white = (255, 255, 255)
black = (0, 0, 0)
def generate_images_from_coords(NPX, NP, C, cols):
images = list()
for c in range(C.shape[2]):
img = Image.new("RGB", (NPX, NPX), white)
for p in range(NP - 1):
if (C[0, p + 1, c] != C[0, p, c]) or (C[1, p + 1, c] != C[1, p, c]):
draw_line(
img,
(C[0, p + 1, c], C[1, p + 1, c]),
(C[0, p, c], C[1, p, c]),
cols[c],
)
draw_line(
img,
(C[0, p, c], C[1, p, c]),
(C[0, p + 1, c], C[1, p + 1, c]),
cols[c],
)
if (C[0, p + 1, c] != C[0, 0, c]) or (C[1, p + 1, c] != C[1, 0, c]):
draw_line(
img, (C[0, p + 1, c], C[1, p + 1, c]), (C[0, 0, c], C[1, 0, c]), cols[c]
)
draw_line(
img, (C[0, 0, c], C[1, 0, c]), (C[0, p + 1, c], C[1, p + 1, c]), cols[c]
)
images.append(np.array(img))
return images
# Draw images corresponding to different classes
def plot_and_save_grid(NPX, images, margin=1, name="FIGS/junk.png"):
grid = np.zeros((NPX + 2 * margin, NPX * NC + margin * NC + margin, 3))
pointer = 0
for img in images:
grid[
margin : NPX + margin, 0 + pointer + margin : NPX + pointer + margin, :
] = img
pointer += NPX + margin
im = Image.fromarray(np.uint8((grid)))
im.save(name)
return im
class MyDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, V, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
# self.root = ts.root
# self.transform = transforms.ToTensor()
self.V = V
def __len__(self):
return len(self.V[0])
def __getitem__(self, idx):
try:
return tuple([self.V[v][idx] for v in range(len(self.V))])
except:
pdb.set_trace()
# return (self.transform(self.train_data[idx,:,:,:]),self.train_labels[idx])
# return Dataset.__getitem__(self, idx)
# super()
def pytorch_dataset(V, batch_size):
# order = np.random.permutation(NS)
ts = MyDataset(V)
    loader = DataLoader(ts, batch_size=batch_size, shuffle=True)  # DataLoader is imported above; torch itself is not
return loader
def generate_dataset(NPX, NC, NP):
NS = NC * 2 # number of samples
# coordinates of each classes of objects
C = np.random.randint(0 + NPX / 6, NPX - 1 - NPX / 6, (2, NP, NC))
cols = np.zeros((NS, 3))
# Generate images corresponding to different classes using Xiaolin Wu's line algorithm for anti-aliasing
X = np.array(
generate_images_from_coords(NPX, NP, C[:, :, :].reshape((2, NP, NC)), cols)
)
X = 1 - np.mean(X, axis=3)
# normalize (negative sign ensure background is min)
X = X / -X.mean()
y = np.arange(NC)
y = y.flatten()
Y = y.astype(int)
split = NS // 4
Xtrain = X[:split]
Ytrain = Y[:split]
Xtest = X[split:]
Ytest = Y[split:]
return ((Xtrain, Ytrain), (Xtest, Ytest))
def generate_angles(NT1, NT2, NC):
# create pairs of shape with all angles
NT = NT1 * NT2 ** 2
[ind1, ind2] = np.meshgrid(range(NT), range(NT))
s1 = ind1.flatten()
s2 = ind2.flatten()
alphas = (s1 - s2) % (NT1)
sangle1 = np.floor(s1 / NT2 ** 2)
sangle2 = np.floor(s2 / NT2 ** 2)
strans1 = s1 % NT2 ** 2
strans2 = s2 % NT2 ** 2
stransx1 = np.floor(strans1 / NT2)
stransx2 = np.floor(strans2 / NT2)
stransy1 = strans1 % NT2
stransy2 = strans2 % NT2
alphas1 = (sangle1 - sangle2) % (NT1)
alphas2 = (stransx1 - stransx2) % (NT2)
alphas3 = (stransy1 - stransy2) % (NT2)
s1_all_shapes = (
np.tile(s1, (int(NC / 2)))
+ NT * np.tile(np.arange(int(NC / 2)).T, (NT * NT, 1)).T.flatten()
)
s2_all_shapes = (
np.tile(s2, (int(NC / 2)))
+ NT * np.tile(np.arange(int(NC / 2)).T, (NT * NT, 1)).T.flatten()
)
alphas_all_shapes1 = np.tile(alphas1, int(NC / 2))
alphas_all_shapes2 = np.tile(alphas2, int(NC / 2))
alphas_all_shapes3 = np.tile(alphas3, int(NC / 2))
alphas = (alphas1, alphas2, alphas3)
alphas_all_shapes = (alphas_all_shapes1, alphas_all_shapes2, alphas_all_shapes3)
return s1, s2, s1_all_shapes, s2_all_shapes, alphas, alphas_all_shapes
def x_to_image(x):
"""Takes a single input x and transforms it into image for im.show"""
if x.dim() == 2:
n_channels = 1
else:
n_channels = x.shape[0]
n_pixels = x.shape[1]
x_image = x.reshape(n_channels, n_pixels, n_pixels)
x_image = x_image.permute(1, 2, 0)
    # squeeze to drop the channel dimension in the single-channel case
x_image = x_image.squeeze()
return x_image
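# Hedged usage sketch (illustration only): x_to_image reshapes a [C, H, W] tensor
# into an [H, W] (or [H, W, C]) array suitable for plt.imshow.
def _example_x_to_image():
    import torch  # torch itself is not imported at module level in this file
    x = torch.rand(1, 28, 28)
    return x_to_image(x)  # -> shape (28, 28) after permute + squeeze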
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import pytest
from datasets import datasets
from cci_variational_autoencoder import CCIVariationalAutoEncoder
BATCH_SIZE = 16
@pytest.fixture(scope="module")
def rotated_mnist():
rotated_mnist = datasets.ProjectiveMNIST(
BATCH_SIZE,
n_rotations=9,
train_set_proportion=0.001,
test_set_proportion=0.001,
valid_set_proportion=0.001,
)
return rotated_mnist
@pytest.fixture(scope="module")
def simple_shapes():
batch_size = 16
return datasets.SimpleShapes(batch_size, n_classes=10)
class TestCCIVariationalAutoEncoder:
def test_vae(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
simple_shapes,
beta=1.0,
c_max=0.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train()
def test_beta_vae(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
simple_shapes,
beta=1.0,
c_max=0.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train()
def test_cci_vae(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
simple_shapes,
beta=100.0,
c_max=36.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train()
class TestProjectiveMNISTVAE:
def test_vae(self, rotated_mnist):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
rotated_mnist,
beta=1.0,
c_max=0.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train(stop_early=True)
def test_cci_vae(self, rotated_mnist):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
rotated_mnist,
beta=100.0,
c_max=36.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train(stop_early=True)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import math
from datasets import transformations
from datasets import datasets
class TestSimpleShapes:
def test_train_loader(self):
simple_shapes = datasets.SimpleShapes(16, n_classes=3)
assert hasattr(simple_shapes, "train_loader")
assert hasattr(simple_shapes, "test_loader")
assert len(simple_shapes.train_loader) > 0
assert len(simple_shapes.test_loader) > 0
def test_transformations(self):
simple_shapes = datasets.SimpleShapes(
16,
n_classes=3,
n_rotations=9,
n_x_translations=5,
n_y_translations=10,
scaling_factors=(1.0, 1.2),
)
assert simple_shapes.total_n_transformations > 50
class TestProjectiveMNIST:
def test_creation(self):
"""Verifies rotated mnist is created properly"""
n_rotations = 9
batch_size = 16
train_size = 5000
rotated_mnist = datasets.ProjectiveMNIST(batch_size, n_rotations=n_rotations)
expected_n_batches = math.ceil(
(rotated_mnist.total_n_transformations ** 2) * train_size / batch_size
)
assert len(rotated_mnist.train_loader) == expected_n_batches
# test shape of x2
assert rotated_mnist.X_train[3][1].shape == torch.Size([1, 28, 28])
def test_proportion(self):
n_rotations = 9
batch_size = 16
train_proportion = 0.001
test_proportion = 0.005
# 10k for validation
full_train_size = 50000
full_test_size = 10000
rotated_mnist = datasets.ProjectiveMNIST(
batch_size,
n_rotations=n_rotations,
train_set_proportion=train_proportion,
valid_set_proportion=train_proportion,
test_set_proportion=test_proportion,
)
expected_train_size = (
full_train_size * train_proportion * (n_rotations + 1) ** 2
)
expected_test_size = full_test_size * test_proportion * (n_rotations + 1) ** 2
assert len(rotated_mnist.X_train) == expected_train_size
assert len(rotated_mnist.X_test) == expected_test_size
class TestTransformations:
def test_transform(self):
shape = (1, 30, 30)
image = torch.rand(shape)
params = transformations.Params(angle=45.0)
rotated_X = transformations.transform(image, params)
assert torch.is_tensor(rotated_X)
assert rotated_X.shape == image.shape
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import pytest
from datasets import datasets
from autoencoder import AutoEncoder
class TestAutoencoder:
@pytest.fixture(scope="module")
def simple_shapes(self):
batch_size = 4
return datasets.SimpleShapes(batch_size, n_classes=10, n_rotations=3)
def test_autoencoder(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = AutoEncoder(
simple_shapes, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate
)
model.run(stop_early=True)
def test_autoencoder_with_shift_operator(self, simple_shapes):
"""Tests autoencoder with latent rotation"""
n_epochs, learning_rate = 1, 0.001
model = AutoEncoder(
simple_shapes,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="ShiftOperator",
)
model.run(stop_early=True)
def test_autoencoder_with_disentangled_rotation(self, simple_shapes):
"""Tests autoencoder with latent rotation"""
n_epochs, learning_rate = 1, 0.001
model = AutoEncoder(
simple_shapes,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="DisentangledRotation",
)
model.run(stop_early=True)
class TestProjectiveMnistAutoencoder:
    # pytest does not collect test classes that define __init__, so use class attributes
    n_epochs = 1
    learning_rate = 0.01
def test_standard_autoencoder(self, rotated_mnist):
model = AutoEncoder(
rotated_mnist, n_epochs=self.n_epochs, learning_rate=self.learning_rate
)
model.run(stop_early=True)
def test_rotated_autoencoder(self, rotated_mnist):
model = AutoEncoder(
rotated_mnist,
z_dim=400,
latent_operator_name="DisentangledRotation",
n_epochs=self.n_epochs,
learning_rate=self.learning_rate,
)
model.run(stop_early=True)
def test_shift_operator_autoencoder(self, rotated_mnist):
model = AutoEncoder(
rotated_mnist,
z_dim=400,
latent_operator_name="ShiftOperator",
n_epochs=self.n_epochs,
learning_rate=self.learning_rate,
)
model.run(stop_early=True)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import torch
import sys
sys.path.append("..")
from datasets import datasets
from weakly_complex_shift_autoencoder import WeaklyComplexAutoEncoder
from complex_shift_autoencoder import ComplexAutoEncoder
import sys
import os
import numpy as np
import random
import torch.backends.cudnn as cudnn
use_cuda = True if torch.cuda.is_available() else False
parser = argparse.ArgumentParser(
description="Fully/Weakly supervised version of shift operator"
)
# General arguments
parser.add_argument("--seed", type=int, default=0)
parser.add_argument(
"--output_directory",
type=str,
default="output",
help="In this directory the models will be "
"saved. Will be created if doesn't exist.",
)
parser.add_argument("--n_epochs", type=int, default="10", help="Number of epochs.")
parser.add_argument("--lr", type=float, default="0.001", help="Learning rate.")
parser.add_argument("--bs", type=int, default="16", help="Batch size.")
parser.add_argument(
"--n_rot", type=int, default="9", help="Number of rotations (for the model)."
)
parser.add_argument(
"--data_n_rot", type=int, default="9", help="Number of rotations (for the data)."
)
parser.add_argument(
"--n_x",
type=int,
default="0",
help="Number of x translations in x (for the model).",
)
parser.add_argument(
"--data_n_x",
type=int,
default="0",
help="Number of x translations in x (for the data).",
)
parser.add_argument(
"--n_y",
type=int,
default="0",
help="Number of y translations in y (for the model).",
)
parser.add_argument(
"--data_n_y",
type=int,
default="0",
help="Number of y translations in y (for the data).",
)
parser.add_argument("--tr_prop", type=float, default="0.01", help="Train proportion.")
parser.add_argument("--te_prop", type=float, default="0.01", help="Test proportion.")
parser.add_argument("--val_prop", type=float, default="0.01", help="Valid proportion.")
parser.add_argument("--n_classes", type=int, default="300", help="Number of classes.")
parser.add_argument("--dataset", type=str, default="mnist", help="Dataset")
parser.add_argument(
"--sftmax", type=int, default="1", help="If 1, switches to weighting and summing (deprecated softmax is always used)"
)
parser.add_argument("--tau", type=float, default=0.1, help="Temperature of softmax.")
parser.add_argument("--mode", type=str, default="train", help="training or test mode")
parser.add_argument("--supervised", type=int, default=0, help="Switches between weakly and fully supervised.")
def main(params):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"running on {device}")
args = parser.parse_args(params)
SEED = int(args.seed)
random.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
torch.cuda.manual_seed_all(SEED)
if args.dataset == "simpleshapes":
data = datasets.SimpleShapes(
batch_size=args.bs,
n_x_translations=args.data_n_x,
n_y_translations=args.data_n_y,
n_rotations=args.data_n_rot,
n_classes=args.n_classes,
n_pixels=28,
)
elif args.dataset == "mnist":
data = datasets.ProjectiveMNIST(
batch_size=args.bs,
n_x_translations=args.data_n_x,
n_y_translations=args.data_n_y,
n_rotations=args.data_n_rot,
train_set_proportion=args.tr_prop,
test_set_proportion=args.te_prop,
valid_set_proportion=args.val_prop,
)
if args.mode == "train":
print("Training")
if args.mode == "test":
print("Testing")
# automatically set z_dim to image size
image_size = data.n_pixels ** 2
if not os.path.exists(args.output_directory):
os.mkdir(args.output_directory)
dict_args = vars(args)
save_name = "_".join(
[
"{0}_{1}".format(key, dict_args[key])
for key in dict_args
if key not in ["output_directory", "mode"]
]
)
if args.supervised:
transformation_types = []
indexes = []
if args.n_rot > 0:
transformation_types.append("ComplexShiftOperator")
indexes.append(0)
if args.n_x > 0:
transformation_types.append("ComplexShiftOperator")
indexes.append(1)
if args.n_y > 0:
transformation_types.append("ComplexShiftOperator")
indexes.append(2)
model_with_rotation = ComplexAutoEncoder(
data,
transformation_types=transformation_types,
indexes=indexes,
device=device,
z_dim=image_size,
seed=SEED,
output_directory=args.output_directory,
save_name=save_name,
n_rotations=args.n_rot,
n_x_translations=args.n_x,
n_y_translations=args.n_y,
)
n_transfos = len(indexes)
else:
model_with_rotation = WeaklyComplexAutoEncoder(
data,
transformation_type="ComplexShiftOperator",
device=device,
z_dim=image_size,
seed=SEED,
temperature=args.tau,
output_directory=args.output_directory,
save_name=save_name,
use_softmax=args.sftmax,
n_rotations=args.n_rot,
n_x_translations=args.n_x,
n_y_translations=args.n_y,
)
if args.mode == "train":
(
train_loss,
valid_loss,
train_mse,
valid_mse,
test_mse,
) = model_with_rotation.run(n_epochs=args.n_epochs, learning_rate=args.lr)
perf = np.array([train_mse, valid_mse, test_mse])
torch.save(perf, os.path.join(args.output_directory, "final_mse_" + save_name))
torch.save(
train_loss, os.path.join(args.output_directory, "train_loss_" + save_name)
)
torch.save(
valid_loss, os.path.join(args.output_directory, "valid_loss_" + save_name)
)
file_name = "best_checkpoint_{}.pth.tar".format(model_with_rotation.save_name)
path_to_model = os.path.join(args.output_directory, file_name)
best_mse, best_epoch = model_with_rotation.load_model(path_to_model)
##### Plots train reconstructions
samples_pairs = np.random.randint(
0, len(model_with_rotation.data.X_train), size=(10,)
).tolist()
model_with_rotation.plot_x2_reconstructions(
indices=samples_pairs,
train_set=True,
save_name=os.path.join(args.output_directory, "plots_train_reconstructions_" + save_name),
)
##### Plots train rotations of samples
train_indices = np.random.randint(
0, len(model_with_rotation.data.X_orig_train), size=(10,)
).tolist()
figsave_name=os.path.join(args.output_directory, "plots_train_rotations_" + save_name + '.png')
if args.supervised:
if n_transfos == 1:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=train_indices, train_set = True,
param_name=param_name, save_name=figsave_name
)
else:
model_with_rotation.plot_multiple_transformations_stacked(indices=train_indices, train_set = True,
n_plots = 10, save_name=figsave_name
)
else:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=train_indices, train_set = True,
param_name=param_name,save_name=figsave_name
)
##### Plots test reconstructions
samples_pairs = np.random.randint(
0, len(model_with_rotation.data.X_test), size=(10,)
).tolist()
model_with_rotation.plot_x2_reconstructions(
indices=samples_pairs,
train_set=False,
save_name=os.path.join(args.output_directory, "plots_test_reconstructions_" + save_name),
)
##### Plots test rotations of samples
test_indices = np.random.randint(
0, len(model_with_rotation.data.X_orig_test), size=(10,)
).tolist()
figsave_name=os.path.join(args.output_directory, "plots_test_rotations_" + save_name + '.png')
if args.supervised:
if n_transfos == 1:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=test_indices, train_set = False,
param_name=param_name, save_name=figsave_name
)
else:
model_with_rotation.plot_multiple_transformations_stacked(indices=test_indices, train_set = False,
n_plots = 10, save_name=figsave_name
)
else:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=test_indices, train_set = False,
param_name=param_name, save_name=figsave_name
)
elif args.mode == "test":
file_name = "best_checkpoint_{}.pth.tar".format(model_with_rotation.save_name)
path_to_model = os.path.join(args.output_directory, file_name)
model_with_rotation.load_model(path_to_model)
if args.supervised:
loss_func = model_with_rotation.reconstruction_mse_transformed_z1
else:
loss_func = model_with_rotation.reconstruction_mse_transformed_z1_weak
test_mse = model_with_rotation.compute_test_loss(
loss_func, model_with_rotation.data.test_loader_batch_100
)
torch.save(
torch.FloatTensor([test_mse]),
os.path.join(
args.output_directory, "test_mse_" + model_with_rotation.save_name
),
)
if __name__ == "__main__":
main(sys.argv[1:])
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_HDD():
def __init__(self, config="BarraCuda"):
###############################
# Carbon per capacity
###############################
with open("hdd/hdd_consumer.json", 'r') as f:
hdd_config = json.load(f)
with open("hdd/hdd_enterprise.json", 'r') as f:
hdd_config.update(json.load(f))
        assert config in hdd_config.keys(), "HDD configuration not found"
self.carbon_per_gb = hdd_config[config]
self.carbon = 0
return
def get_cpg(self, ):
return self.carbon_per_gb
def set_capacity(self, capacity):
self.capacity = capacity
        self.carbon = self.carbon_per_gb * self.capacity
return
def get_carbon(self, ):
return self.carbon
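# Hedged usage sketch (illustration only; assumes the hdd/*.json config files read
# by the constructor are present in the working directory):
def _example_fab_hdd():
    hdd = Fab_HDD(config="BarraCuda")
    hdd.set_capacity(1000)  # GB
    return hdd.get_carbon()  # capacity * carbon-per-GB for the chosen config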
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_Logic():
def __init__(self, process_node=14,
gpa="97",
carbon_intensity="loc_taiwan",
debug=False,
fab_yield=0.875):
self.debug = debug
###############################
# Energy per unit area
###############################
with open("logic/epa.json", 'r') as f:
epa_config = json.load(f)
###############################
# Raw materials per unit area
###############################
with open("logic/materials.json", 'r') as f:
materials_config = json.load(f)
###############################
# Gasses per unit area
###############################
if gpa == "95":
with open("logic/gpa_95.json", 'r') as f:
gpa_config = json.load(f)
elif gpa == "99":
with open("logic/gpa_99.json", 'r') as f:
gpa_config = json.load(f)
elif gpa == "97":
with open("logic/gpa_95.json", 'r') as f:
gpa_95_config = json.load(f)
with open("logic/gpa_99.json", 'r') as f:
gpa_99_config = json.load(f)
gpa_config = {}
for c in gpa_95_config.keys():
gas = (gpa_95_config[c] + gpa_99_config[c]) / 2.
gpa_config[c] = gas
else:
print("Error: Unsupported GPA value for FAB logic")
sys.exit()
###############################
# Carbon intensity of fab
###############################
if "loc" in carbon_intensity:
with open("carbon_intensity/location.json", 'r') as f:
loc_configs = json.load(f)
loc = carbon_intensity.replace("loc_", "")
assert loc in loc_configs.keys()
fab_ci = loc_configs[loc]
elif "src" in carbon_intensity:
with open("carbon_intensity/source.json", 'r') as f:
src_configs = json.load(f)
src = carbon_intensity.replace("src_", "")
assert src in src_configs.keys()
fab_ci = src_configs[src]
else:
print("Error: Carbon intensity must either be loc | src dependent")
sys.exit()
###############################
# Aggregating model
###############################
process_node = str(process_node) + "nm"
assert process_node in epa_config.keys()
assert process_node in gpa_config.keys()
assert process_node in materials_config.keys()
carbon_energy = fab_ci * epa_config[process_node]
carbon_gas = gpa_config[process_node]
carbon_materials = materials_config[process_node]
self.carbon_per_area = (carbon_energy + carbon_gas + carbon_materials)
self.carbon_per_area = self.carbon_per_area / fab_yield
if self.debug:
print("[Fab logic] Carbon/area from energy consumed" , carbon_energy)
print("[Fab logic] Carbon/area from gasses" , carbon_gas)
print("[Fab logic] Carbon/area from materials" , carbon_materials)
print("[Fab logic] Carbon/area aggregate" , self.carbon_per_area)
self.carbon = 0
return
def get_cpa(self,):
return self.carbon_per_area
def set_area(self, area):
self.area = area
self.carbon = self.area * self.carbon_per_area
def get_carbon(self, ):
return self.carbon
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from dram_model import Fab_DRAM
from ssd_model import Fab_SSD
from logic_model import Fab_Logic
def main():
Fab_DRAM(config="ddr4_10nm")
Fab_SSD(config="nand_10nm")
Fab_Logic(gpa="95", carbon_intensity = "src_coal", debug=True,
process_node=10)
# Fab_Logic(gpa="97", carbon_intensity = "loc_taiwan", debug=True,
# process_node=14)
if __name__=="__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_DRAM():
def __init__(self, config = "ddr4_10nm", fab_yield=0.875):
###############################
# Carbon per capacity
###############################
with open("dram/dram_hynix.json", 'r') as f:
dram_config = json.load(f)
        assert config in dram_config.keys(), "DRAM configuration not found"
self.fab_yield = fab_yield
self.carbon_per_gb = dram_config[config] / self.fab_yield
self.carbon = 0
def get_cpg(self, ):
return self.carbon_per_gb
def set_capacity(self, capacity):
self.capacity = capacity
self.carbon = self.carbon_per_gb * self.capacity
return
def get_carbon(self, ):
return self.carbon
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_SSD():
def __init__(self, config="nand_10nm", fab_yield=0.875):
###############################
# Carbon per capacity
###############################
with open("ssd/ssd_hynix.json", 'r') as f:
ssd_config = json.load(f)
with open("ssd/ssd_seagate.json", 'r') as f:
ssd_config.update(json.load(f))
with open("ssd/ssd_western.json", 'r') as f:
ssd_config.update(json.load(f))
        assert config in ssd_config.keys(), "SSD configuration not found"
self.fab_yield = fab_yield
self.carbon_per_gb = ssd_config[config] / self.fab_yield
self.carbon = 0
return
def get_cpg(self, ):
return self.carbon_per_gb
def set_capacity(self, capacity):
self.capacity = capacity
self.carbon = self.carbon_per_gb * self.capacity
return
def get_carbon(self, ):
return self.carbon
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from dram_model import Fab_DRAM
from hdd_model import Fab_HDD
from ssd_model import Fab_SSD
from logic_model import Fab_Logic
debug = False
##############################
# Original Dell 740 LCA
##############################
#https://corporate.delltechnologies.com/content/dam/digitalassets/active/en/unauth/data-sheets/products/servers/lca_poweredge_r740.pdf
##############################
# Main Dell R740 integrated circuits
##############################
dellr740_large_ssd = 3840 # GB (3.84 TB x 8 SSD's)
dellr740_ssd = 400 # GB (400GB x 1 SSD)
dellr740_ssd_dram = 68 # GB (64 + 4GB ECC)
dellr740_dram = 36 # GB (32 + 4 ECC GB x 12)
ic_yield = 0.875
cpu_area = 6.98 #cm^2
##############################
# Estimated process technology node to mimic the Fairphone LCA process node
##############################
CPU_Logic = Fab_Logic(gpa = "95",
carbon_intensity = "src_coal",
process_node = 28,
fab_yield=ic_yield)
SSD_main = Fab_SSD(config = "nand_30nm", fab_yield = ic_yield)
SSD_secondary = Fab_SSD(config = "nand_30nm", fab_yield = ic_yield)
DRAM_SSD_main = Fab_DRAM(config = "ddr3_50nm", fab_yield = ic_yield)
DRAM_SSD_secondary = Fab_DRAM(config = "ddr3_50nm", fab_yield = ic_yield)
DRAM = Fab_DRAM(config = "ddr3_50nm", fab_yield = ic_yield)
##############################
# Computing carbon footprint of IC's
##############################
CPU_Logic.set_area(cpu_area)
DRAM.set_capacity(dellr740_dram)
DRAM_SSD_main.set_capacity(dellr740_ssd_dram)
SSD_main.set_capacity(dellr740_large_ssd)
DRAM_SSD_secondary.set_capacity(dellr740_ssd_dram)
SSD_secondary.set_capacity(dellr740_ssd)
##################################
# Computing the packaging footprint
##################################
# number of packages
ssd_main_nr = 12 + 1
ssd_secondary_nr = 12 + 1
dram_nr = 18 + 1
cpu_nr = 2
packaging_intensity = 150 # gram CO2
SSD_main_packaging = packaging_intensity * ssd_main_nr
SSD_secondary_packaging = packaging_intensity * ssd_secondary_nr
DRAM_packaging = packaging_intensity * dram_nr
CPU_packaging = packaging_intensity * cpu_nr
total_packaging = SSD_main_packaging + \
SSD_secondary_packaging + \
                   DRAM_packaging + \
CPU_packaging
total_packaging = total_packaging / 1000.
##################################
# Compute end-to-end carbon footprints
##################################
SSD_main_count = 8 # There are 8x3.84TB SSD's
SSD_main_co2 = (SSD_main.get_carbon() + \
DRAM_SSD_main.get_carbon() + \
SSD_main_packaging) / 1000.
SSD_main_co2 = SSD_main_co2 * SSD_main_count
SSD_secondary_count = 1 # There is 1x400GB SSD
SSD_secondary_co2 = (SSD_secondary.get_carbon() + \
DRAM_SSD_secondary.get_carbon() + \
SSD_secondary_packaging) / 1000.
SSD_secondary_co2 = SSD_secondary_co2 * SSD_secondary_count
DRAM_count = 12 # There are 12 x (32GB+4GB ECC DRAM modules)
DRAM_co2 = (DRAM.get_carbon() + DRAM_packaging) / 1000. * DRAM_count
CPU_count = 2
CPU_co2 = (CPU_Logic.get_carbon() + CPU_packaging) * CPU_count / 1000.
if debug:
print("ACT SSD main", SSD_main_co2, "kg CO2")
print("ACT SSD secondary", SSD_secondary_co2, "kg CO2")
print("ACT DRAM", DRAM_co2, "kg CO2")
print("ACT CPU", CPU_co2, "kg CO2")
print("ACT Packaging", total_packaging, "kg CO2")
print("--------------------------------")
print("ACT SSD main", SSD_main_co2, "kg CO2 vs. LCA 3373 kg CO2")
print("ACT SSD secondary", SSD_secondary_co2, "kg CO2 vs. LCA 64.1 kg CO2")
print("ACT DRAM", DRAM_co2, "kg CO2 vs. LCA 533 kg CO2")
print("ACT CPU", CPU_co2, "kg CO2 vs. LCA 47 kg CO2")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from dram_model import Fab_DRAM
from hdd_model import Fab_HDD
from ssd_model import Fab_SSD
from logic_model import Fab_Logic
debug = False
# Main Fairphone integrated circuits
fairphone3_ICs = ["IC analog switch",
"LED Flash",
"LED Flash",
"CMOS image sensor",
"Light sensor",
"Light sensor",
"LED Full Color",
"Image sensor",
"I.C WLAN",
"I.C WLAN",
"Audio power amplifier",
"IC analog switch",
"IC power amplifier",
"IC PMU",
"IC PMU",
"IC PMU",
"Sensor",
"NFC Microcontroller",
"IC transceiver",
"IC audio power",
]
# Main Fairphone integrated circuits' areas in mm^2
fairphone3_IC_areas = [0.85,
1.2,
1.2,
35,
0.89,
0.08,
0.25,
18,
11.6,
1.44,
12.96,
1.61,
6.3,
26.88,
0.77,
11.36,
7,
8.69,
11,
9.6]
fairphone_cpu_area = 46.4 #mm^2
fairphone_ram = 4 # GB
fairphone_storage = 64 # GB
ic_yield = 0.875
##################################
# Estimated process technology node to mimic the Fairphone LCA process node
# This initializes ACT with an older technology node.
##################################
# IC Logic node
IC_Logic = Fab_Logic(gpa = "95",
carbon_intensity = "src_coal",
process_node = 28,
fab_yield=ic_yield)
# CPU Application processor node
CPU_Logic = Fab_Logic(gpa = "95",
carbon_intensity = "src_coal",
process_node = 28,
fab_yield=ic_yield)
# DRAM Logic node
DRAM = Fab_DRAM(config = "ddr3_50nm", fab_yield=ic_yield)
# SSD Logic node
SSD = Fab_SSD(config = "nand_30nm", fab_yield=ic_yield)
##################################
# Computing the IC footprint
##################################
IC_Logic.set_area(sum(fairphone3_IC_areas)/100.)
CPU_Logic.set_area(fairphone_cpu_area/100.)
DRAM.set_capacity(fairphone_ram)
SSD.set_capacity(fairphone_storage)
##################################
# Computing the packaging footprint
##################################
#Number of packages
nr = len(fairphone3_ICs) + 1 + 1 + 1 # Fairphone ICs + CPU + DRAM + SSD
packaging_intensity = 150 # gram CO2
PackagingFootprint = nr * packaging_intensity
if debug:
print("ACT IC", IC_Logic.get_carbon(), "g CO2")
print("ACT CPU", CPU_Logic.get_carbon(), "g CO2")
print("ACT DRAM", DRAM.get_carbon(), "g CO2")
print("ACT SSD", SSD.get_carbon(), "g CO2")
print("ACT Packaging", PackagingFootprint, "g CO2")
print("--------------------------------")
ram_flash = (DRAM.get_carbon() + SSD.get_carbon() + packaging_intensity * 2) / 1000.
fairphone_ram_flash = 11
print("ACT RAM + Flash", ram_flash, "kg CO2 vs. LCA", fairphone_ram_flash, "kg CO2")
cpu = (CPU_Logic.get_carbon() + packaging_intensity) / 1000.
fairphone_cpu = 1.07
print("ACT CPU", cpu, "kg CO2 vs. LCA", fairphone_cpu, "kg CO2")
ics = (IC_Logic.get_carbon() + packaging_intensity * len(fairphone3_ICs)) / 1000.
fairphone_ics = 5.3
print("ACT ICs", ics, "kg CO2 vs. LCA", fairphone_ics, "kg CO2")
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements_file(path):
    with open(path) as f:
        reqs = []
        for line in f:
            line = line.strip()
            # Skip blank lines and comments so they are not treated as requirements
            if not line or line.startswith("#"):
                continue
            reqs.append(line.split("==")[0])
    return reqs
reqs_main = parse_requirements_file("requirements/main.txt")
reqs_dev = parse_requirements_file("requirements/dev.txt")
setuptools.setup(
name="active-mri-acquisition",
version="0.1.0",
author="Facebook AI Research",
description="A reinforcement learning environment for active MRI acquisition.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/active-mri-acquisition/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence :: Medical Imaging",
],
python_requires=">=3.7",
install_requires=reqs_main,
extras_require={"dev": reqs_main + reqs_dev},
)
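# Typical installation (assumption, not part of the original setup script):
#
#   pip install -e .            # core package
#   pip install -e ".[dev]"     # with the development extras defined above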
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import nox
@nox.session()
def lint(session):
session.install("--upgrade", "setuptools", "pip")
session.install("-r", "requirements/dev.txt")
session.run("flake8", "activemri")
# session.run("black", "--check", "activemri")
@nox.session()
def mypy(session):
session.install("--upgrade", "setuptools", "pip")
session.install("-r", "requirements/dev.txt")
session.run("mypy", "activemri")
@nox.session()
def pytest(session) -> None:
session.install("--upgrade", "setuptools", "pip")
session.install("torch")
session.install("torchvision")
session.install("-e", ".")
session.run("pytest", "tests/core")
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import data, envs, experimental
__all__ = ["data", "envs", "experimental"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import cvpr19_models
__all__ = ["cvpr19_models"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import data, models, options, util
__all__ = ["data", "models", "options", "util"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ignite.engine
import logging
import os
import tempfile
import types
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.metrics import Loss
from tensorboardX import SummaryWriter
from typing import Any, Dict, Tuple
import activemri.experimental.cvpr19_models.data as data
import activemri.experimental.cvpr19_models.models as models
import activemri.experimental.cvpr19_models.options as options
import activemri.experimental.cvpr19_models.util as util
def run_validation_and_update_best_checkpoint(
engine: ignite.engine.Engine,
val_engine: ignite.engine.Engine = None,
progress_bar: ignite.contrib.handlers.ProgressBar = None,
val_loader: torch.utils.data.DataLoader = None,
trainer: "Trainer" = None,
):
val_engine.run(val_loader)
metrics = val_engine.state.metrics
if trainer.options.use_evaluator:
progress_bar.log_message(
f"Validation Results - Epoch: {engine.state.epoch} "
f"MSE: {metrics['mse']:.3f} SSIM: {metrics['ssim']:.3f} loss_D: "
f"{metrics['loss_D']:.3f}"
)
else:
progress_bar.log_message(
f"Validation Results - Epoch: {engine.state.epoch} "
f"MSE: {metrics['mse']:.3f} SSIM: {metrics['ssim']:.3f}"
)
trainer.completed_epochs += 1
score = -metrics["loss_D"] if trainer.options.only_evaluator else -metrics["mse"]
if score > trainer.best_validation_score:
trainer.best_validation_score = score
full_path = save_checkpoint_function(trainer, "best_checkpoint")
progress_bar.log_message(
f"Saved best checkpoint to {full_path}. Score: {score}. "
f"Iteration: {engine.state.iteration}"
)
def save_checkpoint_function(trainer: "Trainer", filename: str) -> str:
# Ensures atomic checkpoint save to avoid corrupted files if preempted during a save operation
tmp_filename = tempfile.NamedTemporaryFile(
delete=False, dir=trainer.options.checkpoints_dir
)
try:
torch.save(trainer.create_checkpoint(), tmp_filename)
except BaseException:
tmp_filename.close()
os.remove(tmp_filename.name)
raise
else:
tmp_filename.close()
full_path = os.path.join(trainer.options.checkpoints_dir, filename + ".pth")
os.rename(tmp_filename.name, full_path)
return full_path
def save_regular_checkpoint(
engine: ignite.engine.Engine,
trainer: "Trainer" = None,
progress_bar: ignite.contrib.handlers.ProgressBar = None,
):
full_path = save_checkpoint_function(trainer, "regular_checkpoint")
progress_bar.log_message(
f"Saved regular checkpoint to {full_path}. Epoch: {trainer.completed_epochs}, "
f"Iteration: {engine.state.iteration}"
)
class Trainer:
def __init__(self, options: types.SimpleNamespace):
self.reconstructor: torch.nn.Module = None
self.evaluator: torch.nn.Module = None
self.options = options
self.best_validation_score = -float("inf")
self.completed_epochs = 0
self.updates_performed = 0
criterion_gan = models.fft_utils.GANLossKspace(
use_mse_as_energy=options.use_mse_as_disc_energy,
grad_ctx=options.grad_ctx,
gamma=options.gamma,
options=self.options,
).to(options.device)
self.losses = {
"GAN": criterion_gan,
"NLL": models.fft_utils.gaussian_nll_loss,
}
if self.options.only_evaluator:
self.options.checkpoints_dir = os.path.join(
self.options.checkpoints_dir,
"evaluator",
)
if not os.path.exists(self.options.checkpoints_dir):
os.makedirs(self.options.checkpoints_dir)
def create_checkpoint(self) -> Dict[str, Any]:
return {
"reconstructor": self.reconstructor.state_dict(),
"evaluator": self.evaluator.state_dict()
if self.options.use_evaluator
else None,
"options": self.options,
"optimizer_G": self.optimizers["G"].state_dict(),
"optimizer_D": self.optimizers["D"].state_dict()
if self.options.use_evaluator
else None,
"completed_epochs": self.completed_epochs,
"best_validation_score": self.best_validation_score,
"updates_performed": self.updates_performed,
}
def get_loaders(
self,
) -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:
train_data_loader, val_data_loader = data.create_data_loaders(self.options)
return train_data_loader, val_data_loader
def inference(self, batch):
self.reconstructor.eval()
with torch.no_grad():
(
zero_filled_image,
ground_truth,
mask,
) = models.fft_utils.preprocess_inputs(
batch, self.options.dataroot, self.options.device
)
# Get reconstructor output
reconstructed_image, uncertainty_map, mask_embedding = self.reconstructor(
zero_filled_image, mask
)
reconstructor_eval = None
ground_truth_eval = None
if self.evaluator is not None:
self.evaluator.eval()
reconstructor_eval = self.evaluator(
reconstructed_image, mask_embedding, mask
)
ground_truth_eval = self.evaluator(ground_truth, mask_embedding, mask)
# Compute magnitude (for val losses and plots)
zero_filled_image_magnitude = models.fft_utils.to_magnitude(
zero_filled_image
)
reconstructed_image_magnitude = models.fft_utils.to_magnitude(
reconstructed_image
)
ground_truth_magnitude = models.fft_utils.to_magnitude(ground_truth)
if self.options.dataroot == "KNEE_RAW": # crop data
reconstructed_image_magnitude = models.fft_utils.center_crop(
reconstructed_image_magnitude, [320, 320]
)
ground_truth_magnitude = models.fft_utils.center_crop(
ground_truth_magnitude, [320, 320]
)
zero_filled_image_magnitude = models.fft_utils.center_crop(
zero_filled_image_magnitude, [320, 320]
)
uncertainty_map = models.fft_utils.center_crop(
uncertainty_map, [320, 320]
)
return {
"ground_truth": ground_truth,
"zero_filled_image": zero_filled_image,
"reconstructed_image": reconstructed_image,
"ground_truth_magnitude": ground_truth_magnitude,
"zero_filled_image_magnitude": zero_filled_image_magnitude,
"reconstructed_image_magnitude": reconstructed_image_magnitude,
"uncertainty_map": uncertainty_map,
"mask": mask,
"reconstructor_eval": reconstructor_eval,
"ground_truth_eval": ground_truth_eval,
}
def load_from_checkpoint_if_present(self):
if not os.path.exists(self.options.checkpoints_dir):
return
self.logger.info(f"Checkpoint folder found at {self.options.checkpoints_dir}")
files = os.listdir(self.options.checkpoints_dir)
for filename in files:
if "regular_checkpoint" in filename:
self.logger.info(f"Loading checkpoint {filename}.pth")
checkpoint = torch.load(
os.path.join(self.options.checkpoints_dir, filename)
)
self.reconstructor.load_state_dict(checkpoint["reconstructor"])
if self.options.use_evaluator:
self.evaluator.load_state_dict(checkpoint["evaluator"])
self.optimizers["D"].load_state_dict(checkpoint["optimizer_D"])
self.optimizers["G"].load_state_dict(checkpoint["optimizer_G"])
self.completed_epochs = checkpoint["completed_epochs"]
self.best_validation_score = checkpoint["best_validation_score"]
self.updates_performed = checkpoint["updates_performed"]
def load_weights_from_given_checkpoint(self):
if self.options.weights_checkpoint is None:
return
elif not os.path.exists(self.options.weights_checkpoint):
            raise FileNotFoundError("Specified weights checkpoint does not exist!")
self.logger.info(
f"Loading weights from checkpoint found at {self.options.weights_checkpoint}."
)
checkpoint = torch.load(self.options.weights_checkpoint)
self.reconstructor.load_state_dict(checkpoint["reconstructor"])
if (
self.options.use_evaluator
and "evaluator" in checkpoint
and checkpoint["evaluator"] is not None
):
self.evaluator.load_state_dict(checkpoint["evaluator"])
else:
self.logger.info("Evaluator was not loaded.")
def update(self, batch):
if not self.options.only_evaluator:
self.reconstructor.train()
(zero_filled_image, target, mask,) = models.fft_utils.preprocess_inputs(
batch, self.options.dataroot, self.options.device
)
# Get reconstructor output
reconstructed_image, uncertainty_map, mask_embedding = self.reconstructor(
zero_filled_image, mask
)
# ------------------------------------------------------------------------
# Update evaluator and compute generator GAN Loss
# ------------------------------------------------------------------------
loss_G_GAN = 0
loss_D = torch.tensor(0.0)
if self.evaluator is not None:
self.evaluator.train()
self.optimizers["D"].zero_grad()
fake = reconstructed_image
detached_fake = fake.detach()
if self.options.mask_embed_dim != 0:
mask_embedding = mask_embedding.detach()
output = self.evaluator(
detached_fake,
mask_embedding,
mask if self.options.add_mask_eval else None,
)
loss_D_fake = self.losses["GAN"](
output, False, mask, degree=0, pred_and_gt=(detached_fake, target)
)
real = target
output = self.evaluator(
real, mask_embedding, mask if self.options.add_mask_eval else None
)
loss_D_real = self.losses["GAN"](
output, True, mask, degree=1, pred_and_gt=(detached_fake, target)
)
loss_D = loss_D_fake + loss_D_real
loss_D.backward(retain_graph=True)
self.optimizers["D"].step()
if not self.options.only_evaluator:
output = self.evaluator(
fake, mask_embedding, mask if self.options.add_mask_eval else None
)
loss_G_GAN = self.losses["GAN"](
output,
True,
mask,
degree=1,
updateG=True,
pred_and_gt=(fake, target),
)
loss_G_GAN *= self.options.lambda_gan
# ------------------------------------------------------------------------
# Update reconstructor
# ------------------------------------------------------------------------
loss_G = torch.tensor(0.0)
if not self.options.only_evaluator:
self.optimizers["G"].zero_grad()
loss_G = self.losses["NLL"](
reconstructed_image, target, uncertainty_map, self.options
).mean()
loss_G += loss_G_GAN
loss_G.backward()
self.optimizers["G"].step()
self.updates_performed += 1
return {"loss_D": loss_D.item(), "loss_G": loss_G.item()}
def discriminator_loss(
self,
reconstructor_eval,
target_eval,
reconstructed_image=None,
target=None,
mask=None,
):
if self.evaluator is None:
return 0
with torch.no_grad():
loss_D_fake = self.losses["GAN"](
reconstructor_eval,
False,
mask,
degree=0,
pred_and_gt=(reconstructed_image, target),
)
loss_D_real = self.losses["GAN"](
target_eval,
True,
mask,
degree=1,
pred_and_gt=(reconstructed_image, target),
)
return loss_D_fake + loss_D_real
def __call__(self) -> float:
self.logger = logging.getLogger()
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
fh = logging.FileHandler(
os.path.join(self.options.checkpoints_dir, "trainer.log")
)
formatter = logging.Formatter(
"%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"
)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.info("Creating trainer with the following options:")
for key, value in vars(self.options).items():
if key == "device":
value = value.type
elif key == "gpu_ids":
value = "cuda : " + str(value) if torch.cuda.is_available() else "cpu"
self.logger.info(f" {key:>25}: {'None' if value is None else value:<30}")
# Create Reconstructor Model
self.reconstructor = models.reconstruction.ReconstructorNetwork(
number_of_cascade_blocks=self.options.number_of_cascade_blocks,
n_downsampling=self.options.n_downsampling,
number_of_filters=self.options.number_of_reconstructor_filters,
number_of_layers_residual_bottleneck=self.options.number_of_layers_residual_bottleneck,
mask_embed_dim=self.options.mask_embed_dim,
dropout_probability=self.options.dropout_probability,
img_width=self.options.image_width,
use_deconv=self.options.use_deconv,
)
if self.options.device.type == "cuda":
self.reconstructor = torch.nn.DataParallel(self.reconstructor).to(
self.options.device
)
self.optimizers = {
"G": optim.Adam(
self.reconstructor.parameters(),
lr=self.options.lr,
betas=(self.options.beta1, 0.999),
)
}
# Create Evaluator Model
if self.options.use_evaluator:
self.evaluator = models.evaluator.EvaluatorNetwork(
number_of_filters=self.options.number_of_evaluator_filters,
number_of_conv_layers=self.options.number_of_evaluator_convolution_layers,
use_sigmoid=False,
width=self.options.image_width,
height=640 if self.options.dataroot == "KNEE_RAW" else None,
mask_embed_dim=self.options.mask_embed_dim,
)
self.evaluator = torch.nn.DataParallel(self.evaluator).to(
self.options.device
)
self.optimizers["D"] = optim.Adam(
self.evaluator.parameters(),
lr=self.options.lr,
betas=(self.options.beta1, 0.999),
)
train_loader, val_loader = self.get_loaders()
self.load_from_checkpoint_if_present()
self.load_weights_from_given_checkpoint()
writer = SummaryWriter(self.options.checkpoints_dir)
# Training engine and handlers
train_engine = Engine(lambda engine, batch: self.update(batch))
val_engine = Engine(lambda engine, batch: self.inference(batch))
validation_mse = Loss(
loss_fn=F.mse_loss,
output_transform=lambda x: (
x["reconstructed_image_magnitude"],
x["ground_truth_magnitude"],
),
)
validation_mse.attach(val_engine, name="mse")
validation_ssim = Loss(
loss_fn=util.common.compute_ssims,
output_transform=lambda x: (
x["reconstructed_image_magnitude"],
x["ground_truth_magnitude"],
),
)
validation_ssim.attach(val_engine, name="ssim")
if self.options.use_evaluator:
validation_loss_d = Loss(
loss_fn=self.discriminator_loss,
output_transform=lambda x: (
x["reconstructor_eval"],
x["ground_truth_eval"],
{
"reconstructed_image": x["reconstructed_image"],
"target": x["ground_truth"],
"mask": x["mask"],
},
),
)
validation_loss_d.attach(val_engine, name="loss_D")
progress_bar = ProgressBar()
progress_bar.attach(train_engine)
train_engine.add_event_handler(
Events.EPOCH_COMPLETED,
run_validation_and_update_best_checkpoint,
val_engine=val_engine,
progress_bar=progress_bar,
val_loader=val_loader,
trainer=self,
)
# Tensorboard Plots
@train_engine.on(Events.ITERATION_COMPLETED)
def plot_training_loss(engine):
writer.add_scalar(
"training/generator_loss",
engine.state.output["loss_G"],
self.updates_performed,
)
if "loss_D" in engine.state.output:
writer.add_scalar(
"training/discriminator_loss",
engine.state.output["loss_D"],
self.updates_performed,
)
@train_engine.on(Events.EPOCH_COMPLETED)
def plot_validation_loss(_):
writer.add_scalar(
"validation/MSE", val_engine.state.metrics["mse"], self.completed_epochs
)
writer.add_scalar(
"validation/SSIM",
val_engine.state.metrics["ssim"],
self.completed_epochs,
)
if "loss_D" in val_engine.state.metrics:
writer.add_scalar(
"validation/loss_D",
val_engine.state.metrics["loss_D"],
self.completed_epochs,
)
@train_engine.on(Events.EPOCH_COMPLETED)
def plot_validation_images(_):
ground_truth = val_engine.state.output["ground_truth_magnitude"]
zero_filled_image = val_engine.state.output["zero_filled_image_magnitude"]
reconstructed_image = val_engine.state.output[
"reconstructed_image_magnitude"
]
uncertainty_map = val_engine.state.output["uncertainty_map"]
difference = torch.abs(ground_truth - reconstructed_image)
# Create plots
ground_truth = util.common.create_grid_from_tensor(ground_truth)
writer.add_image(
"validation_images/ground_truth", ground_truth, self.completed_epochs
)
zero_filled_image = util.common.create_grid_from_tensor(zero_filled_image)
writer.add_image(
"validation_images/zero_filled_image",
zero_filled_image,
self.completed_epochs,
)
reconstructed_image = util.common.create_grid_from_tensor(
reconstructed_image
)
writer.add_image(
"validation_images/reconstructed_image",
reconstructed_image,
self.completed_epochs,
)
uncertainty_map = util.common.gray2heatmap(
util.common.create_grid_from_tensor(uncertainty_map.exp()),
cmap="jet",
)
writer.add_image(
"validation_images/uncertainty_map",
uncertainty_map,
self.completed_epochs,
)
difference = util.common.create_grid_from_tensor(difference)
difference = util.common.gray2heatmap(difference, cmap="gray")
writer.add_image(
"validation_images/difference", difference, self.completed_epochs
)
mask = util.common.create_grid_from_tensor(
val_engine.state.output["mask"].repeat(
1, 1, val_engine.state.output["mask"].shape[3], 1
)
)
writer.add_image(
"validation_images/mask_image", mask, self.completed_epochs
)
train_engine.add_event_handler(
Events.EPOCH_COMPLETED,
save_regular_checkpoint,
trainer=self,
progress_bar=progress_bar,
)
train_engine.run(train_loader, self.options.max_epochs - self.completed_epochs)
writer.close()
return self.best_validation_score
if __name__ == "__main__":
options_ = options.train_options.TrainOptions().parse()
options_.device = (
torch.device("cuda:{}".format(options_.gpu_ids[0]))
if options_.gpu_ids
else torch.device("cpu")
)
options_.checkpoints_dir = os.path.join(options_.checkpoints_dir, options_.name)
if not os.path.exists(options_.checkpoints_dir):
os.makedirs(options_.checkpoints_dir)
trainer_ = Trainer(options_)
trainer_()
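# Example invocation (hypothetical script name, paths, and values; the flags
# are defined in options/base_options.py and options/train_options.py):
#
#   python trainer.py --dataset_dir /path/to/fastmri --dataroot KNEE_RAW \
#       --name my_experiment --checkpoints_dir ./checkpoints \
#       --batchSize 2 --max_epochs 100 --mask_type basic --gpu_ids 0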
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import base_options
class TrainOptions(base_options.BaseOptions):
def initialize(self, parser):
parser = base_options.BaseOptions.initialize(self, parser)
parser.add_argument(
"--beta1", type=float, default=0.5, help="momentum term of adam"
)
parser.add_argument(
"--lr", type=float, default=0.0002, help="initial learning rate for adam"
)
parser.add_argument(
"--mask_type",
type=str,
choices=[
"basic",
"symmetric_basic",
"low_to_high",
"grid",
"symmetric_grid",
"basic_rnl",
"symmetric_basic_rnl",
"low_to_high_rnl",
],
help="The type of mask to use.",
)
parser.add_argument(
"--rnl_params",
type=str,
default=None,
help="Characterizes the distribution of initial masks (when these are sampled, see "
"--train_with_fixed_initial_mask). "
"Format is min_lowf_lines,max_lowf_lines,highf_beta_alpha,highf_beta_beta. "
"Mask have a random number of low frequency lines active, uniform between "
"min_lowf_lines and max_lowf_lines. The remaining number of lines is determined by "
"a Beta(highf_beta_alpha, highf_beta_beta) distribution, which indicates the "
"proportion of the remaining lines to sample.",
)
parser.add_argument(
"--debug", action="store_true", help="Activates debug level messages."
)
parser.add_argument(
"--add_mask_eval",
action="store_true",
help="Sum mask values to observation in evaluator model.",
)
parser.add_argument("--weights_checkpoint", type=str, default=None)
# parser.add_argument("--validation_train_split_ratio", type=float, default=0.9)
parser.add_argument(
"--max_epochs",
type=int,
default=100,
help="number of epochs to train (default: 5)",
)
# parser.add_argument("--save_freq", type=int, default=200)
# Options for Reconstruction Model
parser.add_argument("--number_of_reconstructor_filters", type=int, default=128)
parser.add_argument("--dropout_probability", type=float, default=0)
parser.add_argument("--number_of_cascade_blocks", type=int, default=3)
parser.add_argument(
"--number_of_layers_residual_bottleneck", type=int, default=6
)
parser.add_argument("--n_downsampling", type=int, default=3)
parser.add_argument("--use_deconv", type=bool, default=True)
# Options for Evaluator Model
parser.add_argument(
"--no_evaluator", dest="use_evaluator", action="store_false"
)
parser.add_argument("--number_of_evaluator_filters", type=int, default=128)
parser.add_argument(
"--number_of_evaluator_convolution_layers", type=int, default=4
)
# Options for both Reconstructor and Evaluator Model
parser.add_argument("--mask_embed_dim", type=int, default=6)
parser.add_argument("--image_width", type=int, default=128)
# Options moved from old model file
parser.add_argument(
"--use_mse_as_disc_energy",
action="store_true",
help="use MSE as evaluator energy",
)
parser.add_argument(
"--grad_ctx",
action="store_true",
help="GAN criterion computes adversarial loss signal at provided k-space lines.",
)
parser.add_argument(
"--lambda_gan",
type=float,
default=0.01,
help="Weight for reconstruction loss.",
)
parser.add_argument("--gamma", type=int, default=100)
parser.add_argument(
"--only_evaluator", dest="only_evaluator", action="store_true"
)
return parser
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import base_options, train_options # noqa:F401
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
class BaseOptions:
def __init__(self):
self.initialized = False
self.parser = None
def initialize(self, parser):
parser.add_argument(
"--dataset_dir", required=True, help="Path to fastmri dataset."
)
parser.add_argument(
"--dataroot",
required=True,
help="Path to images (should have subfolders trainA, trainB, valA, valB, etc)",
)
parser.add_argument(
"--batchSize", type=int, default=1, help="Input batch size."
)
parser.add_argument(
"--gpu_ids",
type=str,
default="0",
help="GPU IDs: e.g. 0 0,1,2, 0,2. use -1 for CPU.",
)
parser.add_argument(
"--name",
type=str,
default="experiment_name",
help="Name of the experiment. It determines the sub folder where results are stored.",
)
parser.add_argument(
"--nThreads", default=4, type=int, help="Number of threads for data loader."
)
parser.add_argument(
"--checkpoints_dir",
type=str,
default="./checkpoints",
help="Root directory to save results and model checkpoints.",
)
parser.add_argument(
"--init_type",
type=str,
choices=["normal", "xavier", "kaiming", "orthogonal"],
default="normal",
help="Network weights initialization type.",
)
parser.add_argument(
"--num_volumes_train",
type=int,
default=None,
help="Number of MRI volumes to use for training.",
)
parser.add_argument(
"--num_volumes_val",
type=int,
default=None,
help="Number of MRI volumes to use for validation.",
)
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
parser = None
if not self.initialized:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
allow_abbrev=False,
)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ""
message += "----------------- Options ---------------\n"
for k, v in sorted(vars(opt).items()):
comment = ""
default = self.parser.get_default(k)
if v != default:
comment = "\t[default: %s]" % str(default)
message += "{:>25}: {:<30}{}\n".format(str(k), str(v), comment)
message += "----------------- End -------------------"
print(message)
def parse(self, silent=True):
opt = self.gather_options()
# set gpu ids
str_ids = opt.gpu_ids.split(",")
opt.gpu_ids = []
        # NOTE: GPU ids are re-indexed to 0..N-1, so only the number of
        # comma-separated entries in --gpu_ids is used here.
        for local_id in range(len(str_ids)):
            opt.gpu_ids.append(local_id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])
            opt.batchSize *= len(opt.gpu_ids)
            print(
                f"Using multiple GPUs; batchSize is scaled by {len(opt.gpu_ids)} "
                f"to {opt.batchSize}"
            )
if not silent:
self.print_options(opt)
return opt
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import common
__all__ = ["common"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from typing import Dict, Optional
import matplotlib.pyplot as plt
import numpy as np
import skimage.measure
import torch
import torchvision.utils as tvutil
def load_checkpoint(checkpoint_path: str) -> Optional[Dict]:
if os.path.isfile(checkpoint_path):
logging.info(f"Found checkpoint at {checkpoint_path}.")
return torch.load(checkpoint_path)
logging.info(f"No checkpoint found at {checkpoint_path}.")
return None
def compute_ssims(xs, ys):
ssims = []
for i in range(xs.shape[0]):
ssim = skimage.measure.compare_ssim(
xs[i, 0].cpu().numpy(),
ys[i, 0].cpu().numpy(),
data_range=ys[i, 0].cpu().numpy().max(),
)
ssims.append(ssim)
return np.array(ssims).mean()
def compute_psnrs(xs, ys):
psnrs = []
for i in range(xs.shape[0]):
psnr = skimage.measure.compare_psnr(
xs[i, 0].cpu().numpy(),
ys[i, 0].cpu().numpy(),
data_range=ys[i, 0].cpu().numpy().max(),
)
psnrs.append(psnr)
return np.array(psnrs).mean()
def compute_mse(xs, ys):
return np.mean((ys.cpu().numpy() - xs.cpu().numpy()) ** 2)
def compute_nmse(xs, ys):
ys_numpy = ys.cpu().numpy()
return (
np.linalg.norm(ys_numpy - xs.cpu().numpy()) ** 2 / np.linalg.norm(ys_numpy) ** 2
)
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8, renormalize=True):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
    # Normalize first: since we work in Fourier space, values must be clamped to a valid image range
if renormalize:
image_tensor.add_(1).div_(2)
image_tensor.mul_(255).clamp_(0, 255)
if len(image_tensor.shape) == 4:
image_numpy = image_tensor[0].cpu().float().numpy()
else:
image_numpy = image_tensor.cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
return image_numpy.astype(imtype)
def create_grid_from_tensor(tensor_of_images, num_rows=4):
# take norm over real-imaginary dimension
# tensor_of_images = tensor_of_images.norm(dim=1, keepdim=True)
# make image grid
tensor_grid = tvutil.make_grid(
tensor_of_images, nrow=num_rows, normalize=True, scale_each=False
)
numpy_grid = tensor2im(tensor_grid, renormalize=False)
return numpy_grid
def gray2heatmap(grayimg, cmap="jet"):
cmap = plt.get_cmap(cmap)
rgba_img = cmap(grayimg)
# rgb_img = np.delete(rgba_img, 3, 2) * 255.0
rgb_img = rgba_img[:, :, :, 0] * 255.0
rgb_img = rgb_img.astype(np.uint8)
return rgb_img
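# Minimal usage sketch (added; not part of the original module). The metric
# helpers expect batched single-channel tensors shaped [B, 1, H, W]:
#
#   import torch
#   xs = torch.rand(2, 1, 64, 64)
#   ys = torch.rand(2, 1, 64, 64)
#   print(compute_mse(xs, ys), compute_nmse(xs, ys), compute_ssims(xs, ys))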
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
cvpr19_models.models.reconstruction.py
======================================
MRI Reconstruction model as described in `Zhang, Zizhao, et al. "Reducing uncertainty in
undersampled mri reconstruction with active acquisition." Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition. 2019.`
"""
import functools
import torch
import torch.nn as nn
from . import fft_utils
def get_norm_layer(norm_type="instance"):
if norm_type == "batch":
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == "instance":
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
elif norm_type == "none":
norm_layer = None
else:
raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
return norm_layer
def init_func(m):
init_type = "normal"
gain = 0.02
classname = m.__class__.__name__
if hasattr(m, "weight") and (
classname.find("Conv") != -1 or classname.find("Linear") != -1
):
if init_type == "normal":
torch.nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == "xavier":
torch.nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == "kaiming":
torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
elif init_type == "orthogonal":
torch.nn.init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError(
"initialization method [%s] is not implemented" % init_type
)
if hasattr(m, "bias") and m.bias is not None:
torch.nn.init.constant_(m.bias.data, 0.0)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, gain)
torch.nn.init.constant_(m.bias.data, 0.0)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, dropout_probability, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(
dim, padding_type, norm_layer, dropout_probability, use_bias
)
def build_conv_block(
self, dim, padding_type, norm_layer, dropout_probability, use_bias
):
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True),
]
if dropout_probability > 0:
conv_block += [nn.Dropout(dropout_probability)]
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ReconstructorNetwork(nn.Module):
"""Reconstructor network used in Zhang et al., CVPR'19.
Args:
number_of_encoder_input_channels(int): Number of input channels to the
reconstruction model.
number_of_decoder_output_channels(int): Number of output channels
of the reconstruction model.
number_of_filters(int): Number of convolutional filters.\n
dropout_probability(float): Dropout probability.
number_of_layers_residual_bottleneck (int): Number of residual
blocks in each model between two consecutive down-
or up-sampling operations.
number_of_cascade_blocks (int): Number of times the entire architecture is
replicated.
mask_embed_dim(int): Dimensionality of the mask embedding.
padding_type(str): Convolution operation padding type.
n_downsampling(int): Number of down-sampling operations.
img_width(int): The width of the image.
        use_deconv(bool): Whether to use transposed convolutions for up-sampling.
"""
def __init__(
self,
number_of_encoder_input_channels=2,
number_of_decoder_output_channels=3,
number_of_filters=128,
dropout_probability=0.0,
number_of_layers_residual_bottleneck=6,
number_of_cascade_blocks=3,
mask_embed_dim=6,
padding_type="reflect",
n_downsampling=3,
img_width=128,
use_deconv=True,
):
super(ReconstructorNetwork, self).__init__()
self.number_of_encoder_input_channels = number_of_encoder_input_channels
self.number_of_decoder_output_channels = number_of_decoder_output_channels
self.number_of_filters = number_of_filters
self.use_deconv = use_deconv
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.number_of_cascade_blocks = number_of_cascade_blocks
        self.use_mask_embedding = mask_embed_dim > 0
if self.use_mask_embedding:
number_of_encoder_input_channels += mask_embed_dim
print("[Reconstructor Network] -> use masked embedding condition")
# Lists of encoder, residual bottleneck and decoder blocks for all cascade blocks
self.encoders_all_cascade_blocks = nn.ModuleList()
self.residual_bottlenecks_all_cascade_blocks = nn.ModuleList()
self.decoders_all_cascade_blocks = nn.ModuleList()
# Architecture for the Cascade Blocks
for iii in range(1, self.number_of_cascade_blocks + 1):
# Encoder for iii_th cascade block
encoder = [
nn.ReflectionPad2d(1),
nn.Conv2d(
number_of_encoder_input_channels,
number_of_filters,
kernel_size=3,
stride=2,
padding=0,
bias=use_bias,
),
norm_layer(number_of_filters),
nn.ReLU(True),
]
for i in range(1, n_downsampling):
mult = 2 ** i
encoder += [
nn.ReflectionPad2d(1),
nn.Conv2d(
number_of_filters * mult // 2,
number_of_filters * mult,
kernel_size=3,
stride=2,
padding=0,
bias=use_bias,
),
norm_layer(number_of_filters * mult),
nn.ReLU(True),
]
self.encoders_all_cascade_blocks.append(nn.Sequential(*encoder))
# Bottleneck for iii_th cascade block
residual_bottleneck = []
mult = 2 ** (n_downsampling - 1)
for i in range(number_of_layers_residual_bottleneck):
residual_bottleneck += [
ResnetBlock(
number_of_filters * mult,
padding_type=padding_type,
norm_layer=norm_layer,
dropout_probability=dropout_probability,
use_bias=use_bias,
)
]
self.residual_bottlenecks_all_cascade_blocks.append(
nn.Sequential(*residual_bottleneck)
)
# Decoder for iii_th cascade block
decoder = []
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - 1 - i)
if self.use_deconv:
decoder += [
nn.ConvTranspose2d(
number_of_filters * mult,
int(number_of_filters * mult / 2),
kernel_size=4,
stride=2,
padding=1,
bias=use_bias,
),
norm_layer(int(number_of_filters * mult / 2)),
nn.ReLU(True),
]
else:
decoder += [nn.Upsample(scale_factor=2), nn.ReflectionPad2d(1)] + [
nn.Conv2d(
number_of_filters * mult,
int(number_of_filters * mult / 2),
kernel_size=3,
stride=1,
padding=0,
bias=use_bias,
),
norm_layer(int(number_of_filters * mult / 2)),
nn.ReLU(True),
]
decoder += [
nn.Conv2d(
number_of_filters // 2,
number_of_decoder_output_channels,
kernel_size=1,
padding=0,
bias=False,
)
] # better
self.decoders_all_cascade_blocks.append(nn.Sequential(*decoder))
if self.use_mask_embedding:
self.mask_embedding_layer = nn.Sequential(
nn.Conv2d(img_width, mask_embed_dim, 1, 1)
)
self.apply(init_func)
    def data_consistency(self, x, input, mask):
        # Enforce k-space data consistency: keep the network prediction only at
        # unobserved k-space locations and add back the observed (zero-filled) input.
        ft_x = fft_utils.fft(x)
        fuse = (
            fft_utils.ifft(
                torch.where((1 - mask).byte(), ft_x, torch.tensor(0.0).to(ft_x.device))
            )
            + input
        )
        return fuse
def embed_mask(self, mask):
b, c, h, w = mask.shape
mask = mask.view(b, w, 1, 1)
cond_embed = self.mask_embedding_layer(mask)
return cond_embed
# noinspection PyUnboundLocalVariable
def forward(self, zero_filled_input, mask):
"""Generates reconstructions given images with partial k-space info.
Args:
zero_filled_input(torch.Tensor): Image obtained from zero-filled reconstruction
of partial k-space scans.
mask(torch.Tensor): Mask used in creating the zero filled image from ground truth
image.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor): Contains:\n
* Reconstructed high resolution image.
* Uncertainty map.
* Mask_embedding.
"""
if self.use_mask_embedding:
mask_embedding = self.embed_mask(mask)
mask_embedding = mask_embedding.repeat(
1, 1, zero_filled_input.shape[2], zero_filled_input.shape[3]
)
encoder_input = torch.cat([zero_filled_input, mask_embedding], 1)
else:
encoder_input = zero_filled_input
mask_embedding = None
residual_bottleneck_output = None
for cascade_block, (encoder, residual_bottleneck, decoder) in enumerate(
zip(
self.encoders_all_cascade_blocks,
self.residual_bottlenecks_all_cascade_blocks,
self.decoders_all_cascade_blocks,
)
):
encoder_output = encoder(encoder_input)
if cascade_block > 0:
# Skip connection from previous residual block
encoder_output = encoder_output + residual_bottleneck_output
residual_bottleneck_output = residual_bottleneck(encoder_output)
decoder_output = decoder(residual_bottleneck_output)
reconstructed_image = self.data_consistency(
decoder_output[:, :-1, ...], zero_filled_input, mask
)
uncertainty_map = decoder_output[:, -1:, :, :]
if self.use_mask_embedding:
encoder_input = torch.cat([reconstructed_image, mask_embedding], 1)
else:
encoder_input = reconstructed_image
return reconstructed_image, uncertainty_map, mask_embedding
def init_from_checkpoint(self, checkpoint):
if not isinstance(self, nn.DataParallel):
self.load_state_dict(
{
# This assumes that environment code runs in a single GPU
key.replace("module.", ""): val
for key, val in checkpoint["reconstructor"].items()
}
)
else:
self.load_state_dict(checkpoint["reconstructor"])
return checkpoint["options"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import evaluator, fft_utils, reconstruction # noqa: F401
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
def roll(x, shift, dim):
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
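# Note (added): on recent PyTorch versions this helper is expected to behave
# like the built-in, i.e. roughly torch.roll(x, shifts=shift, dims=dim); it is
# presumably kept here for compatibility with older PyTorch releases.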
# note that for IFFT we do not use irfft
# this function returns two channels where the first one (real part) is in image space
def ifftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifft(x, normalized=False, ifft_shift=False):
x = x.permute(0, 2, 3, 1)
y = torch.ifft(x, 2, normalized=normalized)
if ifft_shift:
y = ifftshift(y, dim=(1, 2))
return y.permute(0, 3, 1, 2)
def rfft(x, normalized=False):
    # x is grayscale, with a single channel in dimension 1
x = x.squeeze(1)
y = torch.rfft(x, 2, onesided=False, normalized=normalized)
return y.permute(0, 3, 1, 2)
def fft(x, normalized=False, shift=False):
x = x.permute(0, 2, 3, 1)
if shift:
x = fftshift(x, dim=(1, 2))
y = torch.fft(x, 2, normalized=normalized)
return y.permute(0, 3, 1, 2)
def center_crop(x, shape):
assert 0 < shape[0] <= x.shape[-2]
assert 0 < shape[1] <= x.shape[-1]
w_from = (x.shape[-1] - shape[0]) // 2
h_from = (x.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
x = x[..., h_from:h_to, w_from:w_to]
return x
def to_magnitude(tensor):
tensor = (tensor[:, 0, :, :] ** 2 + tensor[:, 1, :, :] ** 2) ** 0.5
return tensor.unsqueeze(1)
def dicom_to_0_1_range(tensor):
return (tensor.clamp(-3, 3) + 3) / 6
def gaussian_nll_loss(reconstruction, target, logvar, options):
reconstruction = to_magnitude(reconstruction)
target = to_magnitude(target)
if options.dataroot == "KNEE_RAW":
reconstruction = center_crop(reconstruction, [320, 320])
target = center_crop(target, [320, 320])
logvar = center_crop(logvar, [320, 320])
l2 = F.mse_loss(reconstruction, target, reduce=False)
# Clip logvar to make variance in [0.0001, 5], for numerical stability
logvar = logvar.clamp(-9.2, 1.609)
one_over_var = torch.exp(-logvar)
assert len(l2) == len(logvar)
return 0.5 * (one_over_var * l2 + logvar)
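# Note (added): per pixel this is the negative log-likelihood of a Gaussian
# N(target; reconstruction, sigma^2) with logvar = log(sigma^2), dropping the
# constant 0.5 * log(2*pi):
#
#   -log p = 0.5 * ((target - reconstruction)^2 / sigma^2 + log(sigma^2)) + const
#
# which matches the 0.5 * (one_over_var * l2 + logvar) returned above; the
# clamp keeps the variance roughly within [1e-4, 5] for numerical stability.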
def preprocess_inputs(batch, dataroot, device, prev_reconstruction=None):
mask = batch[0].to(device)
target = batch[1].to(device)
if dataroot == "KNEE_RAW":
k_space = batch[2].permute(0, 3, 1, 2).to(device)
# alter mask to always include the highest frequencies that include padding
mask = torch.where(
to_magnitude(k_space).sum(2).unsqueeze(2) == 0.0,
torch.tensor(1.0).to(device),
mask,
)
if prev_reconstruction is None:
masked_true_k_space = torch.where(
mask.byte(), k_space, torch.tensor(0.0).to(device)
)
else:
prev_reconstruction = prev_reconstruction.clone()
prev_reconstruction[:, :, :160, :] = 0
prev_reconstruction[:, :, -160:, :] = 0
prev_reconstruction[:, :, :, :24] = 0
prev_reconstruction[:, :, :, -24:] = 0
ft_x = fft(prev_reconstruction, shift=True)
masked_true_k_space = torch.where(mask.byte(), k_space, ft_x)
reconstructor_input = ifft(masked_true_k_space, ifft_shift=True)
target = target.permute(0, 3, 1, 2)
else:
fft_target = fft(target)
if prev_reconstruction is None:
masked_true_k_space = torch.where(
mask.byte(), fft_target, torch.tensor(0.0).to(device)
)
else:
ft_x = fft(prev_reconstruction)
masked_true_k_space = torch.where(mask.byte(), fft_target, ft_x)
reconstructor_input = ifft(masked_true_k_space)
return reconstructor_input, target, mask
class GANLossKspace(nn.Module):
def __init__(
self,
use_lsgan=True,
use_mse_as_energy=False,
grad_ctx=False,
gamma=100,
options=None,
):
super(GANLossKspace, self).__init__()
# self.register_buffer('real_label', torch.ones(imSize, imSize))
# self.register_buffer('fake_label', torch.zeros(imSize, imSize))
self.grad_ctx = grad_ctx
self.options = options
if use_lsgan:
self.loss = nn.MSELoss(size_average=False)
else:
self.loss = nn.BCELoss(size_average=False)
self.use_mse_as_energy = use_mse_as_energy
if use_mse_as_energy:
self.gamma = gamma
self.bin = 5
def get_target_tensor(self, input, target_is_real, degree, mask, pred_and_gt=None):
if target_is_real:
target_tensor = torch.ones_like(input)
target_tensor[:] = degree
else:
target_tensor = torch.zeros_like(input)
if not self.use_mse_as_energy:
if degree != 1:
target_tensor[:] = degree
else:
pred, gt = pred_and_gt
if self.options.dataroot == "KNEE_RAW":
gt = center_crop(gt, [368, 320])
pred = center_crop(pred, [368, 320])
w = gt.shape[2]
ks_gt = fft(gt, normalized=True)
ks_input = fft(pred, normalized=True)
ks_row_mse = F.mse_loss(ks_input, ks_gt, reduce=False).sum(
1, keepdim=True
).sum(2, keepdim=True).squeeze() / (2 * w)
energy = torch.exp(-ks_row_mse * self.gamma)
target_tensor[:] = energy
        # force the target for observed k-space columns to always be 1 (fully realistic)
for i in range(mask.shape[0]):
idx = torch.nonzero(mask[i, 0, 0, :])
target_tensor[i, idx] = 1
return target_tensor
def __call__(
self, input, target_is_real, mask, degree=1, updateG=False, pred_and_gt=None
):
# input [B, imSize]
# degree is the realistic degree of output
# set updateG to True when training G.
target_tensor = self.get_target_tensor(
input, target_is_real, degree, mask, pred_and_gt
)
b, w = target_tensor.shape
if updateG and not self.grad_ctx:
mask_ = mask.squeeze()
# maskout the observed part loss
masked_input = torch.where(
(1 - mask_).byte(), input, torch.tensor(0.0).to(input.device)
)
masked_target = torch.where(
(1 - mask_).byte(), target_tensor, torch.tensor(0.0).to(input.device)
)
return self.loss(masked_input, masked_target) / (1 - mask_).sum()
else:
return self.loss(input, target_tensor) / (b * w)
|