## @package process
# Module doxygen.process
# Script to insert preamble for doxygen and regen API docs
import glob, os, shutil
# Module caffe2...caffe2.python.control_test
def insert(originalfile,first_line,description):
with open(originalfile,'r') as f:
f1 = f.readline()
if(f1.find(first_line)<0):
docs = first_line + description + f1
with open('newfile.txt','w') as f2:
f2.write(docs)
f2.write(f.read())
os.rename('newfile.txt',originalfile)
else:
print('already inserted')
# move up from /caffe2_root/doxygen
os.chdir("..")
os.system("git checkout caffe2/python/.")
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(".py"):
filepath = os.path.join(root, file)
print("filepath: " + filepath)
directory = os.path.dirname(filepath)[2:]
directory = directory.replace("/",".")
print "directory: " + directory
name = os.path.splitext(file)[0]
first_line = "## @package " + name
description = "\n# Module " + directory + "." + name + "\n"
            print(first_line, description)
insert(filepath,first_line,description)
if os.path.exists("doxygen/doxygen-python"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("doxygen/doxygen-python")
else:
os.makedirs("doxygen/doxygen-python")
if os.path.exists("doxygen/doxygen-c"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("doxygen/doxygen-c")
else:
os.makedirs("doxygen/doxygen-c")
os.system("doxygen .Doxyfile-python")
os.system("doxygen .Doxyfile-c")
|
## @package publish
# Module doxygen.publish
import os, shutil
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-c"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("/Users/aaronmarkham/caffe2/doxygen-c")
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-python"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("/Users/aaronmarkham/caffe2/doxygen-python")
os.system("cp -rf doxygen-c /Users/aaronmarkham/caffe2/")
os.system("cp -rf doxygen-python /Users/aaronmarkham/caffe2/")
|
import os
import torch
from torch.utils.ffi import create_extension
this_file = os.path.dirname(__file__)
sources = ['src/my_lib.c']
headers = ['src/my_lib.h']
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/my_lib_cuda.c']
headers += ['src/my_lib_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
ffi = create_extension(
'_ext.my_lib',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_compile_args=["-std=c99"]
)
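# Assuming this script is saved as build.py (as in the original C FFI extension
# tutorial), running `python build.py` compiles the C sources listed above and
# produces the _ext.my_lib module imported by the example code that follows.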
if __name__ == '__main__':
ffi.build()
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from modules.add import MyAddModule
class MyNetwork(nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.add = MyAddModule()
def forward(self, input1, input2):
return self.add(input1, input2)
model = MyNetwork()
x = torch.range(1, 25).view(5, 5)
input1, input2 = Variable(x), Variable(x * 4)
print(model(input1, input2))
print(input1 + input2)
if torch.cuda.is_available():
    input1, input2 = input1.cuda(), input2.cuda()
print(model(input1, input2))
print(input1 + input2)
|
# functions/add.py
import torch
from torch.autograd import Function
from _ext import my_lib
class MyAddFunction(Function):
def forward(self, input1, input2):
output = input1.new()
if not input1.is_cuda:
my_lib.my_lib_add_forward(input1, input2, output)
else:
my_lib.my_lib_add_forward_cuda(input1, input2, output)
return output
def backward(self, grad_output):
grad_input = grad_output.new()
if not grad_output.is_cuda:
my_lib.my_lib_add_backward(grad_output, grad_input)
else:
my_lib.my_lib_add_backward_cuda(grad_output, grad_input)
return grad_input
|
from torch.nn.modules.module import Module
from functions.add import MyAddFunction
class MyAddModule(Module):
def forward(self, input1, input2):
return MyAddFunction()(input1, input2)
|
import os
import torch
from torch.utils.ffi import create_extension
this_file = os.path.dirname(__file__)
sources = ['my_package/src/my_lib.c']
headers = ['my_package/src/my_lib.h']
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['my_package/src/my_lib_cuda.c']
headers += ['my_package/src/my_lib_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
ffi = create_extension(
'my_package._ext.my_lib',
package=True,
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda
)
if __name__ == '__main__':
ffi.build()
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from my_package.modules.add import MyAddModule
class MyNetwork(nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.add = MyAddModule()
def forward(self, input1, input2):
return self.add(input1, input2)
model = MyNetwork()
x = torch.range(1, 25).view(5, 5)
input1, input2 = Variable(x), Variable(x * 4)
print(model(input1, input2))
print(input1 + input2)
if torch.cuda.is_available():
    input1, input2 = input1.cuda(), input2.cuda()
print(model(input1, input2))
print(input1 + input2)
|
# functions/add.py
import torch
from torch.autograd import Function
from .._ext import my_lib
class MyAddFunction(Function):
def forward(self, input1, input2):
output = input1.new()
if not input1.is_cuda:
my_lib.my_lib_add_forward(input1, input2, output)
else:
my_lib.my_lib_add_forward_cuda(input1, input2, output)
return output
def backward(self, grad_output):
grad_input = grad_output.new()
if not grad_output.is_cuda:
my_lib.my_lib_add_backward(grad_output, grad_input)
else:
my_lib.my_lib_add_backward_cuda(grad_output, grad_input)
return grad_input
|
from torch.nn.modules.module import Module
from ..functions.add import MyAddFunction
class MyAddModule(Module):
def forward(self, input1, input2):
return MyAddFunction()(input1, input2)
|
import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
def compute(x, y, z):
x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
return x.matmul(y) + torch.relu(z)
inputs = [torch.randn(4, 8), torch.randn(8, 5), torch.randn(8, 5)]
trace = torch.jit.trace(compute, inputs)
print(trace.graph)
|
# Run `python setup.py build develop` before running this example!
import torch
torch.ops.load_library("warp_perspective.so")
print(torch.ops.my_ops.warp_perspective)
|
import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
print(torch.ops.my_ops.warp_perspective(torch.randn(32, 32), torch.rand(3, 3)))
|
import torch
import torch.utils.cpp_extension
op_source = """
#include <opencv2/opencv.hpp>
#include <torch/script.h>
torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
cv::Mat image_mat(/*rows=*/image.size(0),
/*cols=*/image.size(1),
/*type=*/CV_32FC1,
/*data=*/image.data<float>());
cv::Mat warp_mat(/*rows=*/warp.size(0),
/*cols=*/warp.size(1),
/*type=*/CV_32FC1,
/*data=*/warp.data<float>());
cv::Mat output_mat;
cv::warpPerspective(image_mat, output_mat, warp_mat, /*dsize=*/{64, 64});
torch::Tensor output =
torch::from_blob(output_mat.ptr<float>(), /*sizes=*/{64, 64});
return output.clone();
}
static auto registry =
torch::jit::RegisterOperators("my_ops::warp_perspective", &warp_perspective);
"""
torch.utils.cpp_extension.load_inline(
name="warp_perspective",
cpp_sources=op_source,
extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
is_python_module=False,
verbose=True,
)
print(torch.ops.my_ops.warp_perspective)
|
import torch
import torch.utils.cpp_extension
torch.utils.cpp_extension.load(
name="warp_perspective",
sources=["example_app/warp_perspective/op.cpp"],
extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
is_python_module=False,
verbose=True
)
print(torch.ops.my_ops.warp_perspective)
|
import torch
torch.ops.load_library("example_app/build/warp_perspective/libwarp_perspective.so")
@torch.jit.script
def compute(x, y):
if bool(x[0][0] == 42):
z = 5
else:
z = 10
x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
return x.matmul(y) + z
print(compute.graph)
print(compute(torch.randn(4, 8), torch.randn(8, 5)))
compute.save("example.pt")
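# The saved ScriptFunction in example.pt can later be loaded in a C++ process
# (e.g. with torch::jit::load), provided the custom-op shared library is also
# loaded there so that my_ops::warp_perspective resolves.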
|
from __future__ import division
from __future__ import print_function
import argparse
import math
import time
import torch
TIME_SCALES = {'s': 1, 'ms': 1000, 'us': 1000000}
parser = argparse.ArgumentParser()
parser.add_argument('example', choices=['py', 'cpp', 'cuda'])
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-f', '--features', type=int, default=32)
parser.add_argument('-s', '--state-size', type=int, default=128)
parser.add_argument('-r', '--runs', type=int, default=100)
parser.add_argument('--scale', choices=['s', 'ms', 'us'], default='us')
parser.add_argument('-c', '--cuda', action='store_true')
parser.add_argument('-d', '--double', action='store_true')
options = parser.parse_args()
if options.example == 'py':
from python.lltm import LLTM
elif options.example == 'cpp':
from cpp.lltm import LLTM
else:
from cuda.lltm import LLTM
options.cuda = True
device = torch.device("cuda") if options.cuda else torch.device("cpu")
dtype = torch.float64 if options.double else torch.float32
kwargs = {'dtype': dtype,
'device': device,
'requires_grad': True}
X = torch.randn(options.batch_size, options.features, **kwargs)
h = torch.randn(options.batch_size, options.state_size, **kwargs)
C = torch.randn(options.batch_size, options.state_size, **kwargs)
rnn = LLTM(options.features, options.state_size).to(device, dtype)
# Force CUDA initialization
new_h, new_C = rnn(X, (h, C))
(new_h.sum() + new_C.sum()).backward()
forward_min = math.inf
forward_time = 0
backward_min = math.inf
backward_time = 0
for _ in range(options.runs):
rnn.zero_grad()
start = time.time()
new_h, new_C = rnn(X, (h, C))
elapsed = time.time() - start
forward_min = min(forward_min, elapsed)
forward_time += elapsed
start = time.time()
(new_h.sum() + new_C.sum()).backward()
elapsed = time.time() - start
backward_min = min(backward_min, elapsed)
backward_time += elapsed
scale = TIME_SCALES[options.scale]
forward_min *= scale
backward_min *= scale
forward_average = forward_time / options.runs * scale
backward_average = backward_time / options.runs * scale
print('Forward: {0:.3f}/{1:.3f} {4} | Backward {2:.3f}/{3:.3f} {4}'.format(
forward_min, forward_average, backward_min, backward_average,
options.scale))
|
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch
import python.lltm_baseline
import cpp.lltm
def check_equal(first, second, verbose):
if verbose:
print()
for i, (x, y) in enumerate(zip(first, second)):
x = x.cpu().detach().numpy()
y = y.cpu().detach().numpy()
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i))
def zero_grad(variables):
for variable in variables:
variable.grad.zero_()
def get_grads(variables):
return [var.grad.clone() for var in variables]
def check_forward(variables, with_cuda, verbose):
baseline_values = python.lltm_baseline.LLTMFunction.apply(*variables)
cpp_values = cpp.lltm.LLTMFunction.apply(*variables)
print('Forward: Baseline (Python) vs. C++ ... ', end='')
check_equal(baseline_values, cpp_values, verbose)
print('Ok')
if with_cuda:
cuda_values = cuda.lltm.LLTMFunction.apply(*variables)
print('Forward: Baseline (Python) vs. CUDA ... ', end='')
check_equal(baseline_values, cuda_values, verbose)
print('Ok')
def check_backward(variables, with_cuda, verbose):
baseline_values = python.lltm_baseline.LLTMFunction.apply(*variables)
(baseline_values[0] + baseline_values[1]).sum().backward()
grad_baseline = get_grads(variables)
zero_grad(variables)
cpp_values = cpp.lltm.LLTMFunction.apply(*variables)
(cpp_values[0] + cpp_values[1]).sum().backward()
grad_cpp = get_grads(variables)
print('Backward: Baseline (Python) vs. C++ ... ', end='')
check_equal(grad_baseline, grad_cpp, verbose)
print('Ok')
if with_cuda:
zero_grad(variables)
cuda_values = cuda.lltm.LLTMFunction.apply(*variables)
(cuda_values[0] + cuda_values[1]).sum().backward()
grad_cuda = get_grads(variables)
print('Backward: Baseline (Python) vs. CUDA ... ', end='')
check_equal(grad_baseline, grad_cuda, verbose)
print('Ok')
parser = argparse.ArgumentParser()
parser.add_argument('direction', choices=['forward', 'backward'], nargs='+')
parser.add_argument('-b', '--batch-size', type=int, default=3)
parser.add_argument('-f', '--features', type=int, default=17)
parser.add_argument('-s', '--state-size', type=int, default=5)
parser.add_argument('-c', '--cuda', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
options = parser.parse_args()
if options.cuda:
import cuda.lltm
device = torch.device("cuda")
else:
device = torch.device("cpu")
kwargs = {'dtype': torch.float64,
'device': device,
'requires_grad': True}
X = torch.randn(options.batch_size,
options.features,
**kwargs)
h = torch.randn(options.batch_size, options.state_size, **kwargs)
C = torch.randn(options.batch_size, options.state_size, **kwargs)
W = torch.randn(3 * options.state_size, options.features + options.state_size, **kwargs)
b = torch.randn(1, 3 * options.state_size, **kwargs)
variables = [X, W, b, h, C]
if 'forward' in options.direction:
check_forward(variables, options.cuda, options.verbose)
if 'backward' in options.direction:
check_backward(variables, options.cuda, options.verbose)
|
from __future__ import division
from __future__ import print_function
import argparse
import torch
from torch.autograd import gradcheck
parser = argparse.ArgumentParser()
parser.add_argument('example', choices=['py', 'cpp', 'cuda'])
parser.add_argument('-b', '--batch-size', type=int, default=3)
parser.add_argument('-f', '--features', type=int, default=17)
parser.add_argument('-s', '--state-size', type=int, default=5)
parser.add_argument('-c', '--cuda', action='store_true')
options = parser.parse_args()
if options.example == 'py':
from python.lltm_baseline import LLTMFunction
elif options.example == 'cpp':
from cpp.lltm import LLTMFunction
else:
from cuda.lltm import LLTMFunction
options.cuda = True
device = torch.device("cuda") if options.cuda else torch.device("cpu")
kwargs = {'dtype': torch.float64,
'device': device,
'requires_grad': True}
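# gradcheck compares the analytical gradients from backward() against numerical
# finite-difference estimates, which is why the inputs below are created in
# double precision with requires_grad=True.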
X = torch.randn(options.batch_size, options.features, **kwargs)
h = torch.randn(options.batch_size, options.state_size, **kwargs)
C = torch.randn(options.batch_size, options.state_size, **kwargs)
W = torch.randn(3 * options.state_size, options.features + options.state_size, **kwargs)
b = torch.randn(1, 3 * options.state_size, **kwargs)
variables = [X, W, b, h, C]
if gradcheck(LLTMFunction.apply, variables):
print('Ok')
|
import math
import torch
import torch.nn.functional as F
torch.manual_seed(42)
class LLTM(torch.nn.Module):
def __init__(self, input_features, state_size):
super(LLTM, self).__init__()
self.input_features = input_features
self.state_size = state_size
# 3 * state_size for input gate, output gate and candidate cell gate.
# input_features + state_size because we will multiply with [input, h].
self.weights = torch.nn.Parameter(
torch.Tensor(3 * state_size, input_features + state_size))
self.bias = torch.nn.Parameter(torch.Tensor(1, 3 * state_size))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.state_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, +stdv)
def forward(self, input, state):
old_h, old_cell = state
X = torch.cat([old_h, input], dim=1)
# Compute the input, output and candidate cell gates with one MM.
gate_weights = F.linear(X, self.weights, self.bias)
# Split the combined gate weight matrix into its components.
gates = gate_weights.chunk(3, dim=1)
input_gate = torch.sigmoid(gates[0])
output_gate = torch.sigmoid(gates[1])
# Here we use an ELU instead of the usual tanh.
candidate_cell = F.elu(gates[2])
# Compute the new cell state.
new_cell = old_cell + candidate_cell * input_gate
# Compute the new hidden state and output.
new_h = torch.tanh(new_cell) * output_gate
return new_h, new_cell
|
import math
from torch import nn
from torch.autograd import Function
import torch
import torch.nn.functional as F
torch.manual_seed(42)
def d_sigmoid(z):
s = torch.sigmoid(z)
return (1 - s) * s
def d_tanh(z):
t = torch.tanh(z)
return 1 - (t * t)
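# d_elu below is the derivative of ELU: 1 for z > 0 and alpha * exp(z) for
# z < 0; the mask term selects the negative branch.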
def d_elu(z, alpha=1.0):
e = z.exp()
mask = (alpha * (e - 1)) < 0
return (z > 0).type_as(z) + mask.type_as(z) * (alpha * e)
class LLTMFunction(Function):
@staticmethod
def forward(ctx, input, weights, bias, old_h, old_cell):
X = torch.cat([old_h, input], dim=1)
gate_weights = F.linear(X, weights, bias)
gates = gate_weights.chunk(3, dim=1)
input_gate = torch.sigmoid(gates[0])
output_gate = torch.sigmoid(gates[1])
candidate_cell = F.elu(gates[2])
new_cell = old_cell + candidate_cell * input_gate
new_h = torch.tanh(new_cell) * output_gate
ctx.save_for_backward(X, weights, input_gate, output_gate, old_cell,
new_cell, candidate_cell, gate_weights)
return new_h, new_cell
@staticmethod
def backward(ctx, grad_h, grad_cell):
X, weights, input_gate, output_gate, old_cell = ctx.saved_variables[:5]
new_cell, candidate_cell, gate_weights = ctx.saved_variables[5:]
d_input = d_weights = d_bias = d_old_h = d_old_cell = None
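        # backward must return one gradient per forward() argument (excluding
        # ctx), in the same order: input, weights, bias, old_h, old_cell.
        # Gradients left as None tell autograd that the input needs no grad.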
d_output_gate = torch.tanh(new_cell) * grad_h
d_tanh_new_cell = output_gate * grad_h
d_new_cell = d_tanh(new_cell) * d_tanh_new_cell + grad_cell
d_old_cell = d_new_cell
d_candidate_cell = input_gate * d_new_cell
d_input_gate = candidate_cell * d_new_cell
gates = gate_weights.chunk(3, dim=1)
d_input_gate *= d_sigmoid(gates[0])
d_output_gate *= d_sigmoid(gates[1])
d_candidate_cell *= d_elu(gates[2])
d_gates = torch.cat(
[d_input_gate, d_output_gate, d_candidate_cell], dim=1)
if ctx.needs_input_grad[1]:
d_weights = d_gates.t().mm(X)
if ctx.needs_input_grad[2]:
d_bias = d_gates.sum(dim=0, keepdim=True)
if ctx.needs_input_grad[3] or ctx.needs_input_grad[4]:
d_X = d_gates.mm(weights)
state_size = grad_h.shape[1]
d_old_h, d_input = d_X[:, :state_size], d_X[:, state_size:]
return d_input, d_weights, d_bias, d_old_h, d_old_cell
class LLTM(nn.Module):
def __init__(self, input_features, state_size):
super(LLTM, self).__init__()
self.input_features = input_features
self.state_size = state_size
self.weights = nn.Parameter(
torch.Tensor(3 * state_size, input_features + state_size))
self.bias = nn.Parameter(torch.Tensor(1, 3 * state_size))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.state_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, +stdv)
def forward(self, input, state):
return LLTMFunction.apply(input, self.weights, self.bias, *state)
|
from torch.utils.cpp_extension import load
lltm_cuda = load(
'lltm_cuda', ['lltm_cuda.cpp', 'lltm_cuda_kernel.cu'], verbose=True)
help(lltm_cuda)
|
import math
from torch import nn
from torch.autograd import Function
import torch
import lltm_cuda
torch.manual_seed(42)
class LLTMFunction(Function):
@staticmethod
def forward(ctx, input, weights, bias, old_h, old_cell):
outputs = lltm_cuda.forward(input, weights, bias, old_h, old_cell)
new_h, new_cell = outputs[:2]
variables = outputs[1:] + [weights]
ctx.save_for_backward(*variables)
return new_h, new_cell
@staticmethod
def backward(ctx, grad_h, grad_cell):
outputs = lltm_cuda.backward(
grad_h.contiguous(), grad_cell.contiguous(), *ctx.saved_variables)
d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates = outputs
return d_input, d_weights, d_bias, d_old_h, d_old_cell
class LLTM(nn.Module):
def __init__(self, input_features, state_size):
super(LLTM, self).__init__()
self.input_features = input_features
self.state_size = state_size
self.weights = nn.Parameter(
torch.Tensor(3 * state_size, input_features + state_size))
self.bias = nn.Parameter(torch.Tensor(1, 3 * state_size))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.state_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, +stdv)
def forward(self, input, state):
return LLTMFunction.apply(input, self.weights, self.bias, *state)
|
from torch.utils.cpp_extension import load
lltm_cpp = load(name="lltm_cpp", sources=["lltm.cpp"], verbose=True)
help(lltm_cpp)
|
import math
from torch import nn
from torch.autograd import Function
import torch
import lltm_cpp
torch.manual_seed(42)
class LLTMFunction(Function):
@staticmethod
def forward(ctx, input, weights, bias, old_h, old_cell):
outputs = lltm_cpp.forward(input, weights, bias, old_h, old_cell)
new_h, new_cell = outputs[:2]
variables = outputs[1:] + [weights]
ctx.save_for_backward(*variables)
return new_h, new_cell
@staticmethod
def backward(ctx, grad_h, grad_cell):
d_old_h, d_input, d_weights, d_bias, d_old_cell = lltm_cpp.backward(
grad_h, grad_cell, *ctx.saved_variables)
return d_input, d_weights, d_bias, d_old_h, d_old_cell
class LLTM(nn.Module):
def __init__(self, input_features, state_size):
super(LLTM, self).__init__()
self.input_features = input_features
self.state_size = state_size
self.weights = nn.Parameter(
torch.Tensor(3 * state_size, input_features + state_size))
self.bias = nn.Parameter(torch.Tensor(1, 3 * state_size))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.state_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, +stdv)
def forward(self, input, state):
return LLTMFunction.apply(input, self.weights, self.bias, *state)
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
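        # 9216 = 64 * 12 * 12: a 28x28 MNIST image shrinks to 26x26 and then
        # 24x24 after the two valid 3x3 convolutions, and to 12x12 after the
        # 2x2 max-pooling in forward().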
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False,
help='disables macOS GPU training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
torch.manual_seed(args.seed)
if use_cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
train_kwargs = {'batch_size': args.batch_size}
test_kwargs = {'batch_size': args.test_batch_size}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('../data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('../data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
|
###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model.
#
###############################################################################
import argparse
import torch
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default=1000,
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--mps', action='store_true', default=False,
help='enables macOS GPU training')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda.")
if torch.backends.mps.is_available():
if not args.mps:
print("WARNING: You have mps device, to enable macOS GPU run with --mps.")
use_mps = args.mps and torch.backends.mps.is_available()
if args.cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3.")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f, map_location=device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
if not is_transformer_model:
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, 'w') as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
if is_transformer_model:
output = model(input, False)
word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
word_tensor = torch.Tensor([[word_idx]]).long().to(device)
input = torch.cat([input, word_tensor], 0)
else:
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.ntoken = ntoken
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError as e:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""") from e
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
decoded = decoded.view(-1, self.ntoken)
return F.log_softmax(decoded, dim=1), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens in the sequence.
The positional encodings have the same dimension as the embeddings, so that the two can be summed.
Here, we use sine and cosine functions of different frequencies.
    .. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos / 10000^{2i / d_{\text{model}}})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos / 10000^{2i / d_{\text{model}}})
    where pos is the word position and i is the embedding index.
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
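        # div_term equals 1 / 10000^(2i / d_model), computed via exp/log for
        # numerical stability; even embedding dimensions get the sine term and
        # odd dimensions the cosine term below.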
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Transformer):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__(d_model=ninp, nhead=nhead, dim_feedforward=nhid, num_encoder_layers=nlayers)
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
self.input_emb = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
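        # For sz = 3 the returned mask is
        #   [[0., -inf, -inf],
        #    [0.,   0., -inf],
        #    [0.,   0.,   0.]]
        # so position i may only attend to positions j <= i.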
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.input_emb.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.input_emb(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.encoder(src, mask=self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
|
# coding: utf-8
import argparse
import time
import math
import os
import torch
import torch.nn as nn
import torch.onnx
import data
import model
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM/GRU/Transformer Language Model')
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of network (RNN_TANH, RNN_RELU, LSTM, GRU, Transformer)')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=200,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true', default=False,
help='use CUDA')
parser.add_argument('--mps', action='store_true', default=False,
help='enables macOS GPU training')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--onnx-export', type=str, default='',
help='path to export the final model in onnx format')
parser.add_argument('--nhead', type=int, default=2,
help='the number of heads in the encoder/decoder of the transformer model')
parser.add_argument('--dry-run', action='store_true',
help='verify the code and the model')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda.")
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
if not args.mps:
print("WARNING: You have mps device, to enable macOS GPU run with --mps.")
use_mps = args.mps and torch.backends.mps.is_available()
if args.cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
# Starting from sequential data, batchify arranges the dataset into columns.
# For instance, with the alphabet as the sequence and batch size 4, we'd get
# ┌ a g m s ┐
# │ b h n t │
# │ c i o u │
# │ d j p v │
# │ e k q w │
# └ f l r x ┘.
# These columns are treated as independent by the model, which means that the
# dependence of e.g. 'g' on 'f' cannot be learned, but allows more efficient
# batch processing.
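# For example, a 26-token corpus with bsz = 4 keeps 6 * 4 = 24 tokens and is
# reshaped into a 6 x 4 tensor whose four columns are the independent streams
# shown above.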
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
if args.model == 'Transformer':
model = model.TransformerModel(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(device)
else:
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
criterion = nn.NLLLoss()
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
# get_batch subdivides the source data into chunks of length args.bptt.
# If source is equal to the example output of the batchify function, with
# a bptt-limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivision of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the batchify function. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM.
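# get_batch therefore returns `data` of shape (seq_len, bsz) and `target` of
# shape (seq_len * bsz,): the same tokens shifted one step ahead and flattened,
# matching the flattened model output passed to the loss.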
def get_batch(source, i):
seq_len = min(args.bptt, len(source) - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len].view(-1)
return data, target
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0.
ntokens = len(corpus.dictionary)
if args.model != 'Transformer':
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i)
if args.model == 'Transformer':
output = model(data)
output = output.view(-1, ntokens)
else:
output, hidden = model(data, hidden)
hidden = repackage_hidden(hidden)
total_loss += len(data) * criterion(output, targets).item()
return total_loss / (len(data_source) - 1)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0.
start_time = time.time()
ntokens = len(corpus.dictionary)
if args.model != 'Transformer':
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
model.zero_grad()
if args.model == 'Transformer':
output = model(data)
output = output.view(-1, ntokens)
else:
hidden = repackage_hidden(hidden)
output, hidden = model(data, hidden)
loss = criterion(output, targets)
loss.backward()
        # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
for p in model.parameters():
p.data.add_(p.grad, alpha=-lr)
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
if args.dry_run:
break
def export_onnx(path, batch_size, seq_len):
print('The model is also exported in ONNX format at {}.'.format(os.path.realpath(args.onnx_export)))
model.eval()
dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
hidden = model.init_hidden(batch_size)
torch.onnx.export(model, (dummy_input, hidden), path)
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# After loading, the RNN parameters are not a contiguous chunk of memory;
# flatten_parameters() makes them contiguous again, which speeds up the forward pass.
# Currently, only the RNN models support flatten_parameters().
if args.model in ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU']:
model.rnn.flatten_parameters()
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
if len(args.onnx_export) > 0:
# Export the model in ONNX format.
export_onnx(args.onnx_export, batch_size=1, seq_len=args.bptt)
|
import os
from io import open
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split() + ['<eos>']
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
idss = []
for line in f:
words = line.split() + ['<eos>']
ids = []
for word in words:
ids.append(self.dictionary.word2idx[word])
idss.append(torch.tensor(ids).type(torch.int64))
ids = torch.cat(idss)
return ids
|
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
from torchvision.transforms import Compose, ToTensor, Resize
from torch import optim
import numpy as np
from torch.hub import tqdm
class PatchExtractor(nn.Module):
def __init__(self, patch_size=16):
super().__init__()
self.patch_size = patch_size
def forward(self, input_data):
batch_size, channels, height, width = input_data.size()
assert height % self.patch_size == 0 and width % self.patch_size == 0, \
f"Input height ({height}) and width ({width}) must be divisible by patch size ({self.patch_size})"
num_patches_h = height // self.patch_size
num_patches_w = width // self.patch_size
num_patches = num_patches_h * num_patches_w
patches = input_data.unfold(2, self.patch_size, self.patch_size). \
unfold(3, self.patch_size, self.patch_size). \
permute(0, 2, 3, 1, 4, 5). \
contiguous(). \
view(batch_size, num_patches, -1)
# Expected shape of a patch on default settings is (4, 196, 768)
return patches
class InputEmbedding(nn.Module):
def __init__(self, args):
super(InputEmbedding, self).__init__()
self.patch_size = args.patch_size
self.n_channels = args.n_channels
self.latent_size = args.latent_size
use_cuda = not args.no_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if use_cuda else "cpu")
self.batch_size = args.batch_size
self.input_size = self.patch_size * self.patch_size * self.n_channels
# Linear projection
self.LinearProjection = nn.Linear(self.input_size, self.latent_size)
# Class token
self.class_token = nn.Parameter(torch.randn(self.batch_size, 1, self.latent_size)).to(self.device)
# Positional embedding
self.pos_embedding = nn.Parameter(torch.randn(self.batch_size, 1, self.latent_size)).to(self.device)
def forward(self, input_data):
input_data = input_data.to(self.device)
# Patchifying the Image
patchify = PatchExtractor(patch_size=self.patch_size)
patches = patchify(input_data)
linear_projection = self.LinearProjection(patches).to(self.device)
b, n, _ = linear_projection.shape
linear_projection = torch.cat((self.class_token, linear_projection), dim=1)
pos_embed = self.pos_embedding[:, :n + 1, :]
linear_projection += pos_embed
return linear_projection
class EncoderBlock(nn.Module):
def __init__(self, args):
super(EncoderBlock, self).__init__()
self.latent_size = args.latent_size
self.num_heads = args.num_heads
self.dropout = args.dropout
self.norm = nn.LayerNorm(self.latent_size)
self.attention = nn.MultiheadAttention(self.latent_size, self.num_heads, dropout=self.dropout)
self.enc_MLP = nn.Sequential(
nn.Linear(self.latent_size, self.latent_size * 4),
nn.GELU(),
nn.Dropout(self.dropout),
nn.Linear(self.latent_size * 4, self.latent_size),
nn.Dropout(self.dropout)
)
def forward(self, emb_patches):
first_norm = self.norm(emb_patches)
attention_out = self.attention(first_norm, first_norm, first_norm)[0]
first_added = attention_out + emb_patches
second_norm = self.norm(first_added)
mlp_out = self.enc_MLP(second_norm)
output = mlp_out + first_added
return output
class ViT(nn.Module):
def __init__(self, args):
super(ViT, self).__init__()
self.num_encoders = args.num_encoders
self.latent_size = args.latent_size
self.num_classes = args.num_classes
self.dropout = args.dropout
self.embedding = InputEmbedding(args)
# Encoder Stack
self.encoders = nn.ModuleList([EncoderBlock(args) for _ in range(self.num_encoders)])
self.MLPHead = nn.Sequential(
nn.LayerNorm(self.latent_size),
nn.Linear(self.latent_size, self.latent_size),
nn.Linear(self.latent_size, self.num_classes),
)
def forward(self, test_input):
enc_output = self.embedding(test_input)
for enc_layer in self.encoders:
enc_output = enc_layer(enc_output)
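        # Position 0 is the learnable class token prepended in InputEmbedding;
        # its final embedding serves as the image representation fed to the
        # classification head below.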
class_token_embed = enc_output[:, 0]
return self.MLPHead(class_token_embed)
class TrainEval:
def __init__(self, args, model, train_dataloader, val_dataloader, optimizer, criterion, device):
self.model = model
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.optimizer = optimizer
self.criterion = criterion
self.epoch = args.epochs
self.device = device
self.args = args
def train_fn(self, current_epoch):
self.model.train()
total_loss = 0.0
tk = tqdm(self.train_dataloader, desc="EPOCH" + "[TRAIN]" + str(current_epoch + 1) + "/" + str(self.epoch))
for t, data in enumerate(tk):
images, labels = data
images, labels = images.to(self.device), labels.to(self.device)
self.optimizer.zero_grad()
logits = self.model(images)
loss = self.criterion(logits, labels)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
tk.set_postfix({"Loss": "%6f" % float(total_loss / (t + 1))})
if self.args.dry_run:
break
return total_loss / len(self.train_dataloader)
def eval_fn(self, current_epoch):
self.model.eval()
total_loss = 0.0
tk = tqdm(self.val_dataloader, desc="EPOCH" + "[VALID]" + str(current_epoch + 1) + "/" + str(self.epoch))
for t, data in enumerate(tk):
images, labels = data
images, labels = images.to(self.device), labels.to(self.device)
logits = self.model(images)
loss = self.criterion(logits, labels)
total_loss += loss.item()
tk.set_postfix({"Loss": "%6f" % float(total_loss / (t + 1))})
if self.args.dry_run:
break
return total_loss / len(self.val_dataloader)
def train(self):
best_valid_loss = np.inf
best_train_loss = np.inf
for i in range(self.epoch):
train_loss = self.train_fn(i)
val_loss = self.eval_fn(i)
if val_loss < best_valid_loss:
torch.save(self.model.state_dict(), "best-weights.pt")
print("Saved Best Weights")
best_valid_loss = val_loss
best_train_loss = train_loss
print(f"Training Loss : {best_train_loss}")
print(f"Valid Loss : {best_valid_loss}")
'''
On default settings:
Training Loss : 2.3081023390197752
Valid Loss : 2.302861615943909
However, this score is not competitive compared to the
high results in the original paper, which were achieved
through pre-training on JFT-300M dataset, then fine-tuning
it on the target dataset. To improve the model quality
without pre-training, we could try training for more epochs,
        using more Transformer layers, resizing the images, or changing the
        patch size.
'''
def main():
parser = argparse.ArgumentParser(description='Vision Transformer in PyTorch')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--patch-size', type=int, default=16,
help='patch size for images (default : 16)')
parser.add_argument('--latent-size', type=int, default=768,
help='latent size (default : 768)')
parser.add_argument('--n-channels', type=int, default=3,
help='number of channels in images (default : 3 for RGB)')
parser.add_argument('--num-heads', type=int, default=12,
                        help='number of attention heads (default : 12)')
parser.add_argument('--num-encoders', type=int, default=12,
help='number of encoders (default : 12)')
    parser.add_argument('--dropout', type=float, default=0.1,
help='dropout value (default : 0.1)')
parser.add_argument('--img-size', type=int, default=224,
                        help='image size to be reshaped to (default : 224)')
parser.add_argument('--num-classes', type=int, default=10,
help='number of classes in dataset (default : 10 for CIFAR10)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs (default : 10)')
parser.add_argument('--lr', type=float, default=1e-2,
help='base learning rate (default : 0.01)')
    parser.add_argument('--weight-decay', type=float, default=3e-2,
help='weight decay value (default : 0.03)')
parser.add_argument('--batch-size', type=int, default=4,
help='batch size (default : 4)')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
transforms = Compose([
Resize((args.img_size, args.img_size)),
ToTensor()
])
train_data = torchvision.datasets.CIFAR10(root='./dataset', train=True, download=True, transform=transforms)
valid_data = torchvision.datasets.CIFAR10(root='./dataset', train=False, download=True, transform=transforms)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size, shuffle=True)
model = ViT(args).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
criterion = nn.CrossEntropyLoss()
TrainEval(args, model, train_loader, valid_loader, optimizer, criterion, device).train()
if __name__ == "__main__":
main()
|
import os
import time
import requests
import tarfile
import numpy as np
import argparse
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
################################
### GAT LAYER DEFINITION ###
################################
class GraphAttentionLayer(nn.Module):
"""
Graph Attention Layer (GAT) as described in the paper `"Graph Attention Networks" <https://arxiv.org/pdf/1710.10903.pdf>`.
This operation can be mathematically described as:
e_ij = a(W h_i, W h_j)
α_ij = softmax_j(e_ij) = exp(e_ij) / Σ_k(exp(e_ik))
h_i' = σ(Σ_j(α_ij W h_j))
where h_i and h_j are the feature vectors of nodes i and j respectively, W is a learnable weight matrix,
a is an attention mechanism that computes the attention coefficients e_ij, and σ is an activation function.
"""
def __init__(self, in_features: int, out_features: int, n_heads: int, concat: bool = False, dropout: float = 0.4, leaky_relu_slope: float = 0.2):
super(GraphAttentionLayer, self).__init__()
self.n_heads = n_heads # Number of attention heads
        self.concat = concat # whether to concatenate the final attention heads
self.dropout = dropout # Dropout rate
if concat: # concatenating the attention heads
self.out_features = out_features # Number of output features per node
assert out_features % n_heads == 0 # Ensure that out_features is a multiple of n_heads
self.n_hidden = out_features // n_heads
else: # averaging output over the attention heads (Used in the main paper)
self.n_hidden = out_features
# A shared linear transformation, parametrized by a weight matrix W is applied to every node
# Initialize the weight matrix W
self.W = nn.Parameter(torch.empty(size=(in_features, self.n_hidden * n_heads)))
# Initialize the attention weights a
self.a = nn.Parameter(torch.empty(size=(n_heads, 2 * self.n_hidden, 1)))
self.leakyrelu = nn.LeakyReLU(leaky_relu_slope) # LeakyReLU activation function
self.softmax = nn.Softmax(dim=1) # softmax activation function to the attention coefficients
self.reset_parameters() # Reset the parameters
def reset_parameters(self):
"""
Reinitialize learnable parameters.
"""
nn.init.xavier_normal_(self.W)
nn.init.xavier_normal_(self.a)
def _get_attention_scores(self, h_transformed: torch.Tensor):
"""calculates the attention scores e_ij for all pairs of nodes (i, j) in the graph
in vectorized parallel form. for each pair of source and target nodes (i, j),
the attention score e_ij is computed as follows:
e_ij = LeakyReLU(a^T [Wh_i || Wh_j])
where || denotes the concatenation operation, and a and W are the learnable parameters.
Args:
h_transformed (torch.Tensor): Transformed feature matrix with shape (n_nodes, n_heads, n_hidden),
where n_nodes is the number of nodes and out_features is the number of output features per node.
Returns:
torch.Tensor: Attention score matrix with shape (n_heads, n_nodes, n_nodes), where n_nodes is the number of nodes.
"""
source_scores = torch.matmul(h_transformed, self.a[:, :self.n_hidden, :])
target_scores = torch.matmul(h_transformed, self.a[:, self.n_hidden:, :])
# broadcast add
# (n_heads, n_nodes, 1) + (n_heads, 1, n_nodes) = (n_heads, n_nodes, n_nodes)
e = source_scores + target_scores.mT
return self.leakyrelu(e)
def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
"""
Performs a graph attention layer operation.
Args:
h (torch.Tensor): Input tensor representing node features.
adj_mat (torch.Tensor): Adjacency matrix representing graph structure.
Returns:
torch.Tensor: Output tensor after the graph convolution operation.
"""
n_nodes = h.shape[0]
# Apply linear transformation to node feature -> W h
# output shape (n_nodes, n_hidden * n_heads)
h_transformed = torch.mm(h, self.W)
h_transformed = F.dropout(h_transformed, self.dropout, training=self.training)
# splitting the heads by reshaping the tensor and putting heads dim first
# output shape (n_heads, n_nodes, n_hidden)
h_transformed = h_transformed.view(n_nodes, self.n_heads, self.n_hidden).permute(1, 0, 2)
# getting the attention scores
# output shape (n_heads, n_nodes, n_nodes)
e = self._get_attention_scores(h_transformed)
# Set the attention score for non-existent edges to a large negative value (-9e16) to mask them out
connectivity_mask = -9e16 * torch.ones_like(e)
e = torch.where(adj_mat > 0, e, connectivity_mask) # masked attention scores
# attention coefficients α_ij are computed as a softmax over each row i of the
# attention score matrix e, i.e. over the neighbors j of node i
attention = F.softmax(e, dim=-1)
attention = F.dropout(attention, self.dropout, training=self.training)
# final node embeddings are computed as a weighted average of the features of its neighbors
h_prime = torch.matmul(attention, h_transformed)
# concatenating/averaging the attention heads
# output shape (n_nodes, out_features)
if self.concat:
h_prime = h_prime.permute(1, 0, 2).contiguous().view(n_nodes, self.out_features)
else:
h_prime = h_prime.mean(dim=0)
return h_prime
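# --- Added example (not part of the original script) ---
# A minimal, hedged usage sketch for GraphAttentionLayer: the helper below is
# hypothetical, is never called by the training code, and only illustrates the
# expected tensor shapes on a tiny fully connected 4-node graph. It assumes the
# module-level imports of this file (torch).
def _demo_graph_attention_layer():
    n_nodes, in_features, out_features, n_heads = 4, 8, 6, 3
    layer = GraphAttentionLayer(in_features, out_features, n_heads, concat=True)
    h = torch.rand(n_nodes, in_features)         # node feature matrix (n_nodes, in_features)
    adj = torch.ones(n_nodes, n_nodes)           # fully connected graph, incl. self-loops
    out = layer(h, adj)                          # heads are concatenated since concat=True
    assert out.shape == (n_nodes, out_features)
    return out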
################################
### MAIN GAT NETWORK MODULE ###
################################
class GAT(nn.Module):
"""
Graph Attention Network (GAT) as described in the paper `"Graph Attention Networks" <https://arxiv.org/pdf/1710.10903.pdf>`.
Consists of a 2-layer stack of Graph Attention Layers (GATs). The first GAT layer is followed by an ELU activation,
and the second (final) layer is a GAT layer with a single attention head whose output is passed through a log-softmax.
"""
def __init__(self,
in_features,
n_hidden,
n_heads,
num_classes,
concat=False,
dropout=0.4,
leaky_relu_slope=0.2):
""" Initializes the GAT model.
Args:
in_features (int): number of input features per node.
n_hidden (int): output size of the first Graph Attention Layer.
n_heads (int): number of attention heads in the first Graph Attention Layer.
num_classes (int): number of classes to predict for each node.
concat (bool, optional): Whether to concatenate attention heads or take an average over them for the
output of the first Graph Attention Layer. Defaults to False.
dropout (float, optional): dropout rate. Defaults to 0.4.
leaky_relu_slope (float, optional): alpha (slope) of the leaky relu activation. Defaults to 0.2.
"""
super(GAT, self).__init__()
# Define the Graph Attention layers
self.gat1 = GraphAttentionLayer(
in_features=in_features, out_features=n_hidden, n_heads=n_heads,
concat=concat, dropout=dropout, leaky_relu_slope=leaky_relu_slope
)
self.gat2 = GraphAttentionLayer(
in_features=n_hidden, out_features=num_classes, n_heads=1,
concat=False, dropout=dropout, leaky_relu_slope=leaky_relu_slope
)
def forward(self, input_tensor: torch.Tensor , adj_mat: torch.Tensor):
"""
Performs a forward pass through the network.
Args:
input_tensor (torch.Tensor): Input tensor representing node features.
adj_mat (torch.Tensor): Adjacency matrix representing graph structure.
Returns:
torch.Tensor: Output tensor after the forward pass.
"""
# Apply the first Graph Attention layer
x = self.gat1(input_tensor, adj_mat)
x = F.elu(x) # Apply ELU activation function to the output of the first layer
# Apply the second Graph Attention layer
x = self.gat2(x, adj_mat)
return F.log_softmax(x, dim=1) # Apply log softmax activation function
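# --- Added example (not part of the original script) ---
# A small, hedged end-to-end sketch of the two-layer GAT on a random toy graph.
# The helper is hypothetical and never called here; it assumes this module's
# imports (torch) and simply shows that the network maps node features of shape
# (n_nodes, in_features) to per-node class log-probabilities.
def _demo_gat_forward():
    n_nodes, in_features, n_hidden, n_heads, num_classes = 5, 16, 8, 4, 3
    net = GAT(in_features=in_features, n_hidden=n_hidden, n_heads=n_heads, num_classes=num_classes)
    h = torch.rand(n_nodes, in_features)
    # random adjacency matrix with self-loops; only the adj > 0 pattern matters
    adj = torch.eye(n_nodes) + torch.bernoulli(torch.full((n_nodes, n_nodes), 0.5))
    log_probs = net(h, adj)
    assert log_probs.shape == (n_nodes, num_classes)
    return log_probs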
################################
### LOADING THE CORA DATASET ###
################################
def load_cora(path='./cora', device='cpu'):
"""
Loads the Cora dataset. The dataset is downloaded from https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz.
"""
# Set the paths to the data files
content_path = os.path.join(path, 'cora.content')
cites_path = os.path.join(path, 'cora.cites')
# Load data from files
content_tensor = np.genfromtxt(content_path, dtype=np.dtype(str))
cites_tensor = np.genfromtxt(cites_path, dtype=np.int32)
# Process features
features = torch.FloatTensor(content_tensor[:, 1:-1].astype(np.int32)) # Extract feature values
scale_vector = torch.sum(features, dim=1) # Compute sum of features for each node
scale_vector = 1 / scale_vector # Compute reciprocal of the sums
scale_vector[scale_vector == float('inf')] = 0 # Handle division by zero cases
scale_vector = torch.diag(scale_vector).to_sparse() # Convert the scale vector to a sparse diagonal matrix
features = scale_vector @ features # Scale the features using the scale vector
# Process labels
classes, labels = np.unique(content_tensor[:, -1], return_inverse=True) # Extract unique classes and map labels to indices
labels = torch.LongTensor(labels) # Convert labels to a tensor
# Process adjacency matrix
idx = content_tensor[:, 0].astype(np.int32) # Extract node indices
idx_map = {id: pos for pos, id in enumerate(idx)} # Create a dictionary to map indices to positions
# Map node indices to positions in the adjacency matrix
edges = np.array(
list(map(lambda edge: [idx_map[edge[0]], idx_map[edge[1]]],
cites_tensor)), dtype=np.int32)
V = len(idx) # Number of nodes
E = edges.shape[0] # Number of edges
adj_mat = torch.sparse_coo_tensor(edges.T, torch.ones(E), (V, V), dtype=torch.int64) # Create the initial adjacency matrix as a sparse tensor
adj_mat = torch.eye(V) + adj_mat # Add self-loops to the adjacency matrix
# return features.to_sparse().to(device), labels.to(device), adj_mat.to_sparse().to(device)
return features.to(device), labels.to(device), adj_mat.to(device)
#################################
### TRAIN AND TEST FUNCTIONS ###
#################################
def train_iter(epoch, model, optimizer, criterion, input, target, mask_train, mask_val, print_every=10):
start_t = time.time()
model.train()
optimizer.zero_grad()
# Forward pass
output = model(*input)
loss = criterion(output[mask_train], target[mask_train]) # Compute the loss using the training mask
loss.backward()
optimizer.step()
# Evaluate the model performance on training and validation sets
loss_train, acc_train = test(model, criterion, input, target, mask_train)
loss_val, acc_val = test(model, criterion, input, target, mask_val)
if epoch % print_every == 0:
# Print the training progress at specified intervals
print(f'Epoch: {epoch:04d} ({(time.time() - start_t):.4f}s) loss_train: {loss_train:.4f} acc_train: {acc_train:.4f} loss_val: {loss_val:.4f} acc_val: {acc_val:.4f}')
def test(model, criterion, input, target, mask):
model.eval()
with torch.no_grad():
output = model(*input)
output, target = output[mask], target[mask]
loss = criterion(output, target)
acc = (output.argmax(dim=1) == target).float().sum() / len(target)
return loss.item(), acc.item()
if __name__ == '__main__':
# Training settings
# All default values are the same as in the config used in the main paper
parser = argparse.ArgumentParser(description='PyTorch Graph Attention Network')
parser.add_argument('--epochs', type=int, default=300,
help='number of epochs to train (default: 300)')
parser.add_argument('--lr', type=float, default=0.005,
help='learning rate (default: 0.005)')
parser.add_argument('--l2', type=float, default=5e-4,
help='weight decay (default: 5e-4)')
parser.add_argument('--dropout-p', type=float, default=0.6,
help='dropout probability (default: 0.6)')
parser.add_argument('--hidden-dim', type=int, default=64,
help='dimension of the hidden representation (default: 64)')
parser.add_argument('--num-heads', type=int, default=8,
help='number of attention heads (default: 8)')
parser.add_argument('--concat-heads', action='store_true', default=False,
help='whether to concatenate attention heads or average over them (default: False)')
parser.add_argument('--val-every', type=int, default=20,
help='number of epochs between printing training and validation metrics (default: 20)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False,
help='disables macOS GPU training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=13, metavar='S',
help='random seed (default: 13)')
args = parser.parse_args()
torch.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
# Set the device to run on
if use_cuda:
device = torch.device('cuda')
elif use_mps:
device = torch.device('mps')
else:
device = torch.device('cpu')
print(f'Using {device} device')
# Load the dataset
cora_url = 'https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz'
path = './cora'
if os.path.isfile(os.path.join(path, 'cora.content')) and os.path.isfile(os.path.join(path, 'cora.cites')):
print('Dataset already downloaded...')
else:
print('Downloading dataset...')
with requests.get(cora_url, stream=True) as tgz_file:
with tarfile.open(fileobj=tgz_file.raw, mode='r:gz') as tgz_object:
tgz_object.extractall()
print('Loading dataset...')
# Load the dataset
features, labels, adj_mat = load_cora(device=device)
# Split the dataset into training, validation, and test sets
idx = torch.randperm(len(labels)).to(device)
idx_test, idx_val, idx_train = idx[:1200], idx[1200:1600], idx[1600:]
# Create the model
# The model consists of a 2-layer stack of Graph Attention Layers (GATs).
gat_net = GAT(
in_features=features.shape[1], # Number of input features per node
n_hidden=args.hidden_dim, # Output size of the first Graph Attention Layer
n_heads=args.num_heads, # Number of attention heads in the first Graph Attention Layer
num_classes=labels.max().item() + 1, # Number of classes to predict for each node
concat=args.concat_heads, # Whether to concatenate attention heads
dropout=args.dropout_p, # Dropout rate
leaky_relu_slope=0.2 # Alpha (slope) of the leaky relu activation
).to(device)
# configure the optimizer and loss function
optimizer = Adam(gat_net.parameters(), lr=args.lr, weight_decay=args.l2)
criterion = nn.NLLLoss()
# Train and evaluate the model
for epoch in range(args.epochs):
train_iter(epoch + 1, gat_net, optimizer, criterion, (features, adj_mat), labels, idx_train, idx_val, args.val_every)
if args.dry_run:
break
loss_test, acc_test = test(gat_net, criterion, (features, adj_mat), labels, idx_test)
print(f'Test set results: loss {loss_test:.4f} accuracy {acc_test:.4f}')
 |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.rnn = nn.LSTM(input_size=28, hidden_size=64, batch_first=True)
self.batchnorm = nn.BatchNorm1d(64)
self.dropout1 = nn.Dropout(0.25) # element-wise dropout (the activations here are 2D: batch x features)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(64, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, input):
# Shape of input is (batch_size, 1, 28, 28)
# converting shape of input to (batch_size, 28, 28)
# as required by RNN when batch_first is set True
input = input.reshape(-1, 28, 28)
output, hidden = self.rnn(input)
# With batch_first=True the LSTM output shape is (batch, seq_len, hidden_size)
# Get the output of the last time step
output = output[:, -1, :]
output = self.batchnorm(output)
output = self.dropout1(output)
output = self.fc1(output)
output = F.relu(output)
output = self.dropout2(output)
output = self.fc2(output)
output = F.log_softmax(output, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
if args.dry_run:
break
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example using RNN')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='for Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_rnn.pt")
if __name__ == '__main__':
main()
|
import argparse
import torch
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import parallelize_module
from utils import cleanup, setup, ToyModel
try:
from torch.distributed.tensor.parallel import (
SequenceParallel
)
SP_AVAILABLE = True
except BaseException:
SP_AVAILABLE = False
"""
This is the script to test Sequence Parallel (SP) on a toy model in a
Megatron-LM SPMD style. We show an E2E working flow of forward,
backward and optimization.
We use the example of two `nn.Linear` layers with an element-wise `nn.ReLU`
in between to show an example of sequence parallel, which was proposed in the paper:
https://arxiv.org/pdf/2205.05198.pdf.
Like tensor parallel, we parallelize the first linear layer by column
and the second linear layer by row. But the input on each rank is now
different, so we need one all-gather for the input and one reduce-scatter
at the end of the second linear layer.
"""
def demo_sp(rank, args):
"""
Main body of the demo of a basic version of sequence parallel by using
PyTorch native APIs.
"""
print(f"Running SP example on rank {rank}.")
setup(rank, args.world_size)
# create a sharding plan based on the given world_size.
device_mesh = DeviceMesh("cuda", torch.arange(0, args.world_size))
# create model and move it to GPU with id rank
model = ToyModel().cuda(rank)
# Create an optimizer for the parallelized module.
LR = 0.25
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
# Parallelize the module based on the given Parallel Style.
model = parallelize_module(model, device_mesh, SequenceParallel())
# Perform a number of iterations of forward/backward passes
# and optimizer steps for the sharded module.
for _ in range(args.iter_nums):
# For SP, input can be different across all ranks.
inp = torch.rand(20, 10).cuda(rank)
output = model(inp)
output.sum().backward()
optimizer.step()
cleanup()
if __name__ == "__main__":
n_gpus = torch.cuda.device_count()
parser = argparse.ArgumentParser()
# This is passed in via cmd
parser.add_argument("--world_size", type=int, default=n_gpus)
parser.add_argument("--iter_nums", type=int, default=10)
args = parser.parse_args()
# The main entry point is called directly without using subprocess
if n_gpus < 2:
print("Requires at least 2 GPUs to run.")
elif not SP_AVAILABLE:
print(
"PyTorch doesn't have Sequence Parallelism available,"
" need nightly build."
)
else:
mp.spawn(demo_sp, args=(args,), nprocs=args.world_size, join=True)
|
import argparse
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
def setup(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
def cleanup():
dist.destroy_process_group()
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 32)
self.relu = nn.ReLU()
self.net2 = nn.Linear(32, 5)
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
|
import argparse
import torch
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from utils import cleanup, setup, ToyModel
"""
This is the script to test Tensor Parallel (TP) on a toy model in a
Megatron-LM SPMD style. We show an E2E working flow of forward,
backward and optimization.
More context about API designs can be found in the design:
https://github.com/pytorch/pytorch/issues/89884.
And it is built on top of Distributed Tensor which is proposed in:
https://github.com/pytorch/pytorch/issues/88838.
We use the example of two `nn.Linear` layers with an element-wise `nn.ReLU`
in between to show an example of Megatron-LM style tensor parallelism, which was proposed in the paper:
https://arxiv.org/abs/1909.08053.
The basic idea is that we parallelize the first linear layer by column
and the second linear layer by row, so that we only need
one all-reduce at the end of the second linear layer.
We can speed up the model training by avoiding communications between
two layers.
To parallelize an nn module, we need to specify the parallel style we want
to use, and our `parallelize_module` API will parse and parallelize the modules
based on the given `ParallelStyle`. We use these PyTorch native Tensor
Parallelism APIs in this example to show users how to use them.
"""
def demo_tp(rank, args):
"""
Main body of the demo of a basic version of tensor parallel by using
PyTorch native APIs.
"""
print(f"Running basic Megatron style TP example on rank {rank}.")
setup(rank, args.world_size)
# create a sharding plan based on the given world_size.
device_mesh = DeviceMesh("cuda", torch.arange(0, args.world_size))
# create model and move it to GPU with id rank
model = ToyModel().cuda(rank)
# Create an optimizer for the parallelized module.
LR = 0.25
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
# Parallelize the module based on the given Parallel Style.
model = parallelize_module(model, device_mesh, PairwiseParallel())
# Perform a number of iterations of forward/backward passes
# and optimizer steps for the sharded module.
for i in range(args.iter_nums):
# For TP, input needs to be same across all TP ranks.
# Setting the random seed is to mimic the behavior of dataloader.
torch.manual_seed(i)
inp = torch.rand(20, 10).cuda(rank)
output = model(inp)
output.sum().backward()
optimizer.step()
cleanup()
if __name__ == "__main__":
n_gpus = torch.cuda.device_count()
parser = argparse.ArgumentParser()
# This is passed in via cmd
parser.add_argument("--world_size", type=int, default=n_gpus)
parser.add_argument("--iter_nums", type=int, default=10)
args = parser.parse_args()
# The main entry point is called directly without using subprocess
if n_gpus < 2:
print("Requires at least 2 GPUs to run.")
else:
mp.spawn(demo_tp, args=(args,), nprocs=args.world_size, join=True)
|
import argparse
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.distributed._tensor import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from utils import cleanup, setup, ToyModel
try:
from torch.distributed.tensor.parallel import (
SequenceParallel
)
SP_AVAILABLE = True
except BaseException:
SP_AVAILABLE = False
"""
This is the script to test 2D Parallelism, which combines Tensor/Sequence
Parallel with Fully Sharded Data Parallel (TP/SP + FSDP) on a toy model
in the SPMD style. We show an E2E working flow of forward, backward
and optimization.
We enabled Fully Sharded Data Parallel + Tensor Parallel in
separate parallel dimensions:
Data Parallel across hosts
Tensor Parallel within each host
We use a simple diagram to illustrate below:
======================================================================
------------ ------------ ------------ ------------
| Host 1 | | Host 2 | | | | Host N |
| 8 GPUs | | 8 GPUs | | | | 8 GPUs |
| | | | | ... | | |
| (TP) | | (TP) | | | | (TP) |
|[0,1,..,7]| |[8,9..,15]| | | |[8N-8,8N-7|
| | | | | | | .., 8N-1]|
| | | | | | | |
------------ ------------ ------------ ------------
FSDP:
[0, 8, ..., 8N-8], [1, 9, ..., 8N-7], ..., [7, 15, ..., 8N-1]
======================================================================
More details can be seen in the slide:
https://docs.google.com/presentation/d/17g6WqrO00rP3MsxbRENsPpjrlSkwiA_QB4r93_eB5is/
"""
def demo_2d(rank, args):
"""
Main body of the demo of a basic version of 2D parallelism (TP/SP + FSDP)
by using PyTorch native APIs.
"""
print(f"Running basic 2D parallel (TP/SP + FSDP) example on rank {rank}.")
setup(rank, args.world_size)
assert (
args.world_size % args.tp_size == 0
), "World size needs to be divisible by TP size"
# create a sharding plan based on the given world_size.
device_mesh = DeviceMesh(
"cuda", torch.arange(0, args.world_size).view(-1, args.tp_size)
)
# create model and move it to GPU with id rank
model = ToyModel().cuda(rank)
# Create an optimizer for the parallelized module.
LR = 0.25
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
# Parallelize the module based on the given Parallel Style.
parallel_style = SequenceParallel() if args.run_seq_parallel else PairwiseParallel()
model = parallelize_module(model, device_mesh, parallel_style, tp_mesh_dim=1)
# We need to register hooks for TP + FSDP integration.
assert (
enable_2d_with_fsdp()
), "FSDP 2D hook is not registered. Please use PyTorch with version >= 2.0"
dp_pg = device_mesh.get_dim_groups()[0]
model = FSDP(model, process_group=dp_pg)
# Perform a number of iterations of forward/backward passes
# and optimizer steps for the sharded module.
for i in range(args.iter_nums):
# For TP, input needs to be same across all TP ranks.
# while for SP, input can be different across all ranks.
# Setting the random seed is to mimic the behavior of dataloader.
dp_rank = (
rank
if args.run_seq_parallel
else dist.get_rank(dp_pg)
)
torch.manual_seed(i + dp_rank)
inp = torch.rand(20, 10).cuda(rank)
output = model(inp)
output.sum().backward()
optimizer.step()
cleanup()
if __name__ == "__main__":
n_gpus = torch.cuda.device_count()
parser = argparse.ArgumentParser()
# This is passed in via cmd
parser.add_argument("--world_size", type=int, default=n_gpus)
parser.add_argument("--iter_nums", type=int, default=10)
parser.add_argument("--run_seq_parallel", type=bool, default=False)
parser.add_argument("--tp_size", type=int, default=2)
args = parser.parse_args()
# The main entry point is called directly without using subprocess
if n_gpus < 4:
print("Requires at least 4 GPUs to run.")
elif not SP_AVAILABLE:
print(
"PyTorch doesn't have Sequence Parallelism available,"
" need nightly build."
)
else:
mp.spawn(demo_2d, args=(args,), nprocs=args.world_size, join=True)
|
import argparse
import os
import sys
import tempfile
from urllib.parse import urlparse
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10)
self.relu = nn.ReLU()
self.net2 = nn.Linear(10, 5)
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
def demo_basic(local_world_size, local_rank):
# setup devices for this process. For local_world_size = 2, num_gpus = 8,
# rank 0 uses GPUs [0, 1, 2, 3] and
# rank 1 uses GPUs [4, 5, 6, 7].
n = torch.cuda.device_count() // local_world_size
device_ids = list(range(local_rank * n, (local_rank + 1) * n))
print(
f"[{os.getpid()}] rank = {dist.get_rank()}, "
+ f"world_size = {dist.get_world_size()}, n = {n}, device_ids = {device_ids} \n", end=''
)
model = ToyModel().cuda(device_ids[0])
ddp_model = DDP(model, device_ids)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
optimizer.zero_grad()
outputs = ddp_model(torch.randn(20, 10))
labels = torch.randn(20, 5).to(device_ids[0])
loss_fn(outputs, labels).backward()
optimizer.step()
def spmd_main(local_world_size, local_rank):
# These are the parameters used to initialize the process group
env_dict = {
key: os.environ[key]
for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
}
if sys.platform == "win32":
# Distributed package only covers collective communications with Gloo
# backend and FileStore on Windows platform. Set init_method parameter
# in init_process_group to a local file.
if "INIT_METHOD" in os.environ.keys():
print(f"init_method is {os.environ['INIT_METHOD']}")
url_obj = urlparse(os.environ["INIT_METHOD"])
if url_obj.scheme.lower() != "file":
raise ValueError("Windows only supports FileStore")
else:
init_method = os.environ["INIT_METHOD"]
else:
# This is an example application. For convenience, we create a file in the temp dir.
temp_dir = tempfile.gettempdir()
init_method = f"file:///{os.path.join(temp_dir, 'ddp_example')}"
dist.init_process_group(backend="gloo", init_method=init_method, rank=int(env_dict["RANK"]), world_size=int(env_dict["WORLD_SIZE"]))
else:
print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
dist.init_process_group(backend="nccl")
print(
f"[{os.getpid()}]: world_size = {dist.get_world_size()}, "
+ f"rank = {dist.get_rank()}, backend={dist.get_backend()} \n", end=''
)
demo_basic(local_world_size, local_rank)
# Tear down the process group
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# This is passed in via launch.py
parser.add_argument("--local_rank", type=int, default=0)
# This needs to be explicitly passed in
parser.add_argument("--local_world_size", type=int, default=1)
args = parser.parse_args()
# The main entry point is called directly without using subprocess
spmd_main(args.local_world_size, args.local_rank)
|
import os
import tempfile
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10)
self.relu = nn.ReLU()
self.net2 = nn.Linear(10, 5)
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
def demo_basic(rank, world_size):
print(f"Running basic DDP example on rank {rank}.")
setup(rank, world_size)
# create model and move it to GPU with id rank
model = ToyModel().to(rank)
ddp_model = DDP(model, device_ids=[rank])
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
optimizer.zero_grad()
outputs = ddp_model(torch.randn(20, 10))
labels = torch.randn(20, 5).to(rank)
loss_fn(outputs, labels).backward()
optimizer.step()
cleanup()
def run_demo(demo_fn, world_size):
mp.spawn(demo_fn,
args=(world_size,),
nprocs=world_size,
join=True)
def demo_checkpoint(rank, world_size):
print(f"Running DDP checkpoint example on rank {rank}.")
setup(rank, world_size)
model = ToyModel().to(rank)
ddp_model = DDP(model, device_ids=[rank])
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
CHECKPOINT_PATH = tempfile.gettempdir() + "/model.checkpoint"
if rank == 0:
# All processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes.
# Therefore, saving it in one process is sufficient.
torch.save(ddp_model.state_dict(), CHECKPOINT_PATH)
# Use a barrier() to make sure that process 1 loads the model after process
# 0 saves it.
dist.barrier()
# configure map_location properly
map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
ddp_model.load_state_dict(
torch.load(CHECKPOINT_PATH, map_location=map_location))
optimizer.zero_grad()
outputs = ddp_model(torch.randn(20, 10))
labels = torch.randn(20, 5).to(rank)
loss_fn = nn.MSELoss()
loss_fn(outputs, labels).backward()
optimizer.step()
# Use a barrier() to make sure that all processes have finished reading the
# checkpoint
dist.barrier()
if rank == 0:
os.remove(CHECKPOINT_PATH)
cleanup()
class ToyMpModel(nn.Module):
def __init__(self, dev0, dev1):
super(ToyMpModel, self).__init__()
self.dev0 = dev0
self.dev1 = dev1
self.net1 = torch.nn.Linear(10, 10).to(dev0)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(10, 5).to(dev1)
def forward(self, x):
x = x.to(self.dev0)
x = self.relu(self.net1(x))
x = x.to(self.dev1)
return self.net2(x)
def demo_model_parallel(rank, world_size):
print(f"Running DDP with model parallel example on rank {rank}.")
setup(rank, world_size)
# setup mp_model and devices for this process
dev0 = rank * 2
dev1 = rank * 2 + 1
mp_model = ToyMpModel(dev0, dev1)
ddp_mp_model = DDP(mp_model)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_mp_model.parameters(), lr=0.001)
optimizer.zero_grad()
# outputs will be on dev1
outputs = ddp_mp_model(torch.randn(20, 10))
labels = torch.randn(20, 5).to(dev1)
loss_fn(outputs, labels).backward()
optimizer.step()
cleanup()
if __name__ == "__main__":
n_gpus = torch.cuda.device_count()
if n_gpus < 8:
print(f"Requires at least 8 GPUs to run, but got {n_gpus}.")
else:
run_demo(demo_basic, 8)
run_demo(demo_checkpoint, 8)
run_demo(demo_model_parallel, 4)
|
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers import T5Tokenizer, T5ForConditionalGeneration
import functools
from torch.optim.lr_scheduler import StepLR
import torch.nn.functional as F
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from transformers.models.t5.modeling_t5 import T5Block
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
CPUOffload,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
from functools import partial
from torch.utils.data import DataLoader
from pathlib import Path
from summarization_dataset import *
import policies
import model_checkpointing
from configs import fsdp_config, train_config
from utils import (bfloat_support, setup,
cleanup, get_date_of_run,
format_metrics_to_gb,
train,validation,setup_model)
from transformers.models.t5.modeling_t5 import T5Block
from typing import Type
import time
import tqdm
from datetime import datetime
def get_policies(cfg, rank):
"""establish current policies for mixed precision and fsdp wrapping"""
mixed_precision_policy = None
wrapping_policy = None
# mixed precision -----
if cfg.mixed_precision:
bfloat_available = bfloat_support()
if bfloat_available and not cfg.use_fp16:
mixed_precision_policy = policies.bfSixteen
if rank == 0:
print(f"bFloat16 enabled for mixed precision - using bfSixteen policy")
elif cfg.use_fp16:
mixed_precision_policy = policies.fpSixteen
if rank == 0:
print(f"FP16 enabled. ")
else:
# mixed_precision_policy = policies.fpSixteen
print(
f"bFloat16 support not present. Will use FP32, and not mixed precision"
)
wrapping_policy = policies.get_t5_wrapper()
return mixed_precision_policy, wrapping_policy
def fsdp_main(args):
model, tokenizer = setup_model(train_config.model_name)
local_rank = int(os.environ['LOCAL_RANK'])
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
dataset = load_dataset('wikihow', 'all', data_dir='data/')
print(dataset.keys())
print("Size of train dataset: ", dataset['train'].shape)
print("Size of Validation dataset: ", dataset['validation'].shape)
#wikihow(tokenizer, type_path, num_samples, input_length, output_length, print_text=False)
train_dataset = wikihow(tokenizer, 'train', 1500, 512, 150, False)
val_dataset = wikihow(tokenizer, 'validation', 300, 512, 150, False)
sampler1 = DistributedSampler(train_dataset, rank=rank, num_replicas=world_size, shuffle=True)
sampler2 = DistributedSampler(val_dataset, rank=rank, num_replicas=world_size)
setup()
train_kwargs = {'batch_size': args.batch_size, 'sampler': sampler1}
test_kwargs = {'batch_size': args.test_batch_size, 'sampler': sampler2}
cuda_kwargs = {'num_workers': 2,
'pin_memory': True,
'shuffle': False}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
train_loader = torch.utils.data.DataLoader(train_dataset,**train_kwargs)
val_loader = torch.utils.data.DataLoader(val_dataset, **test_kwargs)
torch.cuda.set_device(local_rank)
# Set up FSDP parameters
mixed_precision_policy, t5_auto_wrap_policy = get_policies(train_config, rank)
# Apply FSDP wrapping to the model
model = FSDP(model,
auto_wrap_policy=t5_auto_wrap_policy,
mixed_precision=mixed_precision_policy,
sharding_strategy=fsdp_config.sharding_strategy,
device_id=torch.cuda.current_device(),
limit_all_gathers=fsdp_config.limit_all_gathers)
if fsdp_config.fsdp_activation_checkpointing:
policies.apply_fsdp_checkpointing(model)
# Set up optimizer and scheduler
optimizer = optim.AdamW(model.parameters(), lr=train_config.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=train_config.gamma)
best_val_loss = float("inf")
curr_val_loss = float("inf")
file_save_name = "T5-model-"
if rank == 0:
time_of_run = get_date_of_run()
dur = []
train_acc_tracking = []
val_acc_tracking = []
training_start_time = time.time()
if rank == 0 and args.track_memory:
mem_alloc_tracker = []
mem_reserved_tracker = []
for epoch in range(1, args.epochs + 1):
t0 = time.time()
train_accuracy = train(args, model, rank, world_size, train_loader, optimizer, epoch, sampler=sampler1)
if args.run_validation:
curr_val_loss = validation(model, rank, world_size, val_loader)
scheduler.step()
if rank == 0:
print(f"--> epoch {epoch} completed...entering save and stats zone")
dur.append(time.time() - t0)
train_acc_tracking.append(train_accuracy.item())
if args.run_validation:
val_acc_tracking.append(curr_val_loss.item())
if args.track_memory:
mem_alloc_tracker.append(
format_metrics_to_gb(torch.cuda.memory_allocated())
)
mem_reserved_tracker.append(
format_metrics_to_gb(torch.cuda.memory_reserved())
)
if train_config.save_model and curr_val_loss < best_val_loss:
if fsdp_config.checkpoint_type == StateDictType.FULL_STATE_DICT:
model_checkpointing.save_model_checkpoint(
model, optimizer, rank, fsdp_config, epoch=1
)
elif fsdp_config.checkpoint_type == StateDictType.SHARDED_STATE_DICT:
model_checkpointing.save_model_and_optimizer_sharded(model, rank, fsdp_config)
if fsdp_config.save_optimizer:
model_checkpointing.save_model_and_optimizer_sharded(model, rank, fsdp_config, optim=optimizer)
if fsdp_config.save_optimizer:
model_checkpointing.save_optimizer_checkpoint(
model, optimizer, rank, fsdp_config, epoch=1
)
if curr_val_loss < best_val_loss:
best_val_loss = curr_val_loss
if rank==0:
print(f"-->>>> New Val Loss Record: {best_val_loss}")
dist.barrier()
cleanup()
if __name__ == '__main__':
# Training settings
parser = argparse.ArgumentParser(description='PyTorch T5 FSDP Example')
parser.add_argument('--batch-size', type=int, default=4, metavar='N',
help='input batch size for training (default: 4)')
parser.add_argument('--test-batch-size', type=int, default=4, metavar='N',
help='input batch size for testing (default: 4)')
parser.add_argument('--epochs', type=int, default=2, metavar='N',
help='number of epochs to train (default: 2)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--track_memory', action='store_false', default=True,
help='disable GPU memory tracking (enabled by default)')
parser.add_argument('--run_validation', action='store_false', default=True,
help='disable the validation run (enabled by default)')
args = parser.parse_args()
torch.manual_seed(args.seed)
fsdp_main(args)
|
import argparse
import glob
import os
import json
import time
import logging
import random
import re
from itertools import chain
from string import punctuation
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset, load_metric
from transformers import (
AdamW,
T5ForConditionalGeneration,
T5Tokenizer,
get_linear_schedule_with_warmup
)
class wikihow(Dataset):
def __init__(self, tokenizer, type_path, num_samples, input_length, output_length, print_text=False):
self.dataset = load_dataset('wikihow', 'all', data_dir='data/', split=type_path)
if num_samples:
self.dataset = self.dataset.select(list(range(0, num_samples)))
self.input_length = input_length
self.tokenizer = tokenizer
self.output_length = output_length
self.print_text = print_text
def __len__(self):
return self.dataset.shape[0]
def clean_text(self, text):
text = text.replace('Example of text:', '')
text = text.replace('Example of Summary:', '')
text = text.replace('\n','')
text = text.replace('``', '')
text = text.replace('"', '')
return text
def convert_to_features(self, example_batch):
# Tokenize contexts and questions (as pairs of inputs)
if self.print_text:
print("Input Text: ", self.clean_text(example_batch['text']))
# input_ = self.clean_text(example_batch['text']) + " </s>"
# target_ = self.clean_text(example_batch['headline']) + " </s>"
input_ = self.clean_text(example_batch['text'])
target_ = self.clean_text(example_batch['headline'])
source = self.tokenizer.batch_encode_plus([input_], max_length=self.input_length,
padding='max_length', truncation=True, return_tensors="pt")
targets = self.tokenizer.batch_encode_plus([target_], max_length=self.output_length,
padding='max_length', truncation=True, return_tensors="pt")
return source, targets
def __getitem__(self, index):
source, targets = self.convert_to_features(self.dataset[index])
source_ids = source["input_ids"].squeeze()
target_ids = targets["input_ids"].squeeze()
src_mask = source["attention_mask"].squeeze()
target_mask = targets["attention_mask"].squeeze()
return {"source_ids": source_ids, "source_mask": src_mask, "target_ids": target_ids, "target_mask": target_mask}
def get_dataset(tokenizer, type_path, num_samples, args):
# note: assumes the caller passes max_input_length / max_output_length via args
return wikihow(tokenizer=tokenizer, type_path=type_path, num_samples=num_samples, input_length=args.max_input_length,
output_length=args.max_output_length)
|
import os
import torch
import torch.distributed as dist
from datetime import datetime
import tqdm
from transformers import AutoTokenizer, GPT2TokenizerFast
from transformers import T5Tokenizer, T5ForConditionalGeneration
g_gigabyte = 1024**3
def setup():
# initialize the process group
dist.init_process_group("nccl")
def cleanup():
dist.destroy_process_group()
def get_date_of_run():
"""create date and time for file save uniqueness
example: 2022-05-07-08:31:12_PM
"""
date_of_run = datetime.now().strftime("%Y-%m-%d-%I:%M:%S_%p")
print(f"--> current date and time of run = {date_of_run}")
return date_of_run
def format_metrics_to_gb(item):
"""quick function to format numbers to gigabyte and round to 4 digit precision"""
metric_num = item / g_gigabyte
metric_num = round(metric_num, ndigits=4)
return metric_num
def train(args, model, rank, world_size, train_loader, optimizer, epoch, sampler=None):
model.train()
local_rank = int(os.environ['LOCAL_RANK'])
fsdp_loss = torch.zeros(2).to(local_rank)
if sampler:
sampler.set_epoch(epoch)
if rank==0:
inner_pbar = tqdm.tqdm(
range(len(train_loader)), colour="blue", desc="r0 Training Epoch"
)
for batch in train_loader:
for key in batch.keys():
batch[key] = batch[key].to(local_rank)
optimizer.zero_grad()
output = model(input_ids=batch["source_ids"],attention_mask=batch["source_mask"],labels=batch["target_ids"] )
loss = output["loss"]
loss.backward()
optimizer.step()
fsdp_loss[0] += loss.item()
fsdp_loss[1] += len(batch)
if rank==0:
inner_pbar.update(1)
dist.all_reduce(fsdp_loss, op=dist.ReduceOp.SUM)
train_accuracy = fsdp_loss[0] / fsdp_loss[1] # note: despite the name, this is the average training loss
if rank == 0:
inner_pbar.close()
print(
f"Train Epoch: \t{epoch}, Loss: \t{train_accuracy:.4f}"
)
return train_accuracy
def validation(model, rank, world_size, val_loader):
model.eval()
correct = 0
local_rank = int(os.environ['LOCAL_RANK'])
fsdp_loss = torch.zeros(2).to(local_rank)
if rank == 0:
inner_pbar = tqdm.tqdm(
range(len(val_loader)), colour="green", desc="Validation Epoch"
)
with torch.no_grad():
for batch in val_loader:
for key in batch.keys():
batch[key] = batch[key].to(local_rank)
output = model(input_ids=batch["source_ids"],attention_mask=batch["source_mask"],labels=batch["target_ids"])
fsdp_loss[0] += output["loss"].item() # sum up batch loss
fsdp_loss[1] += len(batch)
if rank==0:
inner_pbar.update(1)
dist.all_reduce(fsdp_loss, op=dist.ReduceOp.SUM)
val_loss = fsdp_loss[0] / fsdp_loss[1]
if rank == 0:
inner_pbar.close()
print(f"Validation Loss: {val_loss:.4f}")
return val_loss
def setup_model(model_name):
model = T5ForConditionalGeneration.from_pretrained(model_name)
tokenizer = T5Tokenizer.from_pretrained(model_name)
return model, tokenizer
|
from .environment import bfloat_support
from .train_utils import setup, cleanup, get_date_of_run, format_metrics_to_gb, train, validation,setup_model
|
# Copyright (c) 2022 Meta Platforms, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the Apache-style license found in the
# LICENSE file in the root directory of this source tree.
# This is a simple check to confirm that your current server has full bfloat16 support -
# both native GPU support and network communication support.
# Be warned that if you run on V100s without a check like this, you will be running without native bfloat16
# support and will see significant performance degradation (but no error will be raised).
# Hence the reason for a checker!
from pkg_resources import packaging
import torch
import torch.cuda.nccl as nccl
import torch.distributed as dist
# checks the Ampere architecture, CUDA version and
# NCCL version to verify that native bfloat16 support is available
def bfloat_support():
return (
torch.version.cuda
and torch.cuda.is_bf16_supported()
and packaging.version.parse(torch.version.cuda).release >= (11, 0)
and dist.is_nccl_available()
and nccl.version() >= (2, 10)
)
|
import torch
import os
import torch.distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
apply_activation_checkpointing,
)
from transformers.models.t5.modeling_t5 import T5Block
from functools import partial
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=False,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
check_fn = lambda submodule: isinstance(submodule, T5Block)
def apply_fsdp_checkpointing(model):
"""apply activation checkpointing to model
returns None as model is updated directly
"""
print(f"--> applying fdsp activation checkpointing...")
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
|
import torch
from torch.distributed.fsdp import (
# FullyShardedDataParallel as FSDP,
# CPUOffload,
MixedPrecision,
# BackwardPrefetch,
# ShardingStrategy,
)
# requires grad scaler in main loop
fpSixteen = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
bfSixteen = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
bfSixteen_working = MixedPrecision(
param_dtype=torch.float32,
reduce_dtype=torch.bfloat16,
buffer_dtype=torch.bfloat16,
)
fp32_policy = MixedPrecision(
param_dtype=torch.float32,
reduce_dtype=torch.float32,
buffer_dtype=torch.float32,
)
|
from .mixed_precision import *
from .wrapping import *
from .activation_checkpointing_functions import apply_fsdp_checkpointing
|
# holds various wrapping policies for fsdp
import torch.distributed as dist
import torch.nn as nn
import torch
from transformers.models.t5.modeling_t5 import T5Block
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
CPUOffload,
BackwardPrefetch,
MixedPrecision,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
size_based_auto_wrap_policy,
enable_wrap,
wrap,
)
import functools
from typing import Type
def get_size_policy(min_params=1e8):
num_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=min_params
)
return num_wrap_policy
def get_t5_wrapper():
"""we register our main layer class and use the fsdp transformer wrapping policy
ensures embedding layers are in the root fsdp unit for shared access and that fsdp units map to transformer layers
"""
# ==== use new transformer wrapper
t5_auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
T5Block,
},
)
return t5_auto_wrap_policy
|
from pathlib import Path
from datetime import datetime
import torch
import time
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
StateDictType,
FullStateDictConfig, # general model non-sharded, non-flattened params
LocalStateDictConfig, # flattened params, usable only by FSDP
# ShardedStateDictConfig, # un-flattened param but shards, usable by other parallel schemes.
)
from torch.distributed._shard.checkpoint import (
FileSystemReader,
FileSystemWriter,
save_state_dict,
load_state_dict,
)
from torch.distributed.checkpoint.default_planner import (
DefaultSavePlanner,
DefaultLoadPlanner,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
import torch.distributed._shard.checkpoint as dist_cp
import torch.distributed as dist
def get_date_of_run():
"""create date and time for file save uniqueness
example: 2022-05-07-08:31:12_PM
"""
date_of_run = datetime.now().strftime("%Y-%m-%d-%I:%M:%S_%p")
print(f"--> current date and time of run = {date_of_run}")
return date_of_run
# create singleton saving policies to avoid re-creating them over and over
fullstate_save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
def load_model_sharded(model, rank, cfg, verbose=True):
# torch.manual_seed(103)
folder_name = (
cfg.dist_checkpoint_root_folder
+ "/"
+ cfg.dist_checkpoint_folder
+ "-"
+ cfg.model_name
)
load_dir = Path.cwd() / folder_name
if not load_dir.exists():
if rank == 0:
print(f"No sharded_state_dict checkpoint directory found...skipping")
return
reader = FileSystemReader(load_dir)
with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
checkpoint = model.state_dict()
if rank == 0:
ck = checkpoint.keys()
print(f" checkpoint key len = {len(ck)} and \n keys = {ck}")
dist_cp.load_state_dict(
state_dict=checkpoint,
storage_reader=reader,
)
if rank == 0:
print(f"checkpoint after load_state_dict()")
ck = checkpoint.keys()
print(f" checkpoint key len = {len(ck)} and \n keys = {ck}")
model.load_state_dict(checkpoint)
if rank == 0:
print(f"Sharded state checkpoint loaded from {load_dir}")
def save_model_and_optimizer_sharded(model, rank, cfg,optim=None, verbose=True):
"""save model and optimizer via sharded_state_dict to save_dir"""
folder_name = (
cfg.dist_checkpoint_root_folder
+ "/"
+ cfg.dist_checkpoint_folder
+ "-"
+ cfg.model_name
)
save_dir = Path.cwd() / folder_name
if rank == 0:
print(f"Saving model to {save_dir}")
distributed_writer = dist_cp.FileSystemWriter(
save_dir,
)
t0 = time.perf_counter()
with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
state_dict = {"model": model.state_dict()}
if optim is not None:
state_dict["optim"] = FSDP.optim_state_dict(model, optim)
dist_cp.save_state_dict(
state_dict=state_dict,
storage_writer=distributed_writer,
planner=DefaultSavePlanner(),
)
dist.barrier()
t1 = time.perf_counter()
if rank == 0:
print(f"Sharded state checkpoint saved to {save_dir}")
print(
f"Checkpoint Time = {t1-t0:.4f}\n using {cfg.save_using_num_threads=} total threads"
)
def save_model_checkpoint(
model,
optimizer,
rank,
cfg,
epoch=1,
):
"""saving model via rank0 cpu streaming and full_state_dict"""
# saving with rank0 cpu
if cfg.checkpoint_type != StateDictType.FULL_STATE_DICT:
print(f" unable to handle checkpoint type {cfg.checkpoint_type}, aborting")
return
with FSDP.state_dict_type(
model, StateDictType.FULL_STATE_DICT, fullstate_save_policy
):
cpu_state = model.state_dict()
if cfg.verbose:
print(f"saving process: rank {rank} done w model state_dict\n")
if rank == 0:
print(f"--> saving model ...")
# create save path
save_dir = Path.cwd() / cfg.checkpoint_folder
save_dir.mkdir(parents=True, exist_ok=True)
save_name = cfg.model_save_name + "-" + str(epoch) + ".pt"
save_full_path = str(save_dir) + "/" + save_name
# save model
torch.save(cpu_state, save_full_path)
if cfg.verbose:
print(f"model checkpoint saved for epoch {epoch} at {save_full_path}\n")
def load_model_checkpoint(model, rank, cfg, verbose=True):
"""load local checkpoint to rank0 cpu
must be called * before * passing to FSDP"""
if rank != 0:
return
# where is the checkpoint at...
full_state_dict_model_path = (
Path.cwd() / cfg.checkpoint_folder / cfg.checkpoint_model_filename
)
# is it present...
if not full_state_dict_model_path.is_file():
print(
f"model checkpoint {full_state_dict_model_path} not present. Returning..."
)
return
model_checkpoint = torch.load(full_state_dict_model_path)
# integrate into loaded model
model.load_state_dict(model_checkpoint)
if cfg.verbose:
print(f"model checkpoint loaded to rank0 cpu")
def save_optimizer_checkpoint(model, optimizer, rank, cfg, epoch=1):
"""save optimizer state via full state dict"""
if cfg.verbose:
print(f"--> optim state call on rank {rank}\n")
# pull all sharded optimizer states to rank0 cpu...
optim_state = FSDP.full_optim_state_dict(model, optimizer)
if cfg.verbose:
print(f"optim state dict ready on {rank} and len of {len(optim_state)}\n")
if rank == 0:
save_dir = Path.cwd() / cfg.checkpoint_folder
save_dir.mkdir(parents=True, exist_ok=True)
opt_save_name = (
cfg.optimizer_name + "-" + cfg.model_save_name + "-" + str(epoch) + ".pt"
)
opt_save_full_path = save_dir / opt_save_name
print(f"--> saving optimizer state...")
torch.save(optim_state, opt_save_full_path)
print(f"--> saved {opt_save_full_path} to disk")
def load_optimizer_checkpoint(model, optimizer, rank, cfg):
"""load an fdsp optimizer full_state checkpoint using scatter method
this ensures only rank 0 loads the optimizer state dict and scatters to other ranks
"""
opt_file_path = Path.cwd() / cfg.checkpoint_folder / cfg.optimizer_checkpoint_file
if not opt_file_path.is_file():
print(
f"warning - optimizer checkpoint not present {opt_file_path}. Returning. "
)
return
full_osd = None
if rank == 0:
full_osd = torch.load(opt_file_path)
if cfg.verbose:
print(f"loaded full osd on rank 0")
# called from all ranks, though only rank0 has a valid param for full_osd
sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)
if cfg.verbose:
print(f"optimizer shard loaded on rank {rank}")
def load_distributed_model_checkpoint(model, rank, cfg):
if cfg.checkpoint_type == StateDictType.LOCAL_STATE_DICT:
print(f"loading distributed checkpoint, rank {rank}...")
folder_name = (
cfg.dist_checkpoint_root_folder
+ "/"
+ cfg.dist_checkpoint_folder
+ "-"
+ cfg.model_name
)
checkdir = Path.cwd() / folder_name
if not checkdir.exists():
if rank == 0:
print(f"No checkpoint directory found...skipping")
return
reader = FileSystemReader(checkdir)
with FSDP.state_dict_type(
model,
StateDictType.LOCAL_STATE_DICT,
):
state_dict = model.state_dict()
load_state_dict(state_dict, reader)
model.load_state_dict(state_dict)
print(f"--> local state loaded on rank {rank}")
return
def save_distributed_model_checkpoint(model, rank, cfg, epoch=1):
# distributed checkpoint saving
# confirm type of checkpoint and save
if cfg.checkpoint_type == StateDictType.LOCAL_STATE_DICT:
# create writer to current path
folder_name = (
cfg.dist_checkpoint_root_folder
+ "/"
+ cfg.dist_checkpoint_folder
+ "-"
+ cfg.model_name
)
save_dir = Path.cwd() / folder_name
writer = FileSystemWriter(
save_dir,
)
with FSDP.state_dict_type(
model,
StateDictType.LOCAL_STATE_DICT,
):
state_dict = model.state_dict()
# write out distributed checkpoint
save_state_dict(state_dict, writer)
return
|
from .checkpoint_handler import (
load_model_checkpoint,
save_model_checkpoint,
save_distributed_model_checkpoint,
load_distributed_model_checkpoint,
load_optimizer_checkpoint,
save_optimizer_checkpoint,
save_model_and_optimizer_sharded,
load_model_sharded,
)
|
from dataclasses import dataclass, field
from typing import ClassVar
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
@dataclass
class fsdp_config:
mixed_precision: bool=True
use_fp16: bool=False
seed: int=42
fsdp_activation_checkpointing: bool=True
limit_all_gathers: bool=True
sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD #HYBRID_SHARD, SHARD_GRAD_OP
checkpoint_type: StateDictType = StateDictType.FULL_STATE_DICT # alternatively can use SHARDED_STATE_DICT to avoid OOMs
save_optimizer: bool=False
|
from .fsdp import fsdp_config
from .training import train_config
|
from dataclasses import dataclass
from typing import ClassVar
@dataclass
class train_config:
model_name: str="t5-base"
run_validation: bool=True
batch_size_training: int=4
num_workers_dataloader: int=2
lr: float=0.002
weight_decay: float=0.0
gamma: float= 0.85
use_fp16: bool=False
mixed_precision: bool=True
save_model: bool=False
|
import torch
from torch.utils.data import Dataset
import fsspec
from dataclasses import dataclass
"""
Adapted from https://github.com/karpathy/minGPT/blob/master/projects/chargpt/chargpt.py
"""
@dataclass
class DataConfig:
path: str = None
block_size: int = None
train_split: float = None
truncate: float = 1.0
class CharDataset(Dataset):
def __init__(self, data_cfg: DataConfig):
data = fsspec.open(data_cfg.path).open().read().decode('utf-8')
data = data[ : int(len(data) * data_cfg.truncate)]
chars = sorted(list(set(data)))
data_size, vocab_size = len(data), len(chars)
print('Data has %d characters, %d unique.' % (data_size, vocab_size))
self.stoi = {ch: i for i, ch in enumerate(chars)}
self.itos = {i: ch for i, ch in enumerate(chars)}
self.block_size = data_cfg.block_size
self.vocab_size = vocab_size
self.data = data
def __len__(self):
return len(self.data) - self.block_size
def __getitem__(self, idx):
# grab a chunk of (block_size + 1) characters from the data
chunk = self.data[idx:idx + self.block_size + 1]
# encode every character to an integer
dix = [self.stoi[s] for s in chunk]
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
return x, y
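# --- Added example (not part of the original file) ---
# A tiny worked example of the shift performed in __getitem__ above; it is
# hypothetical and never called by the training code. For block_size = 3 and
# the text "hello", the chunk at idx 0 is "hell": the model sees x = "hel" and
# is trained to predict y = "ell", i.e. the next character at every position.
def _demo_char_shift():
    text, block_size = "hello", 3
    stoi = {ch: i for i, ch in enumerate(sorted(set(text)))}
    chunk = text[0:block_size + 1]                    # "hell"
    dix = [stoi[s] for s in chunk]
    x = torch.tensor(dix[:-1], dtype=torch.long)      # encodes "hel"
    y = torch.tensor(dix[1:], dtype=torch.long)       # encodes "ell"
    return x, y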
|
"""
Full definition of a GPT Language Model, all of it in this single file.
Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
from dataclasses import dataclass
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
@dataclass
class GPTConfig:
model_type: str = 'gpt2'
# model configurations
n_layer: int = None
n_head: int = None
n_embd: int = None
# openai's values for gpt2
vocab_size: int = 50257
block_size: int = 1024
# dropout hyperparameters
embd_pdrop: float = 0.1
resid_pdrop: float = 0.1
attn_pdrop: float = 0.1
@dataclass
class OptimizerConfig:
learning_rate: float = 3e-4
weight_decay: float = 0.1
class MultiheadAttentionLayer(nn.Module):
"""
A multi-head masked self-attention layer with a projection at the end.
"""
def __init__(self, config, device="cpu", dtype=torch.float32):
super().__init__()
assert config.n_embd % config.n_head == 0
self.resid_drop = nn.Dropout(config.resid_pdrop)
self.c_proj = nn.Linear(config.n_embd, config.n_embd, device=device, dtype=dtype)
self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
self.attn = torch.nn.MultiheadAttention(
embed_dim=config.n_embd,
num_heads=config.n_head,
dropout=config.attn_pdrop,
batch_first=True,
device=device,
dtype=dtype
)
    def forward(self, x):
        _, seq_size, _ = x.size()
        # use a boolean causal mask (True = this position must NOT be attended to); passing the
        # raw float tril buffer as attn_mask would be added to the attention scores rather than
        # masking out future positions
        causal_mask = self.mask[0, 0, :seq_size, :seq_size] == 0
        y = self.attn(x, x, x, attn_mask=causal_mask)[0]
y = self.resid_drop(self.c_proj(y))
return y
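# Editorial note (not part of the original sample): with the boolean mask above and
# seq_size = 4, the attn_mask passed to nn.MultiheadAttention is (True = the query may
# NOT attend to that key, i.e. future positions are blocked):
#   [[False,  True,  True,  True],
#    [False, False,  True,  True],
#    [False, False, False,  True],
#    [False, False, False, False]]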
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config: GPTConfig):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = MultiheadAttentionLayer(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class EmbeddingStem(nn.Module):
def __init__(self, config: GPTConfig, device="cpu", dtype=torch.float32):
super().__init__()
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd, device=device, dtype=dtype)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd, device=device, dtype=dtype))
self.drop = nn.Dropout(config.embd_pdrop)
self.block_size = config.block_size
def reset_parameters(self):
self.tok_emb.reset_parameters()
def forward(self, idx):
b, t = idx.size()
assert t <= self.block_size, f"Cannot forward sequence of length {t}, block size is only {self.block_size}"
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) embedding vector
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) position vector
return self.drop(token_embeddings + position_embeddings)
class GPT(nn.Module):
""" GPT Language Model """
def __init__(self, config: GPTConfig):
super().__init__()
self.block_size = config.block_size
config = self._set_model_config(config)
# input embedding stem
self.emb_stem = EmbeddingStem(config)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# init all weights, and apply a special scaled init to the residual projections, per GPT-2 paper
self.apply(self._init_weights)
for pn, p in self.named_parameters():
if pn.endswith('c_proj.weight'):
p.data.normal_(mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
        # report number of parameters (counts only the transformer blocks; the embedding stem and output head are excluded)
n_params = sum(p.numel() for p in self.blocks.parameters())
print("number of parameters: %.2fM" % (n_params/1e6,))
def _set_model_config(self, config):
type_given = config.model_type is not None
params_given = all([config.n_layer is not None, config.n_head is not None, config.n_embd is not None])
# assert type_given ^ params_given # exactly one of these (XOR)
if type_given and not params_given:
# translate from model_type to detailed configuration
config.__dict__.update({
# names follow the huggingface naming conventions
# GPT-1
'openai-gpt': dict(n_layer=12, n_head=12, n_embd=768), # 117M params
# GPT-2 configs
'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
# Gophers
'gopher-44m': dict(n_layer=8, n_head=16, n_embd=512),
# (there are a number more...)
# I made these tiny models up
'gpt-mini': dict(n_layer=6, n_head=6, n_embd=192),
'gpt-micro': dict(n_layer=4, n_head=4, n_embd=128),
'gpt-nano': dict(n_layer=3, n_head=3, n_embd=48),
}[config.model_type])
return config
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, idx, targets=None):
x = self.emb_stem(idx)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
return logits, loss
@torch.no_grad()
def generate(self, idx, max_new_tokens, temperature=1.0, do_sample=False, top_k=None):
"""
Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
the sequence max_new_tokens times, feeding the predictions back into the model each time.
Most likely you'll want to make sure to be in model.eval() mode of operation for this.
"""
for _ in range(max_new_tokens):
# if the sequence context is growing too long we must crop it at block_size
idx_cond = idx if idx.size(1) <= self.block_size else idx[:, -self.block_size:]
# forward the model to get the logits for the index in the sequence
logits, _ = self(idx_cond)
# pluck the logits at the final step and scale by desired temperature
logits = logits[:, -1, :] / temperature
# optionally crop the logits to only the top k options
if top_k is not None:
v, _ = torch.topk(logits, top_k)
logits[logits < v[:, [-1]]] = -float('Inf')
# apply softmax to convert logits to (normalized) probabilities
probs = F.softmax(logits, dim=-1)
# either sample from the distribution or take the most likely element
if do_sample:
idx_next = torch.multinomial(probs, num_samples=1)
else:
_, idx_next = torch.topk(probs, k=1, dim=-1)
# append sampled index to the running sequence and continue
idx = torch.cat((idx, idx_next), dim=1)
return idx
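# --- Editorial usage sketch (not part of the original sample) ---
# A minimal, CPU-only illustration of GPTConfig + GPT.generate, using the tiny 'gpt-nano'
# preset from GPT._set_model_config above; the vocab/block sizes and sampling settings here
# are arbitrary.
def _generate_sketch():
    cfg = GPTConfig(model_type='gpt-nano', vocab_size=65, block_size=32)
    model = GPT(cfg).eval()
    prompt = torch.zeros((1, 1), dtype=torch.long)  # batch of one, single start token id 0
    completion = model.generate(prompt, max_new_tokens=10, do_sample=True, top_k=5)
    # completion has shape (1, 1 + 10): the prompt followed by ten sampled token ids
    return completion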
def create_optimizer(model: torch.nn.Module, opt_config: OptimizerConfig):
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
# random note: because named_modules and named_parameters are recursive
# we will see the same tensors p many many times. but doing it this way
# allows us to know which parent module any tensor p belongs to...
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('in_proj_weight'):
# MHA projection layer
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
elif pn.endswith('pos_emb'):
# positional embedding shouldn't be decayed
no_decay.add(fpn)
# validate that we considered every parameter
param_dict = {pn: p for pn, p in model.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": opt_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=opt_config.learning_rate, betas=(0.9, 0.95))
    return optimizer
|
"""
Simple training loop; Boilerplate that could apply to any arbitrary neural network,
so nothing in this file really has anything to do with GPT specifically.
"""
from dataclasses import dataclass, asdict
from collections import OrderedDict
from typing import Optional, Any, Dict
import os
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import boto3
from urllib.parse import urlparse
import fsspec
import io
@dataclass
class TrainerConfig:
max_epochs: int = None
batch_size: int = None
data_loader_workers: int = None
grad_norm_clip: float = None
snapshot_path: Optional[str] = None
save_every: int = None
use_amp: bool = None
@dataclass
class Snapshot:
model_state: 'OrderedDict[str, torch.Tensor]'
optimizer_state: Dict[str, Any]
finished_epoch: int
def upload_to_s3(obj, dst):
buffer = io.BytesIO()
torch.save(obj, buffer)
buffer.seek(0)
dst = urlparse(dst, allow_fragments=False)
boto3.client('s3').upload_fileobj(buffer, dst.netloc, dst.path.lstrip('/'))
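# Editorial note (not part of the original sample): upload_to_s3 relies on urlparse to
# split an s3:// URI into bucket (netloc) and key (path without the leading slash), e.g.
#   urlparse("s3://my-bucket/run1/snapshot.pt").netloc           -> "my-bucket"
#   urlparse("s3://my-bucket/run1/snapshot.pt").path.lstrip("/") -> "run1/snapshot.pt"
# The bucket and key names here are placeholders.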
class Trainer:
def __init__(self, trainer_config: TrainerConfig, model, optimizer, train_dataset, test_dataset=None):
self.config = trainer_config
# set torchrun variables
self.local_rank = int(os.environ["LOCAL_RANK"])
self.global_rank = int(os.environ["RANK"])
# data stuff
self.train_dataset = train_dataset
self.train_loader = self._prepare_dataloader(train_dataset)
self.test_loader = self._prepare_dataloader(test_dataset) if test_dataset else None
# initialize train states
self.epochs_run = 0
self.model = model.to(self.local_rank)
self.optimizer = optimizer
self.save_every = self.config.save_every
if self.config.use_amp:
self.scaler = torch.cuda.amp.GradScaler()
        # load snapshot if available so training can resume from the last finished epoch
if self.config.snapshot_path is None:
self.config.snapshot_path = "snapshot.pt"
self._load_snapshot()
# wrap with DDP. this step will synch model across all the processes.
self.model = DDP(self.model, device_ids=[self.local_rank])
def _prepare_dataloader(self, dataset: Dataset):
return DataLoader(
dataset,
batch_size=self.config.batch_size,
pin_memory=True,
shuffle=False,
num_workers=self.config.data_loader_workers,
sampler=DistributedSampler(dataset)
)
def _load_snapshot(self):
try:
snapshot = fsspec.open(self.config.snapshot_path)
with snapshot as f:
snapshot_data = torch.load(f, map_location="cpu")
except FileNotFoundError:
print("Snapshot not found. Training model from scratch")
return
snapshot = Snapshot(**snapshot_data)
self.model.load_state_dict(snapshot.model_state)
self.optimizer.load_state_dict(snapshot.optimizer_state)
self.epochs_run = snapshot.finished_epoch
print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
def _run_batch(self, source, targets, train: bool = True) -> float:
with torch.set_grad_enabled(train), torch.amp.autocast(device_type="cuda", dtype=torch.float16, enabled=(self.config.use_amp)):
_, loss = self.model(source, targets)
if train:
self.optimizer.zero_grad(set_to_none=True)
            if self.config.use_amp:
                self.scaler.scale(loss).backward()
                # unscale gradients before clipping so the clip threshold applies to the true gradients
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)
                self.scaler.step(self.optimizer)
                self.scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)
self.optimizer.step()
return loss.item()
def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):
dataloader.sampler.set_epoch(epoch)
        for step, (source, targets) in enumerate(dataloader):
            step_type = "Train" if train else "Eval"
            source = source.to(self.local_rank)
            targets = targets.to(self.local_rank)
            batch_loss = self._run_batch(source, targets, train)
            if step % 100 == 0:
                print(f"[GPU{self.global_rank}] Epoch {epoch} | Iter {step} | {step_type} Loss {batch_loss:.5f}")
def _save_snapshot(self, epoch):
# capture snapshot
model = self.model
raw_model = model.module if hasattr(model, "module") else model
snapshot = Snapshot(
model_state=raw_model.state_dict(),
optimizer_state=self.optimizer.state_dict(),
finished_epoch=epoch
)
# save snapshot
snapshot = asdict(snapshot)
if self.config.snapshot_path.startswith("s3://"):
upload_to_s3(snapshot, self.config.snapshot_path)
else:
torch.save(snapshot, self.config.snapshot_path)
print(f"Snapshot saved at epoch {epoch}")
def train(self):
for epoch in range(self.epochs_run, self.config.max_epochs):
epoch += 1
self._run_epoch(epoch, self.train_loader, train=True)
if self.local_rank == 0 and epoch % self.save_every == 0:
self._save_snapshot(epoch)
# eval run
if self.test_loader:
self._run_epoch(epoch, self.test_loader, train=False)
|
import os
import torch
from torch.utils.data import random_split
from torch.distributed import init_process_group, destroy_process_group
from model import GPT, GPTConfig, OptimizerConfig, create_optimizer
from trainer import Trainer, TrainerConfig
from char_dataset import CharDataset, DataConfig
from omegaconf import DictConfig
import hydra
def ddp_setup():
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
def get_train_objs(gpt_cfg: GPTConfig, opt_cfg: OptimizerConfig, data_cfg: DataConfig):
dataset = CharDataset(data_cfg)
train_len = int(len(dataset) * data_cfg.train_split)
train_set, test_set = random_split(dataset, [train_len, len(dataset) - train_len])
gpt_cfg.vocab_size = dataset.vocab_size
gpt_cfg.block_size = dataset.block_size
model = GPT(gpt_cfg)
optimizer = create_optimizer(model, opt_cfg)
return model, optimizer, train_set, test_set
@hydra.main(version_base=None, config_path=".", config_name="gpt2_train_cfg")
def main(cfg: DictConfig):
ddp_setup()
gpt_cfg = GPTConfig(**cfg['gpt_config'])
opt_cfg = OptimizerConfig(**cfg['optimizer_config'])
data_cfg = DataConfig(**cfg['data_config'])
trainer_cfg = TrainerConfig(**cfg['trainer_config'])
model, optimizer, train_data, test_data = get_train_objs(gpt_cfg, opt_cfg, data_cfg)
trainer = Trainer(trainer_cfg, model, optimizer, train_data, test_data)
trainer.train()
destroy_process_group()
if __name__ == "__main__":
main()
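# Editorial note (not part of the original sample): this entrypoint is meant to be launched
# with torchrun so that RANK/LOCAL_RANK/WORLD_SIZE are set for ddp_setup(), and it expects a
# Hydra config named gpt2_train_cfg (per the decorator above) next to the script. A typical
# single-node launch might look like (the script name and GPU count are assumptions):
#   torchrun --standalone --nproc_per_node=4 main.py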
|
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import os
def ddp_setup():
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
class Trainer:
def __init__(
self,
model: torch.nn.Module,
train_data: DataLoader,
optimizer: torch.optim.Optimizer,
save_every: int,
snapshot_path: str,
) -> None:
self.local_rank = int(os.environ["LOCAL_RANK"])
self.global_rank = int(os.environ["RANK"])
self.model = model.to(self.local_rank)
self.train_data = train_data
self.optimizer = optimizer
self.save_every = save_every
self.epochs_run = 0
self.snapshot_path = snapshot_path
if os.path.exists(snapshot_path):
print("Loading snapshot")
self._load_snapshot(snapshot_path)
self.model = DDP(self.model, device_ids=[self.local_rank])
def _load_snapshot(self, snapshot_path):
loc = f"cuda:{self.local_rank}"
snapshot = torch.load(snapshot_path, map_location=loc)
self.model.load_state_dict(snapshot["MODEL_STATE"])
self.epochs_run = snapshot["EPOCHS_RUN"]
print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
def _run_batch(self, source, targets):
self.optimizer.zero_grad()
output = self.model(source)
loss = F.cross_entropy(output, targets)
loss.backward()
self.optimizer.step()
def _run_epoch(self, epoch):
b_sz = len(next(iter(self.train_data))[0])
print(f"[GPU{self.global_rank}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
self.train_data.sampler.set_epoch(epoch)
for source, targets in self.train_data:
source = source.to(self.local_rank)
targets = targets.to(self.local_rank)
self._run_batch(source, targets)
def _save_snapshot(self, epoch):
snapshot = {
"MODEL_STATE": self.model.module.state_dict(),
"EPOCHS_RUN": epoch,
}
torch.save(snapshot, self.snapshot_path)
print(f"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}")
def train(self, max_epochs: int):
for epoch in range(self.epochs_run, max_epochs):
self._run_epoch(epoch)
if self.local_rank == 0 and epoch % self.save_every == 0:
self._save_snapshot(epoch)
def load_train_objs():
train_set = MyTrainDataset(2048) # load your dataset
model = torch.nn.Linear(20, 1) # load your model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
return train_set, model, optimizer
def prepare_dataloader(dataset: Dataset, batch_size: int):
return DataLoader(
dataset,
batch_size=batch_size,
pin_memory=True,
shuffle=False,
sampler=DistributedSampler(dataset)
)
def main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = "snapshot.pt"):
ddp_setup()
dataset, model, optimizer = load_train_objs()
train_data = prepare_dataloader(dataset, batch_size)
trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)
trainer.train(total_epochs)
destroy_process_group()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='simple distributed training job')
parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')
parser.add_argument('save_every', type=int, help='How often to save a snapshot')
parser.add_argument('--batch_size', default=32, type=int, help='Input batch size on each device (default: 32)')
args = parser.parse_args()
main(args.save_every, args.total_epochs, args.batch_size)
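# Editorial note (not part of the original sample): this script is meant to be launched with
# torchrun, which sets the RANK and LOCAL_RANK variables read in ddp_setup() and
# Trainer.__init__. For a single node with 4 GPUs the launch might look like (the file name
# and argument values are assumptions):
#   torchrun --standalone --nproc_per_node=4 multinode.py 50 10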
|
import torch
from torch.utils.data import Dataset
class MyTrainDataset(Dataset):
def __init__(self, size):
self.size = size
self.data = [(torch.rand(20), torch.rand(1)) for _ in range(size)]
def __len__(self):
return self.size
def __getitem__(self, index):
        return self.data[index]
|
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
class Trainer:
def __init__(
self,
model: torch.nn.Module,
train_data: DataLoader,
optimizer: torch.optim.Optimizer,
gpu_id: int,
save_every: int,
) -> None:
self.gpu_id = gpu_id
self.model = model.to(gpu_id)
self.train_data = train_data
self.optimizer = optimizer
self.save_every = save_every
def _run_batch(self, source, targets):
self.optimizer.zero_grad()
output = self.model(source)
loss = F.cross_entropy(output, targets)
loss.backward()
self.optimizer.step()
def _run_epoch(self, epoch):
b_sz = len(next(iter(self.train_data))[0])
print(f"[GPU{self.gpu_id}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
for source, targets in self.train_data:
source = source.to(self.gpu_id)
targets = targets.to(self.gpu_id)
self._run_batch(source, targets)
def _save_checkpoint(self, epoch):
ckp = self.model.state_dict()
PATH = "checkpoint.pt"
torch.save(ckp, PATH)
print(f"Epoch {epoch} | Training checkpoint saved at {PATH}")
def train(self, max_epochs: int):
for epoch in range(max_epochs):
self._run_epoch(epoch)
if epoch % self.save_every == 0:
self._save_checkpoint(epoch)
def load_train_objs():
train_set = MyTrainDataset(2048) # load your dataset
model = torch.nn.Linear(20, 1) # load your model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
return train_set, model, optimizer
def prepare_dataloader(dataset: Dataset, batch_size: int):
return DataLoader(
dataset,
batch_size=batch_size,
pin_memory=True,
shuffle=True
)
def main(device, total_epochs, save_every, batch_size):
dataset, model, optimizer = load_train_objs()
train_data = prepare_dataloader(dataset, batch_size)
trainer = Trainer(model, train_data, optimizer, device, save_every)
trainer.train(total_epochs)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='simple distributed training job')
parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')
parser.add_argument('save_every', type=int, help='How often to save a snapshot')
parser.add_argument('--batch_size', default=32, type=int, help='Input batch size on each device (default: 32)')
args = parser.parse_args()
device = 0 # shorthand for cuda:0
main(device, args.total_epochs, args.save_every, args.batch_size)
|
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import os
def ddp_setup(rank, world_size):
"""
Args:
rank: Unique identifier of each process
world_size: Total number of processes
"""
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
init_process_group(backend="nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
class Trainer:
def __init__(
self,
model: torch.nn.Module,
train_data: DataLoader,
optimizer: torch.optim.Optimizer,
gpu_id: int,
save_every: int,
) -> None:
self.gpu_id = gpu_id
self.model = model.to(gpu_id)
self.train_data = train_data
self.optimizer = optimizer
self.save_every = save_every
self.model = DDP(model, device_ids=[gpu_id])
def _run_batch(self, source, targets):
self.optimizer.zero_grad()
output = self.model(source)
loss = F.cross_entropy(output, targets)
loss.backward()
self.optimizer.step()
def _run_epoch(self, epoch):
b_sz = len(next(iter(self.train_data))[0])
print(f"[GPU{self.gpu_id}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
self.train_data.sampler.set_epoch(epoch)
for source, targets in self.train_data:
source = source.to(self.gpu_id)
targets = targets.to(self.gpu_id)
self._run_batch(source, targets)
def _save_checkpoint(self, epoch):
ckp = self.model.module.state_dict()
PATH = "checkpoint.pt"
torch.save(ckp, PATH)
print(f"Epoch {epoch} | Training checkpoint saved at {PATH}")
def train(self, max_epochs: int):
for epoch in range(max_epochs):
self._run_epoch(epoch)
if self.gpu_id == 0 and epoch % self.save_every == 0:
self._save_checkpoint(epoch)
def load_train_objs():
train_set = MyTrainDataset(2048) # load your dataset
model = torch.nn.Linear(20, 1) # load your model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
return train_set, model, optimizer
def prepare_dataloader(dataset: Dataset, batch_size: int):
return DataLoader(
dataset,
batch_size=batch_size,
pin_memory=True,
shuffle=False,
sampler=DistributedSampler(dataset)
)
def main(rank: int, world_size: int, save_every: int, total_epochs: int, batch_size: int):
ddp_setup(rank, world_size)
dataset, model, optimizer = load_train_objs()
train_data = prepare_dataloader(dataset, batch_size)
trainer = Trainer(model, train_data, optimizer, rank, save_every)
trainer.train(total_epochs)
destroy_process_group()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='simple distributed training job')
parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')
parser.add_argument('save_every', type=int, help='How often to save a snapshot')
parser.add_argument('--batch_size', default=32, type=int, help='Input batch size on each device (default: 32)')
args = parser.parse_args()
world_size = torch.cuda.device_count()
mp.spawn(main, args=(world_size, args.save_every, args.total_epochs, args.batch_size), nprocs=world_size)
|
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from datautils import MyTrainDataset
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import os
def ddp_setup():
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
class Trainer:
def __init__(
self,
model: torch.nn.Module,
train_data: DataLoader,
optimizer: torch.optim.Optimizer,
save_every: int,
snapshot_path: str,
) -> None:
self.gpu_id = int(os.environ["LOCAL_RANK"])
self.model = model.to(self.gpu_id)
self.train_data = train_data
self.optimizer = optimizer
self.save_every = save_every
self.epochs_run = 0
self.snapshot_path = snapshot_path
if os.path.exists(snapshot_path):
print("Loading snapshot")
self._load_snapshot(snapshot_path)
self.model = DDP(self.model, device_ids=[self.gpu_id])
def _load_snapshot(self, snapshot_path):
loc = f"cuda:{self.gpu_id}"
snapshot = torch.load(snapshot_path, map_location=loc)
self.model.load_state_dict(snapshot["MODEL_STATE"])
self.epochs_run = snapshot["EPOCHS_RUN"]
print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
def _run_batch(self, source, targets):
self.optimizer.zero_grad()
output = self.model(source)
loss = F.cross_entropy(output, targets)
loss.backward()
self.optimizer.step()
def _run_epoch(self, epoch):
b_sz = len(next(iter(self.train_data))[0])
print(f"[GPU{self.gpu_id}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
self.train_data.sampler.set_epoch(epoch)
for source, targets in self.train_data:
source = source.to(self.gpu_id)
targets = targets.to(self.gpu_id)
self._run_batch(source, targets)
def _save_snapshot(self, epoch):
snapshot = {
"MODEL_STATE": self.model.module.state_dict(),
"EPOCHS_RUN": epoch,
}
torch.save(snapshot, self.snapshot_path)
print(f"Epoch {epoch} | Training snapshot saved at {self.snapshot_path}")
def train(self, max_epochs: int):
for epoch in range(self.epochs_run, max_epochs):
self._run_epoch(epoch)
if self.gpu_id == 0 and epoch % self.save_every == 0:
self._save_snapshot(epoch)
def load_train_objs():
train_set = MyTrainDataset(2048) # load your dataset
model = torch.nn.Linear(20, 1) # load your model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
return train_set, model, optimizer
def prepare_dataloader(dataset: Dataset, batch_size: int):
return DataLoader(
dataset,
batch_size=batch_size,
pin_memory=True,
shuffle=False,
sampler=DistributedSampler(dataset)
)
def main(save_every: int, total_epochs: int, batch_size: int, snapshot_path: str = "snapshot.pt"):
ddp_setup()
dataset, model, optimizer = load_train_objs()
train_data = prepare_dataloader(dataset, batch_size)
trainer = Trainer(model, train_data, optimizer, save_every, snapshot_path)
trainer.train(total_epochs)
destroy_process_group()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='simple distributed training job')
parser.add_argument('total_epochs', type=int, help='Total epochs to train the model')
parser.add_argument('save_every', type=int, help='How often to save a snapshot')
parser.add_argument('--batch_size', default=32, type=int, help='Input batch size on each device (default: 32)')
args = parser.parse_args()
main(args.save_every, args.total_epochs, args.batch_size)
|
import os
import threading
import time
from functools import wraps
import torch
import torch.nn as nn
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef
from torchvision.models.resnet import Bottleneck
#########################################################
# Define Model Parallel ResNet50 #
#########################################################
# In order to split the ResNet50 and place it on two different workers, we
# implement it in two model shards. The ResNetBase class defines common
# attributes and methods shared by two shards. ResNetShard1 and ResNetShard2
# contain two partitions of the model layers respectively.
num_classes = 1000
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class ResNetBase(nn.Module):
def __init__(self, block, inplanes, num_classes=1000,
groups=1, width_per_group=64, norm_layer=None):
super(ResNetBase, self).__init__()
self._lock = threading.Lock()
self._block = block
self._norm_layer = nn.BatchNorm2d
self.inplanes = inplanes
self.dilation = 1
self.groups = groups
self.base_width = width_per_group
def _make_layer(self, planes, blocks, stride=1):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if stride != 1 or self.inplanes != planes * self._block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * self._block.expansion, stride),
norm_layer(planes * self._block.expansion),
)
layers = []
layers.append(self._block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * self._block.expansion
for _ in range(1, blocks):
layers.append(self._block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def parameter_rrefs(self):
r"""
Create one RRef for each parameter in the given local module, and return a
list of RRefs.
"""
return [RRef(p) for p in self.parameters()]
class ResNetShard1(ResNetBase):
"""
The first part of ResNet.
"""
def __init__(self, device, *args, **kwargs):
super(ResNetShard1, self).__init__(
Bottleneck, 64, num_classes=num_classes, *args, **kwargs)
self.device = device
self.seq = nn.Sequential(
nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False),
self._norm_layer(self.inplanes),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
self._make_layer(64, 3),
self._make_layer(128, 4, stride=2)
).to(self.device)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def forward(self, x_rref):
x = x_rref.to_here().to(self.device)
with self._lock:
out = self.seq(x)
return out.cpu()
class ResNetShard2(ResNetBase):
"""
The second part of ResNet.
"""
def __init__(self, device, *args, **kwargs):
super(ResNetShard2, self).__init__(
Bottleneck, 512, num_classes=num_classes, *args, **kwargs)
self.device = device
self.seq = nn.Sequential(
self._make_layer(256, 6, stride=2),
self._make_layer(512, 3, stride=2),
nn.AdaptiveAvgPool2d((1, 1)),
).to(self.device)
self.fc = nn.Linear(512 * self._block.expansion, num_classes).to(self.device)
def forward(self, x_rref):
x = x_rref.to_here().to(self.device)
with self._lock:
out = self.fc(torch.flatten(self.seq(x), 1))
return out.cpu()
class DistResNet50(nn.Module):
"""
Assemble two parts as an nn.Module and define pipelining logic
"""
def __init__(self, split_size, workers, *args, **kwargs):
super(DistResNet50, self).__init__()
self.split_size = split_size
# Put the first part of the ResNet50 on workers[0]
self.p1_rref = rpc.remote(
workers[0],
ResNetShard1,
args = ("cuda:0",) + args,
kwargs = kwargs
)
# Put the second part of the ResNet50 on workers[1]
self.p2_rref = rpc.remote(
workers[1],
ResNetShard2,
args = ("cuda:1",) + args,
kwargs = kwargs
)
def forward(self, xs):
# Split the input batch xs into micro-batches, and collect async RPC
# futures into a list
out_futures = []
for x in iter(xs.split(self.split_size, dim=0)):
x_rref = RRef(x)
y_rref = self.p1_rref.remote().forward(x_rref)
z_fut = self.p2_rref.rpc_async().forward(y_rref)
out_futures.append(z_fut)
# collect and cat all output tensors into one tensor.
return torch.cat(torch.futures.wait_all(out_futures))
def parameter_rrefs(self):
remote_params = []
remote_params.extend(self.p1_rref.remote().parameter_rrefs().to_here())
remote_params.extend(self.p2_rref.remote().parameter_rrefs().to_here())
return remote_params
#########################################################
# Run RPC Processes #
#########################################################
num_batches = 3
batch_size = 120
image_w = 128
image_h = 128
def run_master(split_size):
# put the two model parts on worker1 and worker2 respectively
model = DistResNet50(split_size, ["worker1", "worker2"])
loss_fn = nn.MSELoss()
opt = DistributedOptimizer(
optim.SGD,
model.parameter_rrefs(),
lr=0.05,
)
one_hot_indices = torch.LongTensor(batch_size) \
.random_(0, num_classes) \
.view(batch_size, 1)
for i in range(num_batches):
print(f"Processing batch {i}")
# generate random inputs and labels
inputs = torch.randn(batch_size, 3, image_w, image_h)
labels = torch.zeros(batch_size, num_classes) \
.scatter_(1, one_hot_indices, 1)
# The distributed autograd context is the dedicated scope for the
# distributed backward pass to store gradients, which can later be
# retrieved using the context_id by the distributed optimizer.
with dist_autograd.context() as context_id:
outputs = model(inputs)
dist_autograd.backward(context_id, [loss_fn(outputs, labels)])
opt.step(context_id)
def run_worker(rank, world_size, num_split):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'
    # Higher timeout is added to accommodate kernel compilation time on ROCm.
options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=256, rpc_timeout=300)
if rank == 0:
rpc.init_rpc(
"master",
rank=rank,
world_size=world_size,
rpc_backend_options=options
)
run_master(num_split)
else:
rpc.init_rpc(
f"worker{rank}",
rank=rank,
world_size=world_size,
rpc_backend_options=options
)
pass
# block until all rpcs finish
rpc.shutdown()
if __name__=="__main__":
world_size = 3
for num_split in [1, 2, 4, 8]:
tik = time.time()
mp.spawn(run_worker, args=(world_size, num_split), nprocs=world_size, join=True)
tok = time.time()
print(f"number of splits = {num_split}, execution time = {tok - tik}")
|
import random
import torch
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.nn import RemoteModule
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.nn.parallel import DistributedDataParallel as DDP
NUM_EMBEDDINGS = 100
EMBEDDING_DIM = 16
class HybridModel(torch.nn.Module):
r"""
The model consists of a sparse part and a dense part.
1) The dense part is an nn.Linear module that is replicated across all trainers using DistributedDataParallel.
2) The sparse part is a Remote Module that holds an nn.EmbeddingBag on the parameter server.
This remote model can get a Remote Reference to the embedding table on the parameter server.
"""
def __init__(self, remote_emb_module, device):
super(HybridModel, self).__init__()
self.remote_emb_module = remote_emb_module
self.fc = DDP(torch.nn.Linear(16, 8).cuda(device), device_ids=[device])
self.device = device
def forward(self, indices, offsets):
emb_lookup = self.remote_emb_module.forward(indices, offsets)
return self.fc(emb_lookup.cuda(self.device))
def _run_trainer(remote_emb_module, rank):
r"""
Each trainer runs a forward pass which involves an embedding lookup on the
parameter server and running nn.Linear locally. During the backward pass,
DDP is responsible for aggregating the gradients for the dense part
(nn.Linear) and distributed autograd ensures gradients updates are
propagated to the parameter server.
"""
# Setup the model.
model = HybridModel(remote_emb_module, rank)
# Retrieve all model parameters as rrefs for DistributedOptimizer.
# Retrieve parameters for embedding table.
model_parameter_rrefs = model.remote_emb_module.remote_parameters()
# model.fc.parameters() only includes local parameters.
# NOTE: Cannot call model.parameters() here,
# because this will call remote_emb_module.parameters(),
# which supports remote_parameters() but not parameters().
for param in model.fc.parameters():
model_parameter_rrefs.append(RRef(param))
# Setup distributed optimizer
opt = DistributedOptimizer(
optim.SGD,
model_parameter_rrefs,
lr=0.05,
)
criterion = torch.nn.CrossEntropyLoss()
def get_next_batch(rank):
for _ in range(10):
num_indices = random.randint(20, 50)
indices = torch.LongTensor(num_indices).random_(0, NUM_EMBEDDINGS)
# Generate offsets.
offsets = []
start = 0
batch_size = 0
while start < num_indices:
offsets.append(start)
start += random.randint(1, 10)
batch_size += 1
offsets_tensor = torch.LongTensor(offsets)
target = torch.LongTensor(batch_size).random_(8).cuda(rank)
yield indices, offsets_tensor, target
# Train for 100 epochs
for epoch in range(100):
# create distributed autograd context
for indices, offsets, target in get_next_batch(rank):
with dist_autograd.context() as context_id:
output = model(indices, offsets)
loss = criterion(output, target)
# Run distributed backward pass
dist_autograd.backward(context_id, [loss])
                # Run distributed optimizer
opt.step(context_id)
# Not necessary to zero grads as each iteration creates a different
# distributed autograd context which hosts different grads
print("Training done for epoch {}".format(epoch))
def run_worker(rank, world_size):
r"""
A wrapper function that initializes RPC, calls the function, and shuts down
RPC.
"""
# We need to use different port numbers in TCP init_method for init_rpc and
# init_process_group to avoid port conflicts.
rpc_backend_options = TensorPipeRpcBackendOptions()
rpc_backend_options.init_method = "tcp://localhost:29501"
# Rank 2 is master, 3 is ps and 0 and 1 are trainers.
if rank == 2:
rpc.init_rpc(
"master",
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options,
)
remote_emb_module = RemoteModule(
"ps",
torch.nn.EmbeddingBag,
args=(NUM_EMBEDDINGS, EMBEDDING_DIM),
kwargs={"mode": "sum"},
)
# Run the training loop on trainers.
futs = []
for trainer_rank in [0, 1]:
trainer_name = "trainer{}".format(trainer_rank)
fut = rpc.rpc_async(
trainer_name, _run_trainer, args=(remote_emb_module, trainer_rank)
)
futs.append(fut)
# Wait for all training to finish.
for fut in futs:
fut.wait()
elif rank <= 1:
# Initialize process group for Distributed DataParallel on trainers.
dist.init_process_group(
backend="gloo", rank=rank, world_size=2, init_method="tcp://localhost:29500"
)
# Initialize RPC.
trainer_name = "trainer{}".format(rank)
rpc.init_rpc(
trainer_name,
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options,
)
# Trainer just waits for RPCs from master.
else:
rpc.init_rpc(
"ps",
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options,
)
        # parameter server does nothing
pass
# block until all rpcs finish
rpc.shutdown()
if __name__ == "__main__":
# 2 trainers, 1 parameter server, 1 master.
world_size = 4
mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)
|