"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import cv2
import numpy as np
import torch

## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
"""
same output as PIL.ImageOps.autocontrast
"""
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
low = 0 if low.shape[0] == 0 else low[0]
high = np.argwhere(np.cumsum(hist[::-1]) > cut)
high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
if high <= low:
table = np.arange(n_bins)
else:
scale = (n_bins - 1) / (high - low)
offset = -low * scale
table = np.arange(n_bins) * scale + offset
table[table < 0] = 0
table[table > n_bins - 1] = n_bins - 1
table = table.clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def equalize_func(img):
"""
same output as PIL.ImageOps.equalize
PIL's implementation differs from cv2.equalizeHist
"""
n_bins = 256
def tune_channel(ch):
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
non_zero_hist = hist[hist != 0].reshape(-1)
step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
if step == 0:
return ch
n = np.empty_like(hist)
n[0] = step // 2
n[1:] = hist[:-1]
table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def rotate_func(img, degree, fill=(0, 0, 0)):
"""
like PIL, rotate by degree, not radians
"""
H, W = img.shape[0], img.shape[1]
center = W / 2, H / 2
M = cv2.getRotationMatrix2D(center, degree, 1)
out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
return out
def solarize_func(img, thresh=128):
"""
same output as PIL.ImageOps.solarize
"""
table = np.array([el if el < thresh else 255 - el for el in range(256)])
table = table.clip(0, 255).astype(np.uint8)
out = table[img]
return out
def color_func(img, factor):
"""
same output as PIL.ImageEnhance.Color
"""
## implementation according to PIL definition, quite slow
# degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
# out = blend(degenerate, img, factor)
# M = (
# np.eye(3) * factor
# + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
# )[np.newaxis, np.newaxis, :]
M = np.float32(
[[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]
) * factor + np.float32([[0.114], [0.587], [0.299]])
out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
return out
def contrast_func(img, factor):
"""
same output as PIL.ImageEnhance.Contrast
"""
mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
table = (
np.array([(el - mean) * factor + mean for el in range(256)])
.clip(0, 255)
.astype(np.uint8)
)
out = table[img]
return out
def brightness_func(img, factor):
"""
same output as PIL.ImageEnhance.Brightness
"""
table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def sharpness_func(img, factor):
"""
The differences between this result and PIL's are all on the 4 boundaries; the
center areas are the same
"""
kernel = np.ones((3, 3), dtype=np.float32)
kernel[1][1] = 5
kernel /= 13
degenerate = cv2.filter2D(img, -1, kernel)
if factor == 0.0:
out = degenerate
elif factor == 1.0:
out = img
else:
out = img.astype(np.float32)
degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
out = out.astype(np.uint8)
return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, factor, 0], [0, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def translate_x_func(img, offset, fill=(0, 0, 0)):
"""
same output as PIL.Image.transform
"""
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, -offset], [0, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def translate_y_func(img, offset, fill=(0, 0, 0)):
"""
same output as PIL.Image.transform
"""
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [0, 1, -offset]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def posterize_func(img, bits):
"""
same output as PIL.ImageOps.posterize
"""
out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
return out
def shear_y_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [factor, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def cutout_func(img, pad_size, replace=(0, 0, 0)):
replace = np.array(replace, dtype=np.uint8)
H, W = img.shape[0], img.shape[1]
rh, rw = np.random.random(2)
pad_size = pad_size // 2
ch, cw = int(rh * H), int(rw * W)
x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
out = img.copy()
out[x1:x2, y1:y2, :] = replace
return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
def level_to_args(level):
return ((level / MAX_LEVEL) * 1.8 + 0.1,)
return level_to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 0.3
if np.random.random() > 0.5:
level = -level
return (level, replace_value)
return level_to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * float(translate_const)
if np.random.random() > 0.5:
level = -level
return (level, replace_value)
return level_to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = int((level / MAX_LEVEL) * cutout_const)
return (level, replace_value)
return level_to_args
def solarize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 256)
return (level,)
return level_to_args
def none_level_to_args(level):
return ()
def posterize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 4)
return (level,)
return level_to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 30
if np.random.random() < 0.5:
level = -level
return (level, replace_value)
return level_to_args
func_dict = {
"Identity": identity_func,
"AutoContrast": autocontrast_func,
"Equalize": equalize_func,
"Rotate": rotate_func,
"Solarize": solarize_func,
"Color": color_func,
"Contrast": contrast_func,
"Brightness": brightness_func,
"Sharpness": sharpness_func,
"ShearX": shear_x_func,
"TranslateX": translate_x_func,
"TranslateY": translate_y_func,
"Posterize": posterize_func,
"ShearY": shear_y_func,
}
translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)
arg_dict = {
"Identity": none_level_to_args,
"AutoContrast": none_level_to_args,
"Equalize": none_level_to_args,
"Rotate": rotate_level_to_args(MAX_LEVEL, replace_value),
"Solarize": solarize_level_to_args(MAX_LEVEL),
"Color": enhance_level_to_args(MAX_LEVEL),
"Contrast": enhance_level_to_args(MAX_LEVEL),
"Brightness": enhance_level_to_args(MAX_LEVEL),
"Sharpness": enhance_level_to_args(MAX_LEVEL),
"ShearX": shear_level_to_args(MAX_LEVEL, replace_value),
"TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
"TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
"Posterize": posterize_level_to_args(MAX_LEVEL),
"ShearY": shear_level_to_args(MAX_LEVEL, replace_value),
}
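# Sketch: resolving and applying a single augmentation op by name. The op names,
# MAX_LEVEL=10 and replace_value come from func_dict/arg_dict above; the random
# sign in rotate_level_to_args means "Rotate" at level 5 becomes +/-15 degrees.
def _apply_single_op_example(img, name="Rotate", level=5):
    """Apply one op to a uint8 HxWx3 image (illustrative helper, not part of LAVIS)."""
    args = arg_dict[name](level)        # e.g. (15.0, (128, 128, 128)) or (-15.0, ...)
    return func_dict[name](img, *args)  # e.g. rotate_func(img, degree, fill)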
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
class VideoRandomAugment(object):
def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]):
self.N = N
self.M = M
self.p = p
self.tensor_in_tensor_out = tensor_in_tensor_out
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N, replace=False)
return [(op, self.M) for op in sampled_ops]
def __call__(self, frames):
assert (
frames.shape[-1] == 3
), "Expecting last dimension for 3-channels RGB (b, h, w, c)."
if self.tensor_in_tensor_out:
frames = frames.numpy().astype(np.uint8)
num_frames = frames.shape[0]
ops = num_frames * [self.get_random_ops()]
apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]
frames = torch.stack(
list(map(self._aug, frames, ops, apply_or_not)), dim=0
).float()
return frames
def _aug(self, img, ops, apply_or_not):
for i, (name, level) in enumerate(ops):
if not apply_or_not[i]:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return torch.from_numpy(img)
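# Sketch: applying VideoRandomAugment to a short clip. Shapes follow the assertion
# in __call__: a uint8 torch tensor of shape (T, H, W, 3) when
# tensor_in_tensor_out=True; the result is a float tensor of the same shape.
# With p=0.5 each sampled op is applied independently with probability ~0.5.
def _video_randaug_example():
    frames = torch.randint(0, 256, (4, 32, 32, 3), dtype=torch.uint8)
    aug = VideoRandomAugment(N=2, M=5, p=0.5)
    return aug(frames)  # float tensor, shape (4, 32, 32, 3)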
if __name__ == "__main__":
a = RandomAugment()
img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
a(img)
|
#!/usr/bin/env python3
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import numbers
import random

from torchvision.transforms import (
    RandomCrop,
    RandomResizedCrop,
)

# Assumed import path for the functional video ops (crop, resized_crop,
# center_crop, normalize, to_tensor, hflip) referenced as `F` below.
from lavis.processors import functional_video as F
__all__ = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"RandomHorizontalFlipVideo",
]
class RandomCropVideo(RandomCrop):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.size)
return F.crop(clip, i, j, h, w)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class RandomResizedCropVideo(RandomResizedCrop):
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation_mode="bilinear",
):
if isinstance(size, tuple):
if len(size) != 2:
raise ValueError(
f"size should be tuple (height, width), instead got {size}"
)
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.ratio = ratio
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
def __init__(self, crop_size):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return F.center_crop(clip, self.crop_size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(crop_size={self.crop_size})"
class NormalizeVideo:
"""
Normalize the video clip by mean subtraction and division by standard deviation
Args:
mean (3-tuple): pixel RGB mean
std (3-tuple): pixel RGB standard deviation
inplace (boolean): whether do in-place normalization
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
"""
Args:
clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W)
"""
return F.normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
class ToTensorVideo:
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of clip tensor
"""
def __init__(self):
pass
def __call__(self, clip):
"""
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
return F.to_tensor(clip)
def __repr__(self) -> str:
return self.__class__.__name__
class RandomHorizontalFlipVideo:
"""
Flip the video clip along the horizontal direction with a given probability
Args:
p (float): probability of the clip being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
if random.random() < self.p:
clip = F.hflip(clip)
return clip
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import math

from lavis.common.registry import registry


@registry.register_lr_scheduler("linear_warmup_step_lr")
class LinearWarmupStepLRScheduler:
def __init__(
self,
optimizer,
max_epoch,
min_lr,
init_lr,
decay_rate=1,
warmup_start_lr=-1,
warmup_steps=0,
**kwargs
):
self.optimizer = optimizer
self.max_epoch = max_epoch
self.min_lr = min_lr
self.decay_rate = decay_rate
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
def step(self, cur_epoch, cur_step):
if cur_epoch == 0:
warmup_lr_schedule(
step=cur_step,
optimizer=self.optimizer,
max_step=self.warmup_steps,
init_lr=self.warmup_start_lr,
max_lr=self.init_lr,
)
else:
step_lr_schedule(
epoch=cur_epoch,
optimizer=self.optimizer,
init_lr=self.init_lr,
min_lr=self.min_lr,
decay_rate=self.decay_rate,
)
@registry.register_lr_scheduler("linear_warmup_cosine_lr")
class LinearWarmupCosineLRScheduler:
def __init__(
self,
optimizer,
max_epoch,
min_lr,
init_lr,
warmup_steps=0,
warmup_start_lr=-1,
**kwargs
):
self.optimizer = optimizer
self.max_epoch = max_epoch
self.min_lr = min_lr
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
def step(self, cur_epoch, cur_step):
# assuming the warmup iters are fewer than one epoch
if cur_epoch == 0:
warmup_lr_schedule(
step=cur_step,
optimizer=self.optimizer,
max_step=self.warmup_steps,
init_lr=self.warmup_start_lr,
max_lr=self.init_lr,
)
else:
cosine_lr_schedule(
epoch=cur_epoch,
optimizer=self.optimizer,
max_epoch=self.max_epoch,
init_lr=self.init_lr,
min_lr=self.min_lr,
)
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
"""Decay the learning rate"""
lr = (init_lr - min_lr) * 0.5 * (
1.0 + math.cos(math.pi * epoch / max_epoch)
) + min_lr
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
"""Warmup the learning rate"""
lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
"""Decay the learning rate"""
lr = max(min_lr, init_lr * (decay_rate**epoch))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import json
import logging
from typing import Dict

from omegaconf import OmegaConf

from lavis.common.registry import registry


class Config:
def __init__(self, args):
self.config = {}
self.args = args
# Register the config and configuration for setup
registry.register("configuration", self)
user_config = self._build_opt_list(self.args.options)
config = OmegaConf.load(self.args.cfg_path)
runner_config = self.build_runner_config(config)
model_config = self.build_model_config(config, **user_config)
dataset_config = self.build_dataset_config(config)
# Validate the user-provided runner configuration
# model and dataset configuration are supposed to be validated by the respective classes
# [TODO] validate the model/dataset configuration
# self._validate_runner_config(runner_config)
# Override the default configuration with user options.
self.config = OmegaConf.merge(
runner_config, model_config, dataset_config, user_config
)
def _validate_runner_config(self, runner_config):
"""
This method validates the configuration, such that
1) all the user specified options are valid;
2) no type mismatches between the user specified options and the config.
"""
runner_config_validator = create_runner_config_validator()
runner_config_validator.validate(runner_config)
def _build_opt_list(self, opts):
opts_dot_list = self._convert_to_dot_list(opts)
return OmegaConf.from_dotlist(opts_dot_list)
@staticmethod
def build_model_config(config, **kwargs):
model = config.get("model", None)
assert model is not None, "Missing model configuration file."
model_cls = registry.get_model_class(model.arch)
assert model_cls is not None, f"Model '{model.arch}' has not been registered."
model_type = kwargs.get("model.model_type", None)
if not model_type:
model_type = model.get("model_type", None)
# else use the model type selected by user.
assert model_type is not None, "Missing model_type."
model_config_path = model_cls.default_config_path(model_type=model_type)
model_config = OmegaConf.create()
# hierarchy override: customized config > default config
model_config = OmegaConf.merge(
model_config,
OmegaConf.load(model_config_path),
{"model": config["model"]},
)
return model_config
@staticmethod
def build_runner_config(config):
return {"run": config.run}
@staticmethod
def build_dataset_config(config):
datasets = config.get("datasets", None)
if datasets is None:
raise KeyError(
"Expecting 'datasets' as the root key for dataset configuration."
)
dataset_config = OmegaConf.create()
for dataset_name in datasets:
builder_cls = registry.get_builder_class(dataset_name)
dataset_config_type = datasets[dataset_name].get("type", "default")
dataset_config_path = builder_cls.default_config_path(
type=dataset_config_type
)
# hierarchy override: customized config > default config
dataset_config = OmegaConf.merge(
dataset_config,
OmegaConf.load(dataset_config_path),
{"datasets": {dataset_name: config["datasets"][dataset_name]}},
)
return dataset_config
def _convert_to_dot_list(self, opts):
if opts is None:
opts = []
if len(opts) == 0:
return opts
has_equal = opts[0].find("=") != -1
if has_equal:
return opts
return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])]
def get_config(self):
return self.config
@property
def run_cfg(self):
return self.config.run
@property
def datasets_cfg(self):
return self.config.datasets
@property
def model_cfg(self):
return self.config.model
def pretty_print(self):
logging.info("\n===== Running Parameters =====")
logging.info(self._convert_node_to_json(self.config.run))
logging.info("\n====== Dataset Attributes ======")
datasets = self.config.datasets
for dataset in datasets:
if dataset in self.config.datasets:
logging.info(f"\n======== {dataset} =======")
dataset_config = self.config.datasets[dataset]
logging.info(self._convert_node_to_json(dataset_config))
else:
logging.warning(f"No dataset named '{dataset}' in config. Skipping")
logging.info(f"\n====== Model Attributes ======")
logging.info(self._convert_node_to_json(self.config.model))
def _convert_node_to_json(self, node):
container = OmegaConf.to_container(node, resolve=True)
return json.dumps(container, indent=4, sort_keys=True)
def to_dict(self):
return OmegaConf.to_container(self.config)
def node_to_dict(node):
return OmegaConf.to_container(node)
class ConfigValidator:
"""
This is a preliminary implementation to centralize and validate the configuration.
May be altered in the future.
A helper class to validate configurations from yaml file.
This serves the following purposes:
1. Ensure all the options in the yaml are defined, raise error if not.
2. when type mismatches are found, the validator will raise an error.
3. a central place to store and display helpful messages for supported configurations.
"""
class _Argument:
def __init__(self, name, choices=None, type=None, help=None):
self.name = name
self.val = None
self.choices = choices
self.type = type
self.help = help
def __str__(self):
s = f"{self.name}={self.val}"
if self.type is not None:
s += f", ({self.type})"
if self.choices is not None:
s += f", choices: {self.choices}"
if self.help is not None:
s += f", ({self.help})"
return s
def __init__(self, description):
self.description = description
self.arguments = dict()
self.parsed_args = None
def __getitem__(self, key):
assert self.parsed_args is not None, "No arguments parsed yet."
return self.parsed_args[key]
def __str__(self) -> str:
return self.format_help()
def add_argument(self, *args, **kwargs):
"""
Assume the first argument is the name of the argument.
"""
self.arguments[args[0]] = self._Argument(*args, **kwargs)
def validate(self, config=None):
"""
Convert yaml config (dict-like) to list, required by argparse.
"""
for k, v in config.items():
assert (
k in self.arguments
), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}."""
if self.arguments[k].type is not None:
try:
self.arguments[k].val = self.arguments[k].type(v)
except ValueError:
raise ValueError(f"{k} is not a valid {self.arguments[k].type}.")
if self.arguments[k].choices is not None:
assert (
v in self.arguments[k].choices
), f"""{k} must be one of {self.arguments[k].choices}."""
return config
def format_arguments(self):
return str([f"{k}" for k in sorted(self.arguments.keys())])
def format_help(self):
# description + key-value pair string for each argument
help_msg = str(self.description)
return help_msg + ", available arguments: " + self.format_arguments()
def print_help(self):
# display help message
print(self.format_help())
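# Sketch: using ConfigValidator standalone. Arguments are declared up front and
# validate() checks an incoming dict-like config for unknown keys, type
# mismatches and out-of-range choices, mirroring create_runner_config_validator
# below. The argument names here are toy examples.
def _config_validator_example():
    validator = ConfigValidator(description="toy runner config")
    validator.add_argument("max_epoch", type=int, help="number of training epochs")
    validator.add_argument("device", type=str, choices=["cpu", "cuda"])
    return validator.validate({"max_epoch": 10, "device": "cuda"})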
def create_runner_config_validator():
validator = ConfigValidator(description="Runner configurations")
validator.add_argument(
"runner",
type=str,
choices=["runner_base", "runner_iter"],
help="""Runner to use. The "runner_base" uses epoch-based training while iter-based
runner runs based on iters. Default: runner_base""",
)
# add arguments for training dataset ratios
validator.add_argument(
"train_dataset_ratios",
type=Dict[str, float],
help="""Ratios of training dataset. This is used in iteration-based runner.
Do not support for epoch-based runner because how to define an epoch becomes tricky.
Default: None""",
)
validator.add_argument(
"max_iters",
type=float,
help="Maximum number of iterations to run.",
)
validator.add_argument(
"max_epoch",
type=int,
help="Maximum number of epochs to run.",
)
# add arguments for iters_per_inner_epoch
validator.add_argument(
"iters_per_inner_epoch",
type=float,
help="Number of iterations per inner epoch. This is required when runner is runner_iter.",
)
lr_scheds_choices = registry.list_lr_schedulers()
validator.add_argument(
"lr_sched",
type=str,
choices=lr_scheds_choices,
help="Learning rate scheduler to use, from {}".format(lr_scheds_choices),
)
task_choices = registry.list_tasks()
validator.add_argument(
"task",
type=str,
choices=task_choices,
help="Task to use, from {}".format(task_choices),
)
# add arguments for init_lr
validator.add_argument(
"init_lr",
type=float,
help="Initial learning rate. This will be the learning rate after warmup and before decay.",
)
# add arguments for min_lr
validator.add_argument(
"min_lr",
type=float,
help="Minimum learning rate (after decay).",
)
# add arguments for warmup_lr
validator.add_argument(
"warmup_lr",
type=float,
help="Starting learning rate for warmup.",
)
# add arguments for learning rate decay rate
validator.add_argument(
"lr_decay_rate",
type=float,
help="Learning rate decay rate. Required if using a decaying learning rate scheduler.",
)
# add arguments for weight decay
validator.add_argument(
"weight_decay",
type=float,
help="Weight decay rate.",
)
# add arguments for training batch size
validator.add_argument(
"batch_size_train",
type=int,
help="Training batch size.",
)
# add arguments for evaluation batch size
validator.add_argument(
"batch_size_eval",
type=int,
help="Evaluation batch size, including validation and testing.",
)
# add arguments for number of workers for data loading
validator.add_argument(
"num_workers",
help="Number of workers for data loading.",
)
# add arguments for warm up steps
validator.add_argument(
"warmup_steps",
type=int,
help="Number of warmup steps. Required if a warmup schedule is used.",
)
# add arguments for random seed
validator.add_argument(
"seed",
type=int,
help="Random seed.",
)
# add arguments for output directory
validator.add_argument(
"output_dir",
type=str,
help="Output directory to save checkpoints and logs.",
)
# add arguments for whether only use evaluation
validator.add_argument(
"evaluate",
help="Whether to only evaluate the model. If true, training will not be performed.",
)
# add arguments for splits used for training, e.g. ["train", "val"]
validator.add_argument(
"train_splits",
type=list,
help="Splits to use for training.",
)
# add arguments for splits used for validation, e.g. ["val"]
validator.add_argument(
"valid_splits",
type=list,
help="Splits to use for validation. If not provided, will skip the validation.",
)
# add arguments for splits used for testing, e.g. ["test"]
validator.add_argument(
"test_splits",
type=list,
help="Splits to use for testing. If not provided, will skip the testing.",
)
# add arguments for accumulating gradient for iterations
validator.add_argument(
"accum_grad_iters",
type=int,
help="Number of iterations to accumulate gradient for.",
)
# ====== distributed training ======
validator.add_argument(
"device",
type=str,
choices=["cpu", "cuda"],
help="Device to use. Support 'cuda' or 'cpu' as for now.",
)
validator.add_argument(
"world_size",
type=int,
help="Number of processes participating in the job.",
)
validator.add_argument("dist_url", type=str)
validator.add_argument("distributed", type=bool)
# add arguments to opt using distributed sampler during evaluation or not
validator.add_argument(
"use_dist_eval_sampler",
type=bool,
help="Whether to use distributed sampler during evaluation or not.",
)
# ====== task specific ======
# generation task specific arguments
# add arguments for maximal length of text output
validator.add_argument(
"max_len",
type=int,
help="Maximal length of text output.",
)
# add arguments for minimal length of text output
validator.add_argument(
"min_len",
type=int,
help="Minimal length of text output.",
)
# add arguments number of beams
validator.add_argument(
"num_beams",
type=int,
help="Number of beams used for beam search.",
)
# vqa task specific arguments
# add arguments for number of answer candidates
validator.add_argument(
"num_ans_candidates",
type=int,
help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""",
)
# add arguments for inference method
validator.add_argument(
"inference_method",
type=str,
choices=["genearte", "rank"],
help="""Inference method to use for question answering. If rank, requires a answer list.""",
)
# ====== model specific ======
validator.add_argument(
"k_test",
type=int,
help="Number of top k most similar samples from ITC/VTC selection to be tested.",
)
return validator
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class Registry:
mapping = {
"builder_name_mapping": {},
"task_name_mapping": {},
"processor_name_mapping": {},
"model_name_mapping": {},
"lr_scheduler_name_mapping": {},
"runner_name_mapping": {},
"state": {},
"paths": {},
}
@classmethod
def register_builder(cls, name):
r"""Register a dataset builder to registry with key 'name'
Args:
name: Key with which the builder will be registered.
Usage:
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
"""
def wrap(builder_cls):
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
assert issubclass(
builder_cls, BaseDatasetBuilder
), "All builders must inherit BaseDatasetBuilder class, found {}".format(
builder_cls
)
if name in cls.mapping["builder_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["builder_name_mapping"][name]
)
)
cls.mapping["builder_name_mapping"][name] = builder_cls
return builder_cls
return wrap
@classmethod
def register_task(cls, name):
r"""Register a task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(task_cls):
from lavis.tasks.base_task import BaseTask
assert issubclass(
task_cls, BaseTask
), "All tasks must inherit BaseTask class"
if name in cls.mapping["task_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["task_name_mapping"][name]
)
)
cls.mapping["task_name_mapping"][name] = task_cls
return task_cls
return wrap
@classmethod
def register_model(cls, name):
r"""Register a task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(model_cls):
from lavis.models import BaseModel
assert issubclass(
model_cls, BaseModel
), "All models must inherit BaseModel class"
if name in cls.mapping["model_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["model_name_mapping"][name]
)
)
cls.mapping["model_name_mapping"][name] = model_cls
return model_cls
return wrap
@classmethod
def register_processor(cls, name):
r"""Register a processor to registry with key 'name'
Args:
name: Key with which the processor will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(processor_cls):
from lavis.processors import BaseProcessor
assert issubclass(
processor_cls, BaseProcessor
), "All processors must inherit BaseProcessor class"
if name in cls.mapping["processor_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["processor_name_mapping"][name]
)
)
cls.mapping["processor_name_mapping"][name] = processor_cls
return processor_cls
return wrap
@classmethod
def register_lr_scheduler(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(lr_sched_cls):
if name in cls.mapping["lr_scheduler_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["lr_scheduler_name_mapping"][name]
)
)
cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls
return lr_sched_cls
return wrap
@classmethod
def register_runner(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(runner_cls):
if name in cls.mapping["runner_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["runner_name_mapping"][name]
)
)
cls.mapping["runner_name_mapping"][name] = runner_cls
return runner_cls
return wrap
@classmethod
def register_path(cls, name, path):
r"""Register a path to registry with key 'name'
Args:
name: Key with which the path will be registered.
Usage:
from lavis.common.registry import registry
"""
assert isinstance(path, str), "All paths must be str."
if name in cls.mapping["paths"]:
raise KeyError("Name '{}' already registered.".format(name))
cls.mapping["paths"][name] = path
@classmethod
def register(cls, name, obj):
r"""Register an item to registry with key 'name'
Args:
name: Key with which the item will be registered.
Usage::
from lavis.common.registry import registry
registry.register("config", {})
"""
path = name.split(".")
current = cls.mapping["state"]
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = obj
# @classmethod
# def get_trainer_class(cls, name):
# return cls.mapping["trainer_name_mapping"].get(name, None)
@classmethod
def get_builder_class(cls, name):
return cls.mapping["builder_name_mapping"].get(name, None)
@classmethod
def get_model_class(cls, name):
return cls.mapping["model_name_mapping"].get(name, None)
@classmethod
def get_task_class(cls, name):
return cls.mapping["task_name_mapping"].get(name, None)
@classmethod
def get_processor_class(cls, name):
return cls.mapping["processor_name_mapping"].get(name, None)
@classmethod
def get_lr_scheduler_class(cls, name):
return cls.mapping["lr_scheduler_name_mapping"].get(name, None)
@classmethod
def get_runner_class(cls, name):
return cls.mapping["runner_name_mapping"].get(name, None)
@classmethod
def list_runners(cls):
return sorted(cls.mapping["runner_name_mapping"].keys())
@classmethod
def list_models(cls):
return sorted(cls.mapping["model_name_mapping"].keys())
@classmethod
def list_tasks(cls):
return sorted(cls.mapping["task_name_mapping"].keys())
@classmethod
def list_processors(cls):
return sorted(cls.mapping["processor_name_mapping"].keys())
@classmethod
def list_lr_schedulers(cls):
return sorted(cls.mapping["lr_scheduler_name_mapping"].keys())
@classmethod
def list_datasets(cls):
return sorted(cls.mapping["builder_name_mapping"].keys())
@classmethod
def get_path(cls, name):
return cls.mapping["paths"].get(name, None)
@classmethod
def get(cls, name, default=None, no_warning=False):
r"""Get an item from registry with key 'name'
Args:
name (string): Key whose value needs to be retrieved.
default: If passed and key is not in registry, default value will
be returned with a warning. Default: None
no_warning (bool): If passed as True, warning when key doesn't exist
will not be generated. Useful for MMF's
internal operations. Default: False
"""
original_name = name
name = name.split(".")
value = cls.mapping["state"]
for subname in name:
value = value.get(subname, default)
if value is default:
break
if (
"writer" in cls.mapping["state"]
and value == default
and no_warning is False
):
cls.mapping["state"]["writer"].warning(
"Key {} is not present in registry, returning default value "
"of {}".format(original_name, default)
)
return value
@classmethod
def unregister(cls, name):
r"""Remove an item from registry with key 'name'
Args:
name: Key which needs to be removed.
Usage::
from mmf.common.registry import registry
config = registry.unregister("config")
"""
return cls.mapping["state"].pop(name, None)
registry = Registry()
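# Sketch: the registry is a process-wide store of name -> class mappings
# (populated by the register_* decorators above) plus arbitrary values and
# paths. The keys used here are hypothetical, chosen only to illustrate the API.
def _registry_example():
    registry.register("example.batch_size", 32)             # nested keys via dots
    assert registry.get("example.batch_size") == 32
    registry.register_path("example_root", "/tmp/example")  # paths must be strings
    return registry.get_path("example_root")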
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import time
from collections import defaultdict, deque

import torch
import torch.distributed as dist

from lavis.common import dist_utils


class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not dist_utils.is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def global_avg(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
log_msg = [
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
if torch.cuda.is_available():
log_msg.append("max mem: {memory:.0f}")
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def setup_logger():
logging.basicConfig(
level=logging.INFO if dist_utils.is_main_process() else logging.WARN,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler()],
)
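# Sketch: typical use of MetricLogger in a training loop. log_every wraps any
# sized iterable (e.g. a DataLoader), yields its items unchanged, and prints
# smoothed timing/metric statistics every `print_freq` iterations plus a final
# summary. The loss/lr values below are placeholders for a real training step.
def _metric_logger_example(data_loader, print_freq=50):
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    for batch in metric_logger.log_every(data_loader, print_freq, header="Train:"):
        loss = 0.0  # placeholder: forward/backward would go here
        metric_logger.update(loss=loss, lr=1e-4)
    return metric_logger.global_avg()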
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import io
import json
import logging
import os
import pickle
import re
import shutil
import urllib
import urllib.error
import urllib.request
from typing import Optional
from urllib.parse import urlparse

import numpy as np
import pandas as pd
import yaml
from tqdm import tqdm

# iopath provides the g_pathmgr, file_lock and download helpers used below
from iopath.common.download import download
from iopath.common.file_io import file_lock, g_pathmgr

from torchvision.datasets.utils import (
    check_integrity,
    download_file_from_google_drive,
    extract_archive,
)

from lavis.common.registry import registry
def now():
from datetime import datetime
return datetime.now().strftime("%Y%m%d%H%M")[:-1]
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def get_cache_path(rel_path):
return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path))
def get_abs_path(rel_path):
return os.path.join(registry.get_path("library_root"), rel_path)
def load_json(filename):
with open(filename, "r") as f:
return json.load(f)
# The following are adapted from torchvision and vissl
# torchvision: https://github.com/pytorch/vision
# vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py
def makedir(dir_path):
"""
Create the directory if it does not exist.
"""
is_success = False
try:
if not g_pathmgr.exists(dir_path):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
print(f"Error creating directory: {dir_path}")
return is_success
def get_redirected_url(url: str):
"""
Given a URL, returns the URL it redirects to, or the
original URL if there is no redirection
"""
import requests
with requests.Session() as session:
with session.get(url, stream=True, allow_redirects=True) as response:
if response.history:
return response.url
else:
return url
def to_google_drive_download_url(view_url: str) -> str:
"""
Utility function to transform a view URL of google drive
to a download URL for google drive
Example input:
https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view
Example output:
https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp
"""
splits = view_url.split("/")
assert splits[-1] == "view"
file_id = splits[-2]
return f"https://drive.google.com/uc?export=download&id={file_id}"
def download_google_drive_url(url: str, output_path: str, output_file_name: str):
"""
Download a file from google drive
Downloading a URL from Google Drive requires confirmation when
the file is too big (Google Drive notifies that
anti-virus checks cannot be performed on such files)
"""
import requests
with requests.Session() as session:
# First get the confirmation token and append it to the URL
with session.get(url, stream=True, allow_redirects=True) as response:
for k, v in response.cookies.items():
if k.startswith("download_warning"):
url = url + "&confirm=" + v
# Then download the content of the file
with session.get(url, stream=True, verify=True) as response:
makedir(output_path)
path = os.path.join(output_path, output_file_name)
total_size = int(response.headers.get("Content-length", 0))
with open(path, "wb") as file:
from tqdm import tqdm
with tqdm(total=total_size) as progress_bar:
for block in response.iter_content(
chunk_size=io.DEFAULT_BUFFER_SIZE
):
file.write(block)
progress_bar.update(len(block))
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
return None
match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
if match is None:
return None
return match.group("id")
def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
with open(filename, "wb") as fh:
with urllib.request.urlopen(
urllib.request.Request(url, headers={"User-Agent": "vissl"})
) as response:
with tqdm(total=response.length) as pbar:
for chunk in iter(lambda: response.read(chunk_size), ""):
if not chunk:
break
pbar.update(chunk_size)
fh.write(chunk)
def download_url(
url: str,
root: str,
filename: Optional[str] = None,
md5: Optional[str] = None,
) -> None:
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under.
If None, use the basename of the URL.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir(root)
# check if file is already present locally
if check_integrity(fpath, md5):
print("Using downloaded and verified file: " + fpath)
return
# expand redirect chain if needed
url = get_redirected_url(url)
# check if file is located on Google Drive
file_id = _get_google_drive_file_id(url)
if file_id is not None:
return download_file_from_google_drive(file_id, root, filename, md5)
# download the file
try:
print("Downloading " + url + " to " + fpath)
_urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined]
if url[:5] == "https":
url = url.replace("https:", "http:")
print(
"Failed download. Trying https -> http instead."
" Downloading " + url + " to " + fpath
)
_urlretrieve(url, fpath)
else:
raise e
# check integrity of downloaded file
if not check_integrity(fpath, md5):
raise RuntimeError("File not found or corrupted.")
def download_and_extract_archive(
url: str,
download_root: str,
extract_root: Optional[str] = None,
filename: Optional[str] = None,
md5: Optional[str] = None,
remove_finished: bool = False,
) -> None:
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def cache_url(url: str, cache_dir: str) -> str:
"""
This implementation downloads the remote resource and caches it locally.
The resource will only be downloaded if not previously requested.
"""
parsed_url = urlparse(url)
dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/")))
makedir(dirname)
filename = url.split("/")[-1]
cached = os.path.join(dirname, filename)
with file_lock(cached):
if not os.path.isfile(cached):
logging.info(f"Downloading {url} to {cached} ...")
cached = download(url, dirname, filename=filename)
logging.info(f"URL {url} cached in {cached}")
return cached
# TODO (prigoyal): convert this into RAII-style API
def create_file_symlink(file1, file2):
"""
Simply create a symlink from file1 to file2.
Useful during model checkpointing to symlink to the
latest successful checkpoint.
"""
try:
if g_pathmgr.exists(file2):
g_pathmgr.rm(file2)
g_pathmgr.symlink(file1, file2)
except Exception as e:
logging.info(f"Could NOT create symlink. Error: {e}")
def save_file(data, filename, append_to_json=True, verbose=True):
"""
Common i/o utility to handle saving data to various file formats.
Supported:
.pkl, .pickle, .npy, .json
Specifically for .json, users have the option to either append (default)
or rewrite by passing in Boolean value to append_to_json.
"""
if verbose:
logging.info(f"Saving data to file: {filename}")
file_ext = os.path.splitext(filename)[1]
if file_ext in [".pkl", ".pickle"]:
with g_pathmgr.open(filename, "wb") as fopen:
pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)
elif file_ext == ".npy":
with g_pathmgr.open(filename, "wb") as fopen:
np.save(fopen, data)
elif file_ext == ".json":
if append_to_json:
with g_pathmgr.open(filename, "a") as fopen:
fopen.write(json.dumps(data, sort_keys=True) + "\n")
fopen.flush()
else:
with g_pathmgr.open(filename, "w") as fopen:
fopen.write(json.dumps(data, sort_keys=True) + "\n")
fopen.flush()
elif file_ext == ".yaml":
with g_pathmgr.open(filename, "w") as fopen:
dump = yaml.dump(data)
fopen.write(dump)
fopen.flush()
else:
raise Exception(f"Saving {file_ext} is not supported yet")
if verbose:
logging.info(f"Saved data to file: {filename}")
def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False):
"""
Common i/o utility to handle loading data from various file formats.
Supported:
.pkl, .pickle, .npy, .json
For the npy files, we support reading the files in mmap_mode.
If the mmap_mode of reading is not successful, we load data without the
mmap_mode.
"""
if verbose:
logging.info(f"Loading data from file: {filename}")
file_ext = os.path.splitext(filename)[1]
if file_ext == ".txt":
with g_pathmgr.open(filename, "r") as fopen:
data = fopen.readlines()
elif file_ext in [".pkl", ".pickle"]:
with g_pathmgr.open(filename, "rb") as fopen:
data = pickle.load(fopen, encoding="latin1")
elif file_ext == ".npy":
if mmap_mode:
try:
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(
fopen,
allow_pickle=allow_pickle,
encoding="latin1",
mmap_mode=mmap_mode,
)
except ValueError as e:
logging.info(
f"Could not mmap {filename}: {e}. Trying without g_pathmgr"
)
data = np.load(
filename,
allow_pickle=allow_pickle,
encoding="latin1",
mmap_mode=mmap_mode,
)
logging.info("Successfully loaded without g_pathmgr")
except Exception:
logging.info("Could not mmap without g_pathmgr. Trying without mmap")
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
else:
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
elif file_ext == ".json":
with g_pathmgr.open(filename, "r") as fopen:
data = json.load(fopen)
elif file_ext == ".yaml":
with g_pathmgr.open(filename, "r") as fopen:
data = yaml.load(fopen, Loader=yaml.FullLoader)
elif file_ext == ".csv":
with g_pathmgr.open(filename, "r") as fopen:
data = pd.read_csv(fopen)
else:
raise Exception(f"Reading from {file_ext} is not supported yet")
return data
def abspath(resource_path: str):
"""
Make a path absolute, but take into account prefixes like
"http://" or "manifold://"
"""
regex = re.compile(r"^\w+://")
if regex.match(resource_path) is None:
return os.path.abspath(resource_path)
else:
return resource_path
def makedir(dir_path):
"""
Create the directory if it does not exist.
"""
is_success = False
try:
if not g_pathmgr.exists(dir_path):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
logging.info(f"Error creating directory: {dir_path}")
return is_success
def is_url(input_url):
"""
Check if an input string is a URL. Looks for http(s):// and ignores the case.
"""
is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None
return is_url
def cleanup_dir(dir):
"""
Utility for deleting a directory. Useful for cleaning the storage space
that contains various training artifacts like checkpoints, data etc.
"""
if os.path.exists(dir):
logging.info(f"Deleting directory: {dir}")
shutil.rmtree(dir)
logging.info(f"Deleted contents of directory: {dir}")
def get_file_size(filename):
"""
Given a file, get the size of file in MB
"""
size_in_mb = os.path.getsize(filename) / float(1024**2)
return size_in_mb
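# Sketch: round-tripping a dict through save_file / load_file. The .json branch
# appends one JSON object per line when append_to_json=True, which json.load
# cannot read back once there is more than one record; writing a single record
# with append_to_json=False keeps the file loadable. The path is hypothetical.
def _save_load_example(path="/tmp/example_stats.json"):
    save_file({"acc": 0.5}, path, append_to_json=False)
    return load_file(path)  # -> {"acc": 0.5}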
|
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import filters
from skimage import transform as skimage_transform


def getAttMap(img, attMap, blur=True, overlap=True):
attMap -= attMap.min()
if attMap.max() > 0:
attMap /= attMap.max()
attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant")
if blur:
attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
attMap -= attMap.min()
attMap /= attMap.max()
cmap = plt.get_cmap("jet")
attMapV = cmap(attMap)
attMapV = np.delete(attMapV, 3, 2)
if overlap:
attMap = (
1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img
+ (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV
)
return attMap
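# Sketch: overlaying an attention map on an image. `img` is assumed to be a
# float RGB array in [0, 1] of shape (H, W, 3) and `attMap` a smaller 2D grid
# (e.g. patch-level attention); the result is an (H, W, 3) blended heatmap.
def _att_map_example():
    img = np.random.rand(224, 224, 3)  # placeholder image in [0, 1]
    att = np.random.rand(24, 24)       # placeholder attention grid
    return getAttMap(img, att, blur=True, overlap=True)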
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import functools
import os

import torch
import torch.distributed as dist
import timm.models.hub as timm_hub


def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(
"| distributed init (rank {}, world {}): {}".format(
args.rank, args.world_size, args.dist_url
),
flush=True,
)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
timeout=datetime.timedelta(
days=365
), # allow auto-downloading and de-compressing
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def get_dist_info():
if torch.__version__ < "1.0":
initialized = dist._initialized
else:
initialized = dist.is_initialized()
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else: # non-distributed training
rank = 0
world_size = 1
return rank, world_size
def main_process(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
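# Sketch: main_process turns a function into a rank-0-only operation, which is
# convenient for logging or checkpoint writing under DDP; on other ranks the
# call silently returns None. The function name and arguments are illustrative.
@main_process
def _save_checkpoint_example(state_dict, path):
    torch.save(state_dict, path)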
def download_cached_file(url, check_hash=True, progress=False):
"""
Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
"""
def get_cached_file_path():
# a hack to sync the file path across processes
parts = torch.hub.urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
return cached_file
if is_main_process():
timm_hub.download_cached_file(url, check_hash, progress)
if is_dist_avail_and_initialized():
dist.barrier()
return get_cached_file_path()
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
__author__ = "aagrawal"
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# coding=utf-8
__author__ = "aagrawal"
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py).
import re
import sys  # used by updateProgress further down in this class


class VQAEval:
def __init__(self, vqa=None, vqaRes=None, n=2):
self.n = n
self.accuracy = {}
self.evalQA = {}
self.evalQuesType = {}
self.evalAnsType = {}
self.vqa = vqa
self.vqaRes = vqaRes
if vqa is not None:
self.params = {"question_id": vqa.getQuesIds()}
self.contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
self.manualMap = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
self.articles = ["a", "an", "the"]
self.periodStrip = re.compile(r"(?<!\d)(\.)(?!\d)")
self.commaStrip = re.compile(r"(\d)(,)(\d)")
self.punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def evaluate(self, quesIds=None):
if quesIds is None:
quesIds = [quesId for quesId in self.params["question_id"]]
gts = {}
res = {}
for quesId in quesIds:
gts[quesId] = self.vqa.qa[quesId]
res[quesId] = self.vqaRes.qa[quesId]
# =================================================
# Compute accuracy
# =================================================
accQA = []
accQuesType = {}
accAnsType = {}
print("computing accuracy")
step = 0
for quesId in quesIds:
resAns = res[quesId]["answer"]
resAns = resAns.replace("\n", " ")
resAns = resAns.replace("\t", " ")
resAns = resAns.strip()
resAns = self.processPunctuation(resAns)
resAns = self.processDigitArticle(resAns)
gtAcc = []
gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]]
if len(set(gtAnswers)) > 1:
for ansDic in gts[quesId]["answers"]:
ansDic["answer"] = self.processPunctuation(ansDic["answer"])
for gtAnsDatum in gts[quesId]["answers"]:
otherGTAns = [
item for item in gts[quesId]["answers"] if item != gtAnsDatum
]
matchingAns = [item for item in otherGTAns if item["answer"] == resAns]
acc = min(1, float(len(matchingAns)) / 3)
gtAcc.append(acc)
quesType = gts[quesId]["question_type"]
ansType = gts[quesId]["answer_type"]
avgGTAcc = float(sum(gtAcc)) / len(gtAcc)
accQA.append(avgGTAcc)
if quesType not in accQuesType:
accQuesType[quesType] = []
accQuesType[quesType].append(avgGTAcc)
if ansType not in accAnsType:
accAnsType[ansType] = []
accAnsType[ansType].append(avgGTAcc)
self.setEvalQA(quesId, avgGTAcc)
self.setEvalQuesType(quesId, quesType, avgGTAcc)
self.setEvalAnsType(quesId, ansType, avgGTAcc)
if step % 100 == 0:
self.updateProgress(step / float(len(quesIds)))
step = step + 1
self.setAccuracy(accQA, accQuesType, accAnsType)
print("Done computing accuracy")
def processPunctuation(self, inText):
outText = inText
for p in self.punct:
if (p + " " in inText or " " + p in inText) or (
re.search(self.commaStrip, inText) is not None
):
outText = outText.replace(p, "")
else:
outText = outText.replace(p, " ")
outText = self.periodStrip.sub("", outText)
return outText
def processDigitArticle(self, inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = self.manualMap.get(word, word)
if word not in self.articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in self.contractions:
outText[wordId] = self.contractions[word]
outText = " ".join(outText)
return outText
def setAccuracy(self, accQA, accQuesType, accAnsType):
self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n)
self.accuracy["perQuestionType"] = {
quesType: round(
100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]),
self.n,
)
for quesType in accQuesType
}
self.accuracy["perAnswerType"] = {
ansType: round(
100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n
)
for ansType in accAnsType
}
def setEvalQA(self, quesId, acc):
self.evalQA[quesId] = round(100 * acc, self.n)
def setEvalQuesType(self, quesId, quesType, acc):
if quesType not in self.evalQuesType:
self.evalQuesType[quesType] = {}
self.evalQuesType[quesType][quesId] = round(100 * acc, self.n)
def setEvalAnsType(self, quesId, ansType, acc):
if ansType not in self.evalAnsType:
self.evalAnsType[ansType] = {}
self.evalAnsType[ansType][quesId] = round(100 * acc, self.n)
def updateProgress(self, progress):
barLength = 20
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength * progress))
text = "\rFinshed Percent: [{0}] {1}% {2}".format(
"#" * block + "-" * (barLength - block), int(progress * 100), status
)
sys.stdout.write(text)
sys.stdout.flush()
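
# Minimal standalone sketch (not part of the original class) of the VQA accuracy
# rule that evaluate() implements above: the prediction is scored against each
# leave-one-annotator-out subset as min(#matching answers / 3, 1), and the
# per-question accuracy is the mean over annotators.
def _example_vqa_accuracy(pred_answer, gt_answers):
    accs = []
    for i in range(len(gt_answers)):
        others = gt_answers[:i] + gt_answers[i + 1:]
        matches = sum(1 for ans in others if ans == pred_answer)
        accs.append(min(1.0, matches / 3.0))
    return sum(accs) / len(accs)

# e.g. _example_vqa_accuracy("2", ["2"] * 10) == 1.0, while an answer given by
# only one of ten annotators scores (9 * 1/3) / 10 == 0.3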
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
__author__ = "aagrawal"
__version__ = "0.9"
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(VQA.function)"
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
if annotation_file is not None and question_file is not None:
print("loading VQA annotations and questions into memory...")
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, "r"))
questions = json.load(open(question_file, "r"))
self.dataset = dataset
self.questions = questions
self.createIndex()
def createIndex(self):
# create index
print("creating index...")
imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]}
qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
for ann in self.dataset["annotations"]:
imgToQA[ann["image_id"]] += [ann]
qa[ann["question_id"]] = ann
for ques in self.questions["questions"]:
qqa[ques["question_id"]] = ques
print("index created!")
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
for key, value in self.dataset["info"].items():
print("%s: %s" % (key, value))
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset["annotations"]
else:
if not len(imgIds) == 0:
anns = sum(
[self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],
[],
)
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(quesTypes) == 0
else [ann for ann in anns if ann["question_type"] in quesTypes]
)
anns = (
anns
if len(ansTypes) == 0
else [ann for ann in anns if ann["answer_type"] in ansTypes]
)
ids = [ann["question_id"] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset["annotations"]
else:
if not len(quesIds) == 0:
anns = sum(
[self.qa[quesId] for quesId in quesIds if quesId in self.qa], []
)
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(quesTypes) == 0
else [ann for ann in anns if ann["question_type"] in quesTypes]
)
anns = (
anns
if len(ansTypes) == 0
else [ann for ann in anns if ann["answer_type"] in ansTypes]
)
ids = [ann["image_id"] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann["question_id"]
print("Question: %s" % (self.qqa[quesId]["question"]))
for ans in ann["answers"]:
print("Answer %d: %s" % (ans["answer_id"], ans["answer"]))
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset["info"] = copy.deepcopy(self.questions["info"])
res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"])
res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"])
res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"])
res.dataset["license"] = copy.deepcopy(self.questions["license"])
print("Loading and preparing results... ")
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, "results is not an array of objects"
annsQuesIds = [ann["question_id"] for ann in anns]
assert set(annsQuesIds) == set(
self.getQuesIds()
), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file."
for ann in anns:
quesId = ann["question_id"]
if res.dataset["task_type"] == "Multiple Choice":
assert (
ann["answer"] in self.qqa[quesId]["multiple_choices"]
), "predicted answer is not one of the multiple choices"
qaAnn = self.qa[quesId]
ann["image_id"] = qaAnn["image_id"]
ann["question_type"] = qaAnn["question_type"]
ann["answer_type"] = qaAnn["answer_type"]
print(
"DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds())
)
res.dataset["annotations"] = anns
res.createIndex()
return res
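
# Illustrative end-to-end sketch (not part of the original file) of how the VQA class
# above and the VQAEval class from the companion evaluation module are typically
# driven together; the file paths are hypothetical.
def _example_vqa_evaluation():
    annotation_file = "v2_mscoco_val2014_annotations.json"        # hypothetical path
    question_file = "v2_OpenEnded_mscoco_val2014_questions.json"  # hypothetical path
    result_file = "vqa_results.json"                              # hypothetical path
    vqa = VQA(annotation_file, question_file)
    vqa_res = vqa.loadRes(result_file, question_file)
    vqa_eval = VQAEval(vqa, vqa_res, n=2)  # round accuracies to 2 decimal places
    vqa_eval.evaluate()
    return vqa_eval.accuracy["overall"], vqa_eval.accuracy["perAnswerType"]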
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
__all__ = ["RunnerBase", "RunnerIter"]
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
download_cached_file,
get_rank,
get_world_size,
is_main_process,
main_process,
)
IterLoader,
MultiIterLoader,
PrefetchLoader,
)
@registry.register_runner("runner_base")
class RunnerBase:
"""
A runner class to train and evaluate a model given a task and datasets.
The runner uses pytorch distributed data parallel by default. Future release
will support other distributed frameworks.
"""
def __init__(self, cfg, task, model, datasets, job_id):
self.config = cfg
self.job_id = job_id
self.task = task
self.datasets = datasets
self._model = model
self._wrapped_model = None
self._device = None
self._optimizer = None
self._scaler = None
self._dataloaders = None
self._lr_sched = None
self.start_epoch = 0
# self.setup_seeds()
self.setup_output_dir()
@property
def device(self):
if self._device is None:
self._device = torch.device(self.config.run_cfg.device)
return self._device
@property
def use_distributed(self):
return self.config.run_cfg.distributed
@property
def model(self):
"""
A property to get the DDP-wrapped model on the device.
"""
# move model to device
if self._model.device != self.device:
self._model = self._model.to(self.device)
# distributed training wrapper
if self.use_distributed:
if self._wrapped_model is None:
self._wrapped_model = DDP(
self._model, device_ids=[self.config.run_cfg.gpu]
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
# TODO make optimizer class and configurations
if self._optimizer is None:
self._optimizer = torch.optim.AdamW(
params=self.model.parameters(),
lr=float(self.config.run_cfg.init_lr),
weight_decay=float(self.config.run_cfg.weight_decay),
)
return self._optimizer
@property
def scaler(self):
amp = self.config.run_cfg.get("amp", False)
if amp:
if self._scaler is None:
self._scaler = torch.cuda.amp.GradScaler()
return self._scaler
@property
def lr_scheduler(self):
"""
A property to create the learning rate scheduler lazily on first access.
"""
if self._lr_sched is None:
lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)
# max_epoch = self.config.run_cfg.max_epoch
max_epoch = self.max_epoch
# min_lr = self.config.run_cfg.min_lr
min_lr = self.min_lr
# init_lr = self.config.run_cfg.init_lr
init_lr = self.init_lr
# optional parameters
decay_rate = self.config.run_cfg.get("lr_decay_rate", None)
warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1)
warmup_steps = self.config.run_cfg.get("warmup_steps", 0)
self._lr_sched = lr_sched_cls(
optimizer=self.optimizer,
max_epoch=max_epoch,
min_lr=min_lr,
init_lr=init_lr,
decay_rate=decay_rate,
warmup_start_lr=warmup_start_lr,
warmup_steps=warmup_steps,
)
return self._lr_sched
@property
def dataloaders(self) -> dict:
"""
A property to create dataloaders by split, lazily on first access.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. The training set becomes a tuple
(ConcatDataset, ChainDataset); both are optional but at least one of them is
required. The resulting ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by the given ratios during training.
Multiple datasets for validation and test are currently not supported.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = concat_datasets(datasets)
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
if hasattr(self.datasets[split_name], "__len__"):
# a single map-style dataset
num_records = len(self.datasets[split_name])
else:
# a single wds.DataPipeline
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
dataset_ratios=dataset_ratios,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
@property
def cuda_enabled(self):
return self.device.type == "cuda"
@property
def max_epoch(self):
return int(self.config.run_cfg.max_epoch)
@property
def log_freq(self):
log_freq = self.config.run_cfg.get("log_freq", 50)
return int(log_freq)
@property
def init_lr(self):
return float(self.config.run_cfg.init_lr)
@property
def min_lr(self):
return float(self.config.run_cfg.min_lr)
@property
def accum_grad_iters(self):
return int(self.config.run_cfg.get("accum_grad_iters", 1))
@property
def valid_splits(self):
valid_splits = self.config.run_cfg.get("valid_splits", [])
if len(valid_splits) == 0:
logging.info("No validation splits found.")
return valid_splits
@property
def test_splits(self):
test_splits = self.config.run_cfg.get("test_splits", [])
return test_splits
@property
def train_splits(self):
train_splits = self.config.run_cfg.get("train_splits", [])
if len(train_splits) == 0:
logging.info("Empty train splits.")
return train_splits
@property
def evaluate_only(self):
"""
Set to True to skip training.
"""
return self.config.run_cfg.evaluate
@property
def use_dist_eval_sampler(self):
return self.config.run_cfg.get("use_dist_eval_sampler", True)
@property
def resume_ckpt_path(self):
return self.config.run_cfg.get("resume_ckpt_path", None)
@property
def train_loader(self):
train_dataloader = self.dataloaders["train"]
return train_dataloader
def setup_output_dir(self):
lib_root = Path(registry.get_path("library_root"))
output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id
result_dir = output_dir / "result"
output_dir.mkdir(parents=True, exist_ok=True)
result_dir.mkdir(parents=True, exist_ok=True)
registry.register_path("result_dir", str(result_dir))
registry.register_path("output_dir", str(output_dir))
self.result_dir = result_dir
self.output_dir = output_dir
def train(self):
start_time = time.time()
best_agg_metric = 0
best_epoch = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for cur_epoch in range(self.start_epoch, self.max_epoch):
# training phase
if not self.evaluate_only:
logging.info("Start training")
train_stats = self.train_epoch(cur_epoch)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_epoch, best_agg_metric = cur_epoch, agg_metrics
self._save_checkpoint(cur_epoch, is_best=True)
val_log.update({"best_epoch": best_epoch})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each epoch.
if not self.evaluate_only:
self._save_checkpoint(cur_epoch, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch
self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def evaluate(self, cur_epoch="best", skip_reload=False):
test_logs = dict()
if len(self.test_splits) > 0:
for split_name in self.test_splits:
test_logs[split_name] = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload
)
return test_logs
def train_epoch(self, epoch):
# train
self.model.train()
return self.task.train_epoch(
epoch=epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@torch.no_grad()
def eval_epoch(self, split_name, cur_epoch, skip_reload=False):
"""
Evaluate the model on a given split.
Args:
split_name (str): name of the split to evaluate on.
cur_epoch (int): current epoch.
skip_reload (bool): whether to skip reloading the best checkpoint.
During training, we reload the best checkpoint for validation.
During testing, we use the provided weights and skip reloading the best checkpoint.
"""
data_loader = self.dataloaders.get(split_name, None)
assert data_loader, "data_loader for split {} is None.".format(split_name)
# TODO In validation, you need to compute loss as well as metrics
# TODO consider moving to model.before_evaluation()
model = self.unwrap_dist_model(self.model)
if not skip_reload and cur_epoch == "best":
model = self._reload_best_model(model)
model.eval()
self.task.before_evaluation(
model=model,
dataset=self.datasets[split_name],
)
results = self.task.evaluation(model, data_loader)
if results is not None:
return self.task.after_evaluation(
val_result=results,
split_name=split_name,
epoch=cur_epoch,
)
def unwrap_dist_model(self, model):
if self.use_distributed:
return model.module
else:
return model
def create_loaders(
self,
datasets,
num_workers,
batch_sizes,
is_trains,
collate_fns,
dataset_ratios=None,
):
"""
Create dataloaders for training and validation.
"""
def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
# create a single dataloader for each split
if isinstance(dataset, ChainDataset) or isinstance(
dataset, wds.DataPipeline
):
# wds.WebDataset instances are chained together
# webdataset.DataPipeline has its own sampler and collate_fn
loader = iter(
DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
)
)
else:
# map-style datasets are concatenated together
# setup distributed sampler
if self.use_distributed:
sampler = DistributedSampler(
dataset,
shuffle=is_train,
num_replicas=get_world_size(),
rank=get_rank(),
)
if not self.use_dist_eval_sampler:
# e.g. retrieval evaluation
sampler = sampler if is_train else None
else:
sampler = None
loader = DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
sampler=sampler,
shuffle=sampler is None and is_train,
collate_fn=collate_fn,
drop_last=True if is_train else False,
)
loader = PrefetchLoader(loader)
if is_train:
loader = IterLoader(loader, use_distributed=self.use_distributed)
return loader
loaders = []
for dataset, bsz, is_train, collate_fn in zip(
datasets, batch_sizes, is_trains, collate_fns
):
if isinstance(dataset, list) or isinstance(dataset, tuple):
loader = MultiIterLoader(
loaders=[
_create_loader(d, num_workers, bsz, is_train, collate_fn[i])
for i, d in enumerate(dataset)
],
ratios=dataset_ratios,
)
else:
loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
loaders.append(loader)
return loaders
@main_process
def _save_checkpoint(self, cur_epoch, is_best=False):
"""
Save the checkpoint at the current epoch.
"""
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"epoch": cur_epoch,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_epoch),
)
logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to))
torch.save(save_obj, save_to)
def _reload_best_model(self, model):
"""
Load the best checkpoint for evaluation.
"""
checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth")
logging.info("Loading checkpoint from {}.".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location=self.device)
model.load_state_dict(checkpoint["model"])
return model
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_epoch = checkpoint["epoch"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@main_process
def log_stats(self, stats, split_name):
if isinstance(stats, dict):
log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}}
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(log_stats) + "\n")
elif isinstance(stats, list):
pass
@main_process
def log_config(self):
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
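
# Illustrative sketch (not part of the original file) of the run_cfg fields that
# RunnerBase reads above. In the real project this normally comes from a YAML config
# (an OmegaConf-style node with a .get() interface); the values below are placeholders
# and the scheduler name is an assumed registry key.
_EXAMPLE_RUN_CFG = {
    "device": "cuda",
    "distributed": False,
    "amp": True,
    "lr_sched": "linear_warmup_cosine_lr",  # assumed registered scheduler name
    "init_lr": 1e-4,
    "min_lr": 1e-5,
    "warmup_lr": 1e-6,
    "warmup_steps": 1000,
    "weight_decay": 0.05,
    "max_epoch": 10,
    "batch_size_train": 16,
    "batch_size_eval": 32,
    "num_workers": 4,
    "accum_grad_iters": 1,
    "train_splits": ["train"],
    "valid_splits": ["val"],
    "test_splits": ["test"],
    "evaluate": False,
    "resume_ckpt_path": None,
    "output_dir": "output/example_run",
}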
|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
@registry.register_runner("runner_iter")
class RunnerIter(RunnerBase):
"""
Run training based on the number of iterations. This is common when
the training dataset size is large. Under the hood, the logic is similar to
epoch-based training: every #iters_per_inner_epoch steps are treated as an
inner epoch.
In iter-based runner, after every #iters_per_inner_epoch steps, we
1) do a validation epoch;
2) schedule the learning rate;
3) save the checkpoint.
We refer to every #iters_per_inner_epoch steps as an inner epoch.
"""
def __init__(self, cfg, task, model, datasets, job_id):
super().__init__(cfg, task, model, datasets, job_id)
self.start_iters = 0
self.max_iters = int(self.config.run_cfg.get("max_iters", -1))
assert self.max_iters > 0, "max_iters must be greater than 0."
self.iters_per_inner_epoch = int(
self.config.run_cfg.get("iters_per_inner_epoch", -1)
)
assert (
self.iters_per_inner_epoch > 0
), "iters_per_inner_epoch must be greater than 0."
@property
def max_epoch(self):
return int(self.max_iters / self.iters_per_inner_epoch)
@property
def cur_epoch(self):
try:
return self.train_loader.epoch
except AttributeError:
# pipeline data (e.g. LAION) is streaming, have no concept of epoch
return 0
def _progress(self, cur_iters):
return "{}_iters={}".format(self.cur_epoch, cur_iters)
def train(self):
start_time = time.time()
best_agg_metric = 0
best_iters = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for start_iters in range(
self.start_iters, self.max_iters, self.iters_per_inner_epoch
):
end_iters = start_iters + self.iters_per_inner_epoch
# training phase
if not self.evaluate_only:
logging.info(
"Start training, max_iters={}, in total {} inner epochs.".format(
self.max_iters, int(self.max_iters / self.iters_per_inner_epoch)
)
)
train_stats = self.train_iters(self.cur_epoch, start_iters)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=self._progress(end_iters)
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
A property to create dataloaders by split, lazily on first access.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. The training set becomes a tuple
(ConcatDataset, ChainDataset); both are optional but at least one of them is
required. The resulting ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by the given ratios during training.
Multiple datasets for validation and test are currently not supported.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = concat_datasets(datasets)
else:
# create multi-loader with the provided ratios, without concatenating or chaining
missing_keys = [k for k in dataset_ratios if k not in self.datasets]
if len(missing_keys) > 0:
raise ValueError(
"Datasets with the following split names are not found: {}".format(
missing_keys
)
)
unexpected_keys = [k for k in self.datasets if k not in dataset_ratios]
if len(unexpected_keys) > 0:
raise ValueError(
"Datasets with the following split names are not expected: {}".format(
unexpected_keys
)
)
dataset_ratios = [float(dataset_ratios[k]) for k in self.datasets]
self.datasets = reorg_datasets_by_split(self.datasets)
# to keep the same structure as return value of concat_datasets
self.datasets = {
k: v[0] if len(v) == 1 else v for k, v in self.datasets.items()
}
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
try:
# a single map-style dataset
num_records = len(self.datasets[split_name])
except TypeError:
# a single wds.DataPipeline or ChainDataset
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
dataset_ratios=dataset_ratios,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
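
# Worked example (not part of the original file) of the inner-epoch bookkeeping
# RunnerIter uses above: with max_iters=100_000 and iters_per_inner_epoch=5_000
# there are 20 inner epochs, and validation / LR scheduling / checkpointing happen
# at iterations 5_000, 10_000, ..., 100_000 (checkpoints tagged e.g. "checkpoint_5000.pth").
def _example_inner_epoch_schedule(max_iters=100_000, iters_per_inner_epoch=5_000):
    num_inner_epochs = max_iters // iters_per_inner_epoch  # == RunnerIter.max_epoch
    boundaries = list(range(iters_per_inner_epoch, max_iters + 1, iters_per_inner_epoch))
    return num_inner_epochs, boundaries[:3]  # (20, [5000, 10000, 15000])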
|
class Pos2Map:
def __init__(self, x, y, heading) -> None:
self.x = x
self.y = y
self.heading = heading
class Pos2World:
def __init__(self, x, y, z, heading) -> None:
self.x = x
self.y = y
self.z = z
self.heading = heading
class Geometry_Tools:
def __init__(self, image_resolution, fov, camera_height) -> None:
self._camera_matrix = self._parse_camera_matrix(*image_resolution, fov)
self._camera_height = camera_height
def _parse_r_matrix(self, ax_, angle):
ax = ax_ / np.linalg.norm(ax_)
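# Rodrigues' rotation formula: R = I + sin(angle) * K + (1 - cos(angle)) * K^2,
# where K (S_hat below) is the skew-symmetric cross-product matrix of the unit axis.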
if np.abs(angle) > 0.001:
S_hat = np.array([[0.0, -ax[2], ax[1]], [ax[2], 0.0, -ax[0]], [-ax[1], ax[0], 0.0]], dtype=np.float32)
R = np.eye(3) + np.sin(angle) * S_hat + (1 - np.cos(angle)) * (np.linalg.matrix_power(S_hat, 2))
else:
R = np.eye(3)
return R
def _parse_camera_matrix(self, width, height, fov):
xc = (width - 1.0) / 2.0
zc = (height - 1.0) / 2.0
f = (width / 2.0) / np.tan(np.deg2rad(fov / 2.0))
camera_matrix = {"xc": xc, "zc": zc, "f": f}
camera_matrix = Namespace(**camera_matrix)
return camera_matrix
def transformation_robot2world(self, goal2robot2world: list, pos2world: Pos2World) -> list:
"""transform the point relative to robot to the point relative to th world
Args:
goal2robot2world (list): = [u, v] u is in the right relative to the robot and v is in the forward of th robot
robot first moves according to the v frame and finally u frame
pos2world (Pos2World): _description_
Returns:
list: _description_
"""
u, v = goal2robot2world
x0, y0, z0 = pos2world.x, pos2world.y, pos2world.z
x1 = x0 + v * np.cos(pos2world.heading + np.pi / 2)
z1 = -(-z0 + v * np.sin(pos2world.heading + np.pi / 2))
x2 = x1 + u * np.cos(pos2world.heading + np.pi / 2 - np.pi / 2)
z2 = -(-z1 + u * np.sin(pos2world.heading + np.pi / 2 - np.pi / 2))
return [x2, y0, z2]
def transformation_robotbase2map(
self, point_clouds_2robotbase: np.array, pos2map: Pos2Map, resolution_meter2pixel
) -> np.array:
"""Mapping the points with the robot base as the coordinate system to the map coordinate system
Args:
point_clouds_2robotbase (np.array): point cloud in the robot-base frame, in shape (width, height, 3)
pos2map (Pos2Map): current robot pose on the map
resolution_meter2pixel (float): map resolution in meters per pixel
Returns:
np.array: point_clouds_2map
"""
R = self._parse_r_matrix([0.0, 0.0, 1.0], angle=pos2map.heading - np.pi / 2.0)
point_clouds_2map = np.matmul(point_clouds_2robotbase.reshape(-1, 3), R.T).reshape(
point_clouds_2robotbase.shape
)
point_clouds_2map[:, :, 0] = point_clouds_2map[:, :, 0] + pos2map.x * resolution_meter2pixel
point_clouds_2map[:, :, 1] = point_clouds_2map[:, :, 1] + pos2map.y * resolution_meter2pixel
return point_clouds_2map
def transformation_robotcamera2base(self, point_clouds: np.array) -> np.array:
"""Mapping the points with the robot camera as the coordinate system to the robot base coordinate system
Args:
point_clouds (np.array): In shape (width, height, 3);
point_clouds[..., 0] is the X coordinate, point_clouds[..., 1] the Y coordinate, point_clouds[..., 2] the Z coordinate
Returns:
np.array: Array of point clouds relative to the robot base coordinate system; In shape (width, height, 3)
"""
point_clouds[..., 2] = point_clouds[..., 2] + self._camera_height
return point_clouds
def transformation_camera2robotcamera(self, depth_img: np.array) -> np.array:
"""Mapping the points on the depth map to points with the robot camera as the coordinate system
Args:
depth_img (np.array): In shape (width, height, 1); pixel values are expressed in units of 10 meters
Returns:
np.array: Array of point clouds relative to the robot camera coordinate system; In shape (width, height, 3)
"""
x, z = np.meshgrid(np.arange(depth_img.shape[-2]), np.arange(depth_img.shape[-3] - 1, -1, -1))
for _ in range(depth_img.ndim - 3):
x = np.expand_dims(x, axis=0)
z = np.expand_dims(z, axis=0)
X = (x - self._camera_matrix.xc) * depth_img[:, :, 0] / self._camera_matrix.f
# print(depth_img)
Z = (z - self._camera_matrix.zc) * depth_img[:, :, 0] / self._camera_matrix.f
pc = np.concatenate((X[..., np.newaxis], depth_img, Z[..., np.newaxis]), axis=2)
return pc
def transformation_pointcloud2occupiedmap(
self, point_clouds_2map: np.array, map_size, z_bins: list, resolution_meter2pixel, free_index, occupied_index
) -> np.array:
"""project the point cloud relative to the map coordinate system to the top view
Args:
point_clouds_2map (np.array):
map_size (_type_):
z_bins (list): a list of height thresholds used to split the point cloud into free and occupied bins
resolution_meter2pixel (_type_):
free_index (_type_): representative values of navigable areas on the map
occupied_index (_type_): representative values of obstacle areas on the map
Returns:
np.array: top down map in shape (map_size, map_size)
"""
n_z_bins = len(z_bins) + 1
isnotnan = np.logical_not(np.isnan(point_clouds_2map[:, :, 1]))
# transform points meter to pixel
X_bin = np.round(point_clouds_2map[:, :, 0] / resolution_meter2pixel).astype(np.int32)
Y_bin = np.round(point_clouds_2map[:, :, 1] / resolution_meter2pixel).astype(np.int32)
""" function explaination
np.digitize : split the point according to the z_bins
example:
z_bins = [1] ; points that lower than 1 is 0 else 1
"""
Z_bin = np.digitize(point_clouds_2map[:, :, 2], bins=z_bins).astype(np.int32)
# filter out the points outside the map and nan
isvalid = np.array(
[X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size, Z_bin >= 0, Z_bin < n_z_bins, isnotnan]
)
isvalid = np.all(isvalid, axis=0)
ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
ind[np.logical_not(isvalid)] = 0
indr = ind.ravel()
isvalidr = isvalid.ravel().astype(np.int32)
count = np.bincount(indr, isvalidr, minlength=map_size * map_size * n_z_bins)
count = count[: map_size * map_size * n_z_bins]
count = np.reshape(count, [map_size, map_size, n_z_bins])
map = np.zeros((count.shape[0], count.shape[1]))
free_mask = count[:, :, 0] > 0
map[free_mask] = free_index
occupied_mask = count[:, :, 1] > 0
map[occupied_mask] = occupied_index
return map
def transformation_quatrtnion2heading(self, rotation: quaternion):
quat = quaternion_to_list(rotation)
q = R.from_quat(quat)
heading = q.as_rotvec()[1]
return heading
def transformation_pointcloud2semanticmap(
self, point_clouds_2map: np.array, map_size, z_bins: list, resolution_meter2pixel, free_index, semantic_obs
) -> np.array:
"""project the point cloud relative to the map coordinate system to the top view
Args:
point_clouds_2map (np.array):
map_size (_type_):
z_bins (list): a list of height thresholds used to split the point cloud into free and occupied bins
resolution_meter2pixel (_type_):
free_index (_type_): representative values of navigable areas on the map
semantic_obs (np.array): per-pixel semantic labels, in shape (depth_img.shape[0], depth_img.shape[1])
Returns:
np.array: top down map in shape (map_size, map_size)
"""
n_z_bins = len(z_bins) + 1
isnotnan = np.logical_not(np.isnan(point_clouds_2map[:, :, 1]))
# transform points meter to pixel
X_bin = np.round(point_clouds_2map[:, :, 0] / resolution_meter2pixel).astype(np.int32)
Y_bin = np.round(point_clouds_2map[:, :, 1] / resolution_meter2pixel).astype(np.int32)
""" function explaination
np.digitize : split the point according to the z_bins
example:
z_bins = [1] ; points that lower than 1 is 0 else 1
"""
Z_bin = np.digitize(point_clouds_2map[:, :, 2], bins=z_bins).astype(np.int32)
# filter out the points outside the map and nan
isvalid = np.array(
[X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size, Z_bin >= 0, Z_bin < n_z_bins, isnotnan]
)
isvalid = np.all(isvalid, axis=0)
ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
ind[np.logical_not(isvalid)] = 0
indr = ind.ravel()
isvalidr = isvalid.ravel().astype(np.int32)
count = np.bincount(indr, isvalidr, minlength=map_size * map_size * n_z_bins)
count = count[: map_size * map_size * n_z_bins]
count = np.reshape(count, [map_size, map_size, n_z_bins])
map = np.zeros((count.shape[0], count.shape[1]))
free_mask = count[:, :, 0] > 0
occupied_mask = count[:, :, 1] > 0
for y in range(X_bin.shape[0]):
for x in range(X_bin.shape[1]):
if Y_bin[y, x] >= 0 and Y_bin[y, x] < map_size and X_bin[y, x] >= 0 and X_bin[y, x] < map_size:
if occupied_mask[Y_bin[y, x], X_bin[y, x]]:
map[Y_bin[y, x], X_bin[y, x]] = semantic_obs[y, x]
elif free_mask[Y_bin[y, x], X_bin[y, x]]:
map[Y_bin[y, x], X_bin[y, x]] = free_index
return map
class Mode_Selector:
def __init__(self) -> None:
pass
class Action_Space:
move_forward = 1
turn_left = 2
turn_right = 3
class Application(Geometry_Tools):
def __init__(
self,
image_resolution,
fov,
depth_threshold,
resolution_meter2pixel,
map_size,
camera_height,
free_index,
occupied_index,
) -> None:
super().__init__(image_resolution, fov, camera_height)
self._resolution_meter2pixel = resolution_meter2pixel
self._depth_threshold = depth_threshold
self._map_size = map_size
self.pos2map = Pos2Map(self._map_size / 2 + 1, self._map_size / 2 + 1, 0)
self.pos2world = Pos2World(None, None, None, None)
self._free_index = free_index
self._occupied_index = occupied_index
def parse_semantic_pointclouds(self, depth_img: np.array, semantic_obs: np.array, semantic_anno):
"""Parse the point cloud dictionary with semantic annotation and the average coordinate dictionary of each
semantically annotated object in the robot camera coordinate system
Args:
depth_img (np.array): In shape (width, height, 1)
semantic_obs (np.array): In shape (width, height)
semantic_anno (_type_): _description_
Returns:
mapping_semantic: dictionary of all points corresponding to each label in the semantic_obs
occupied_pc: dictionary of average points corresponding to each label in the semantic_obs
"""
# filter out points that exceed a certain distance
depth_img[depth_img > self._depth_threshold] = np.NaN
# parse point clouds relative to the robot camera coordinate system
point_clouds_2robotcamera = self.transformation_camera2robotcamera(depth_img)
# label each pixel in semantic_obs
## TODO: handle the case where different instance indices of the same object share the same label
mapping_semantic = {}
for row in range(semantic_obs.shape[0]):
for col in range(semantic_obs.shape[1]):
label = semantic_anno[semantic_obs[row, col]]
if label not in mapping_semantic:
mapping_semantic[label] = [point_clouds_2robotcamera[row, col]]
else:
mapping_semantic[label].append(point_clouds_2robotcamera[row, col])
# drop labels with fewer than 50 pixels and uninformative labels
occupied_pc = {}
for k, v in mapping_semantic.items():
if len(v) < 50:
continue
elif k in ["floor", "ceiling", "misc", "wall", "objects", "void"]:
continue
else:
occupied_pc[k] = (sum(v) / len(v)).tolist()
return mapping_semantic, occupied_pc
def parse_depth_topdownmap(self, depth_img: np.array) -> np.array:
"""project depth image into the top down map
Args:
depth_img (np.array): in shape (width, height, 1)
Returns:
np.array: map in shape (map_size, map_size) where value 0 stands for unknown space,
self._free_index for free space and self._occupied_index for occupied space
"""
# filter out points that exceed a certain distance
depth_img[depth_img > self._depth_threshold] = np.NaN
# parse point clouds relative to the robot camera coordinate system
point_clouds_2robotcamera = self.transformation_camera2robotcamera(depth_img)
# parse point clouds relative to the robot base coordinate system
point_clouds_2robotbase = self.transformation_robotcamera2base(point_clouds_2robotcamera)
# parse point clouds relative to the map coordinate system
point_clouds_2map = self.transformation_robotbase2map(
point_clouds_2robotbase, self.pos2map, self._resolution_meter2pixel
)
# project the point clouds relative to the map coordinate system to top down map
occupied_map = self.transformation_pointcloud2occupiedmap(
point_clouds_2map,
self._map_size,
[self._camera_height],
self._resolution_meter2pixel,
self._free_index,
self._occupied_index,
)
return occupied_map
def parse_semantic_topdownmap(self, depth_img: np.array, semantic_img: np.array) -> np.array:
# filter out points that exceed a certain distance
depth_img[depth_img > self._depth_threshold] = np.NaN
# parse point clouds relative to the robot camera coordinate system
point_clouds_2robotcamera = self.transformation_camera2robotcamera(depth_img)
# parse point clouds relative to the robot base coordinate system
point_clouds_2robotbase = self.transformation_robotcamera2base(point_clouds_2robotcamera)
# parse point clouds relative to the map coordinate system
point_clouds_2map = self.transformation_robotbase2map(
point_clouds_2robotbase, self.pos2map, self._resolution_meter2pixel
)
# project the point clouds relative to the map coordinate system to top down map
semantic_map = self.transformation_pointcloud2semanticmap(
point_clouds_2map,
self._map_size,
[self._camera_height],
self._resolution_meter2pixel,
self._free_index,
semantic_img,
)
return semantic_map
def update_pos2map_by_action(self, forward_step2tenmeter, turn_angle2degree, action) -> None:
if action == Action_Space.move_forward:
self.pos2map.x = (
self.pos2map.x + forward_step2tenmeter * np.cos(self.pos2map.heading) / self._resolution_meter2pixel
)
self.pos2map.y = (
self.pos2map.y + forward_step2tenmeter * np.sin(self.pos2map.heading) / self._resolution_meter2pixel
)
elif action == Action_Space.turn_left:
self.pos2map.heading = self.pos2map.heading + turn_angle2degree * np.pi / 180.0
elif action == Action_Space.turn_right:
self.pos2map.heading = self.pos2map.heading - turn_angle2degree * np.pi / 180.0
if self.pos2map.heading > np.pi * 2:
self.pos2map.heading -= np.pi * 2
elif self.pos2map.heading < 0:
self.pos2map.heading += np.pi * 2
def update_pos2map_by_cooardinate(self, tgt_pos2world: list = None, tgt_rot2world: quaternion = None) -> None:
"""_summary_
Args:
tgt_pos2world (list, optional): _description_. Defaults to None.
tgt_rot2world (quaternion)
tgt_heading2world (_type_, optional): in radius. Defaults to None.
"""
if not tgt_rot2world is None:
tgt_heading2world = self.transformation_quatrtnion2heading(tgt_rot2world)
if tgt_heading2world > np.pi * 2:
tgt_heading2world -= np.pi * 2
elif tgt_heading2world < 0:
tgt_heading2world += np.pi * 2
if self.pos2world.x is None:
self.pos2world.x = tgt_pos2world[0]
self.pos2world.y = tgt_pos2world[1]
self.pos2world.z = tgt_pos2world[2]
self.pos2world.heading = tgt_heading2world
else:
if not tgt_pos2world is None and not (
abs(tgt_pos2world[0] - self.pos2world.x) + abs(tgt_pos2world[2] - self.pos2world.z) < 0.001
):
xt, yt, zt = tgt_pos2world
delta_heading2world = np.arctan((xt - self.pos2world.x) / (zt - self.pos2world.z))
delta_heading2world = (
delta_heading2world
if (self.pos2world.heading < np.pi / 2 or self.pos2world.heading > np.pi * 3 / 2)
else delta_heading2world + np.pi
)
delta_distance2map = (
np.linalg.norm([(xt - self.pos2world.x) / 10, (zt - self.pos2world.z) / 10])
/ self._resolution_meter2pixel
)
delta_heading2curheading = delta_heading2world - self.pos2world.heading
delta_heading2map = delta_heading2curheading + self.pos2map.heading
self.pos2map.x = self.pos2map.x + np.cos(delta_heading2map) * delta_distance2map
self.pos2map.y = self.pos2map.y + np.sin(delta_heading2map) * delta_distance2map
self.pos2world.x = xt
self.pos2world.y = yt
self.pos2world.z = zt
if not tgt_heading2world is None:
delta_heading2world = tgt_heading2world - self.pos2world.heading
self.pos2world.heading = tgt_heading2world
if self.pos2world.heading > np.pi * 2:
self.pos2world.heading -= np.pi * 2
elif self.pos2world.heading < 0:
self.pos2world.heading += np.pi * 2
self.pos2map.heading += delta_heading2world
if self.pos2map.heading > np.pi * 2:
self.pos2map.heading -= np.pi * 2
elif self.pos2map.heading < 0:
self.pos2map.heading += np.pi * 2
def update_occupied_map(self, new_occupied_map, old_occupied_map):
mask_free_region = new_occupied_map == self._free_index
old_occupied_map[mask_free_region] = self._free_index
mask_occupied_region = new_occupied_map == self._occupied_index
old_occupied_map[mask_occupied_region] = self._occupied_index
return old_occupied_map
def update_semantic_map(self, new_semantic_map, old_semantic_map):
mask = new_semantic_map > 0
for y in range(old_semantic_map.shape[0]):
for x in range(old_semantic_map.shape[1]):
if mask[y, x]:
old_semantic_map[y, x] = new_semantic_map[y, x]
return old_semantic_map
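
# Illustrative usage sketch (not part of the original class). The constructor values
# mirror the Application((512, 512), 90, 1, 0.005, 600, 1.5, 1, 2) call used elsewhere
# in these files; the depth image below is random placeholder data, with values
# expressed in units of 10 meters.
def _example_build_occupied_map():
    app = Application(
        image_resolution=(512, 512),
        fov=90,
        depth_threshold=1,            # i.e. 10 meters
        resolution_meter2pixel=0.005,
        map_size=600,
        camera_height=1.5,
        free_index=1,
        occupied_index=2,
    )
    depth_img = np.random.rand(512, 512, 1).astype(np.float32)  # placeholder depth
    occupied_map = app.parse_depth_topdownmap(depth_img)        # (600, 600) top-down map
    # keep the pose on the map in sync with an executed action
    app.update_pos2map_by_action(forward_step2tenmeter=0.025, turn_angle2degree=30,
                                 action=Action_Space.move_forward)
    return occupied_map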
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def robot2world(position, u, v, heading):
x0, y0, z0 = position
x1 = x0 + v * np.cos(heading + np.pi / 2)
z1 = -(-z0 + v * np.sin(heading + np.pi / 2))
x2 = x1 + u * np.cos(heading + np.pi / 2 - np.pi / 2)
z2 = -(-z1 + u * np.sin(heading + np.pi / 2 - np.pi / 2))
return [x2, y0, z2]
def transformation_quatrtnion2heading(rotation: quaternion):
quat = quaternion_to_list(rotation)
q = R.from_quat(quat)
heading = q.as_rotvec()[1]
return heading
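
# Worked example (not part of the original file) for robot2world above: with the
# robot at the world origin and heading = 0, "forward" (v) maps to the -z axis and
# "right" (u) to the +x axis, so a goal 1 unit ahead and 0.5 units to the right
# lands at approximately [0.5, 0.0, -1.0].
def _example_robot2world():
    return robot2world([0.0, 0.0, 0.0], u=0.5, v=1.0, heading=0.0)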
def main(room_name, data_root_dir, depth_dir, feat_dir, sample_num):
feature_dir = os.path.join(feat_dir, room_name)
data_dir = os.path.join(data_root_dir, room_name)
try:
if (
os.path.exists(os.path.join(data_dir, "pcd_feat.pt"))
and torch.load(os.path.join(data_dir, "pcd_feat.pt")).shape[0] > 0
):
return
except Exception:
pass
depth_dir = os.path.join(depth_dir, room_name)
api = Application((512, 512), 90, 1, 0.005, 600, 1.5, 1, 2)
pc_pos = []
pc_feat = []
from tqdm import tqdm
for file in tqdm(os.listdir(feature_dir)):
try:
feature_map = torch.load(os.path.join(feature_dir, file)).detach().cpu().numpy()
except Exception:
continue
pose_file = json.load(open(os.path.join(data_dir, file.replace(".pt", ".json"))))
house_name = room_name.split("_")[0]
ky = room_name.split("_")[1]
bbox = json.load(open(os.path.join("room_bboxes_with_walls_revised_axis", house_name + ".json")))[ky]
min_x = bbox[0][0]
min_y = bbox[0][1]
min_z = bbox[0][2]
max_x = bbox[1][0]
max_y = bbox[1][1]
max_z = bbox[1][2]
rotation_0 = pose_file["rotation"][0]
rotation_1 = pose_file["rotation"][1]
rotation_2 = pose_file["rotation"][2]
rotation_3 = pose_file["rotation"][3]
position = pose_file["translation"]
heading = transformation_quatrtnion2heading(np.quaternion(rotation_0, rotation_1, rotation_2, rotation_3))
if heading > np.pi * 2:
heading -= np.pi * 2
elif heading < 0:
heading += np.pi * 2
depth_map = np.load(os.path.join(depth_dir, file.replace(".pt", "_depth.npy")))
point_clouds_2current = api.transformation_camera2robotcamera(np.expand_dims(depth_map / 10.0, axis=2))
color_map = cv2.imread(os.path.join(data_dir, file.replace(".pt", ".png")))
for w in range(point_clouds_2current.shape[0]):
for h in range(point_clouds_2current.shape[1]):
if np.count_nonzero(feature_map[w, h]) == 0:
continue
if color_map[w, h, 0] == 0 and color_map[w, h, 1] == 0 and color_map[w, h, 2] == 0:
continue
pc2r = [point_clouds_2current[w, h, j] for j in range(point_clouds_2current.shape[-1])]
pc2w = robot2world(position, pc2r[0] * 10, pc2r[1] * 10, heading)
pc2w[1] = pc2r[2] * 10 + pc2w[1]
if not (
(min_x - 0 < pc2w[0] < max_x + 0)
and (min_y - 0 < pc2w[1] < max_y + 0)
and (min_z - 0 < pc2w[2] < max_z + 0)
):
continue
else:
pc_pos.append(pc2w)
pc_feat.append(feature_map[w, h])
pc_pos = np.array(pc_pos)
pc_feat = np.array(pc_feat)
if len(pc_pos) > sample_num:
N = len(pc_pos)
indices = np.random.choice(N, sample_num, replace=False)
final_points = pc_pos[indices]
final_features = pc_feat[indices]
else:
final_points = pc_pos
final_features = pc_feat
print(final_points.shape)
torch.save(final_points, os.path.join(data_dir, "pcd_pos.pt"))
torch.save(final_features, os.path.join(data_dir, "pcd_feat.pt"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--data_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--depth_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--feat_dir_path", default="./maskformer_masks/", type=str)
parser.add_argument("--sample_num", default=300000, type=int)
args = parser.parse_args()
room_list = os.listdir(args.data_dir_path)
for room_name in room_list:
main(room_name, args.data_dir_path, args.depth_dir_path, args.feat_dir_path, args.sample_num)
|
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 1
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = Andromeda()
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE, drop_last = True))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
#save the model weights
torch.save(model.state_dict(), f"./model_{i}.pth")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str) |
model = Andromeda().cuda()
x = torch.randint(0, 256, (1, 1024)).cuda()
model(x) # (1, 1024, 20000) |
# import bitsandbytes as bnb
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy
)
from transformers import (AutoTokenizer, default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup, set_seed)
# from Andromeda.model import Andromeda
########### SETUP CONFIG
# state = AcceleratorState()
logger = get_logger(__name__, log_level="INFO")
class CFG:
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 1e-4 #3e-4 # 1e-4 for lion
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
RESUME_FROM_CHECKPOINT: str = ""  # set to a saved checkpoint dir (e.g. "checkpoints/step_1000") to resume
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "Andromeda"
LOGGING_STEPS: int = 100
# helpers
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, Transformer)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
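# Usage sketch (commented out so the module stays import-safe; illustrative
# only). Assumption: the model has already been built, and typically FSDP
# wrapped, before checkpointing is applied in place to its Transformer blocks.
#
# model = Andromeda1Billion()
# model = fsdp(model, mp="bf16", shard_strat="FULL_SHARD")
# activation_checkpointing(model, offload_to_cpu=False, accelerator=None)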
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
Andromeda_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Transformer,
},
)
else:
Andromeda_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=Andromeda_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
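# Usage sketch (commented out; illustrative only). Assumptions: a process
# group is already initialized (e.g. launched via `torchrun`) and a CUDA
# device is available; the tiny Sequential model stands in for a real network.
#
# import torch.distributed as dist
# dist.init_process_group(backend="nccl")
# toy_model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU(), torch.nn.Linear(16, 16)).cuda()
# toy_model = fsdp(toy_model, auto_wrap=False, mp="bf16", shard_strat="FULL_SHARD")
# out = toy_model(torch.randn(2, 16, device="cuda"))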
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
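# Usage sketch (commented out; dummy values only). Builds a cosine schedule
# around a throwaway optimizer; assumes `transformers` is installed, since the
# helper above delegates to its schedule factories.
#
# dummy_param = torch.nn.Parameter(torch.zeros(1))
# dummy_optim = torch.optim.SGD([dummy_param], lr=1e-4)
# sched = get_lr_scheduler_with_warmup(
#     optimizer=dummy_optim,
#     scheduler_type="cosine",
#     num_warmup_steps=10,
#     max_train_steps=1000,
#     grad_accumulate_every=1,
# )
# for _ in range(25):
#     dummy_optim.step()
#     sched.step()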
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
if accelerator is not None: accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
try:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
except KeyError:
# print(f"Parameter {param_name} does not exist in the model")
pass
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
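# Minimal sketch (commented out) of the decay / no-decay grouping idea used
# above, written against a generic torch model rather than the
# Andromeda-specific parameter names (token_emb / to_logits) that
# decoupled_optimizer assumes.
#
# toy = torch.nn.Sequential(torch.nn.Embedding(10, 8), torch.nn.LayerNorm(8), torch.nn.Linear(8, 10))
# decay, no_decay = [], []
# for name, p in toy.named_parameters():
#     # 1-D params (LayerNorm weights, biases) and the embedding get no weight decay
#     if p.ndim == 1 or name.startswith("0."):
#         no_decay.append(p)
#     else:
#         decay.append(p)
# toy_optim = torch.optim.AdamW(
#     [{"params": decay, "weight_decay": 0.1}, {"params": no_decay, "weight_decay": 0.0}],
#     lr=1e-4, betas=(0.9, 0.95),
# )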
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder; we could instead pad if the model supported it.
# Customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
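# Worked example (commented out) of the chunking performed by group_texts
# above, written standalone since group_texts is a closure over block_size.
# With block_size = 4 and 10 concatenated ids, the trailing remainder of 2 ids
# is dropped and the rest is split into fixed-size blocks:
#
# from itertools import chain
# examples = {"input_ids": [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10]]}
# block_size = 4
# ids = list(chain(*examples["input_ids"]))             # [1, 2, ..., 10]
# total = (len(ids) // block_size) * block_size         # 8 -> remainder dropped
# chunks = [ids[i:i + block_size] for i in range(0, total, block_size)]
# # chunks == [[1, 2, 3, 4], [5, 6, 7, 8]]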
# TODO: switch to the Falcon RefinedWeb dataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
state = AcceleratorState()
state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = CFG.BATCH_SIZE  # DeepSpeed needs the micro batch size set explicitly in its config
accelerator.init_trackers(
project_name="Andromeda",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
# init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
# model = Andromeda(
# num_tokens=50432,
# max_seq_len=8192,
# dim=3072,
# depth=24,
# dim_head=128,
# heads=12,
# use_abs_pos_emb=False,
# alibi_pos_bias=True,
# alibi_num_heads=6,
# rotary_xpos=True,
# attn_flash=True,
# shift_tokens=1,
# attn_one_kv_head=True,
# qk_norm=True,
# attn_qk_norm=True,
# attn_qk_norm_dim_scale=True,
# embedding_provider=AndromedaEmbedding()
# )
model = Andromeda1Billion()
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator=accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='lion',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
# if False: # if CFG.USE_DEEPSPEED:
# lr_scheduler = DummyScheduler(
# optim,
# total_num_steps=max_train_steps * accelerator.num_processes,
# warmup_num_steps=NUM_WARMUP_STEPS
# )
# else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# Recalculate max_train_steps after accelerator.prepare(), since sharding the dataloader across processes can change its length
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# log every CFG.LOGGING_STEPS steps
if CFG.LOGGING_STEPS > 0 and step % CFG.LOGGING_STEPS == 0:
logger.info(
f"Step: {completed_steps}/{max_train_steps}, Loss: {loss.item():.5f}"
)
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def main():
# Defaults below are single-node assumptions; override them, or configure the
# run with "accelerate config", when scaling to multiple GPUs and clusters.
os.environ.setdefault('MASTER_ADDR', 'localhost')
os.environ.setdefault('MASTER_PORT', '9994')
os.environ.setdefault('RANK', str(0))  # global rank of this process
os.environ.setdefault('WORLD_SIZE', str(torch.cuda.device_count()))
dist.init_process_group(backend='nccl') #init_method="env://")
Train()
if __name__ == '__main__':
main() |
Andromeda1Billion = Andromeda(
num_tokens=25000,
max_seq_len=4192,
dim=2048,
depth=16,
dim_head=128,
heads=8,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=4,
rotary_xpos=True,
attn_flash=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
Andromeda3Billion = Andromeda(
num_tokens=50432,
max_seq_len=8192,
dim=3072,
depth=24,
dim_head=128,
heads=12,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=6,
rotary_xpos=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
Andromeda7Billion = Andromeda(
num_tokens=50432,
max_seq_len=8192,
dim=4096,
depth=32,
dim_head=128,
heads=16,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=8,
rotary_xpos=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
Andromeda10Billion = Andromeda(
num_tokens=50432,
max_seq_len=8192,
dim=5120,
depth=32,
dim_head=128,
heads=20,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=4,
rotary_xpos=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
Andromeda15Billion = Andromeda(
num_tokens=50432,
max_seq_len=8192,
dim=6144,
depth=40,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=4,
rotary_xpos=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
Andromeda20Billion = Andromeda(
num_tokens=50432,
max_seq_len=8192,
dim=7168,
depth=48,
dim_head=128,
heads=28,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=4,
rotary_xpos=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
)
# to scale to a GPT-3-like ~175B parameters: 12288 dim, 96 depth, 96 heads, attn dim head 128 |
# from Andromeda.train import Train
from Andromeda.train import Train, train |
# NOTE: import path assumed; point it at the package's core transformer module.
from Andromeda.core.transformer import (
Decoder,
Transformer,
)
class AndromedaTokenizer:
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
eos_token="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
def tokenize_texts(self, texts):
return self.tokenizer(texts, return_tensors='pt', padding=True, truncation=True).input_ids
def decode(self, texts):
return self.tokenizer.decode(texts)
def __len__(self):
num_tokens = len(self.tokenizer)
return num_tokens
class Andromeda(Module):
"""
Andromeda is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
"""
def __init__(self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
):
"""
Initialize the model with specified or default parameters.
Args:
- num_tokens: Number of tokens in the vocabulary
- max_seq_len: Maximum sequence length
- dim: Dimension of the model
- depth: Depth of the model
- dim_head: Dimension of the model head
- heads: Number of heads
- use_abs_pos_emb: Whether to use absolute position embedding
- alibi_pos_bias: Alibi position bias
- alibi_num_heads: Number of alibi heads
- rotary_xpos: Rotary position
- attn_flash: Whether to use flash attention
- attn_kv_heads: Number of key/value heads (grouped-query attention)
- qk_norm: Query-key normalization
- attn_qk_norm: Attention query-key normalization
- attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
"""
super().__init__()
try:
self.Andromeda = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
attn_kv_heads=attn_kv_heads,
qk_norm=qk_norm,
attn_qk_norm=attn_qk_norm,
attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
)
)
self.decoder = AutoregressiveWrapper(self.Andromeda)
except Exception as e:
print("Failed to initialize Andromeda: ", e)
raise
def forward(self, text_tokens, **kwargs):
"""
Forward pass through the model. It expects the input text_tokens.
Args:
- text_tokens: Input tokens
- kwargs: Other arguments
Returns:
- output from the decoder
"""
try:
# delegate to the autoregressive wrapper; it computes the loss when return_loss=True
return self.decoder(text_tokens, **kwargs)
except Exception as e:
print("Failed in forward method: ", e)
raise
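# Usage sketch (commented out; small illustrative hyperparameters, not a
# released configuration). Assumption: the wrapped decoder returns
# (logits, loss) when return_loss=True, its default.
#
# tokenizer = AndromedaTokenizer()
# model = Andromeda(num_tokens=len(tokenizer), max_seq_len=512, dim=256, depth=2, dim_head=32, heads=8, alibi_num_heads=4)
# tokens = tokenizer.tokenize_texts(["Hello world"])
# logits, loss = model(tokens)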
|
########### SETUP CONFIG
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
# import bitsandbytes as bnb
from torch.distributed.fsdp import (
BackwardPrefetch,
FullyShardedDataParallel,
MixedPrecision,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from transformers import (
AutoTokenizer,
default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
set_seed,
)
# from Andromeda.model import Andromeda
# state = AcceleratorState()
logger = get_logger(__name__, log_level="INFO")
class CFG:
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 1e-4 #3e-4 # 1e-4 for lion
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
RESUME_FROM_CHECKPOINT: str = ""  # set to a saved checkpoint dir (e.g. "checkpoints/step_1000") to resume
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "Andromeda"
LOGGING_STEPS: int = 100
# helpers
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, Transformer)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
Andromeda_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Transformer,
},
)
else:
Andromeda_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=Andromeda_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
if accelerator is not None: accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
try:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
except KeyError:
# print(f"Parameter {param_name} does not exist in the model")
pass
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder; we could instead pad if the model supported it.
# Customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
# TODO: switch to the Falcon RefinedWeb dataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
state = AcceleratorState()
state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = CFG.BATCH_SIZE  # DeepSpeed needs the micro batch size set explicitly in its config
accelerator.init_trackers(
project_name="Andromeda",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
# init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
model = Andromeda1Billion()
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator=accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='lion',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
# if False: # if CFG.USE_DEEPSPEED:
# lr_scheduler = DummyScheduler(
# optim,
# total_num_steps=max_train_steps * accelerator.num_processes,
# warmup_num_steps=NUM_WARMUP_STEPS
# )
# else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# Recalculate max_train_steps after accelerator.prepare(), since sharding the dataloader across processes can change its length
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# log every CFG.LOGGING_STEPS steps
if CFG.LOGGING_STEPS > 0 and step % CFG.LOGGING_STEPS == 0:
logger.info(
f"Step: {completed_steps}/{max_train_steps}, Loss: {loss.item():.5f}"
)
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def train():
# Defaults below are single-node assumptions; override them, or configure the
# run with "accelerate config", when scaling to multiple GPUs and clusters.
os.environ.setdefault('MASTER_ADDR', 'localhost')
os.environ.setdefault('MASTER_PORT', '9994')
os.environ.setdefault('RANK', str(0))  # global rank of this process
os.environ.setdefault('WORLD_SIZE', str(torch.cuda.device_count()))
dist.init_process_group(backend='nccl') #init_method="env://")
Train()
if __name__ == '__main__':
train() |
# class AndromedaEval:
# def __init__(self, path, seed=42, device=None):
# self.path = path
# self.seed = seed
# self.device = device
# if self.device is None:
# self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# set_seed(self.seed)
# #tokenizer
# self.tokenizer = AndromedaTokenizer
# #model
# self.model = Andromeda
# #checkpoint
# self.model.load_state_dict(torch.load(self.path))
# self.model.eval()
# #device
# self.model = self.model.to(self.device)
# #metrics
# self.metrics = {}
# self.reset_metrics()
# def reset_metrics(self):
# self.metrics = {
# "generation_steps": None,
# "time_forward": [],
# "time_forward_average": None,
# "memory_usages": [],
# "memory_usage_average": None,
# "time_end_to_end": None,
# "throughput": None
# }
# def get_num_params(self):
# num_params = sum(param.numel() for param in self.model.parameters() if param.requires_grad)
# return num_params
# def generate(self, prompt, generation_steps=32):
# #make sure all of the metrics reset at every generation
# self.reset_metrics()
# self.metrics["generation_steps"] = generation_steps
# tokens = self.tokenizer.encode(prompt)
# tokens_new = []
# time_end_to_end_0 = time.time()
# #generation loop
# for _ in range(generation_steps):
# tokens_tensor = torch.tensor([tokens], device=self.device)
# #forward pass
# tracemalloc.start()
# time_forward_0 = time.time()
# logits = self.model(tokens_tensor, return_loss=False)[:, -1] # no loss takes the output of the last tokens
# time_forward_1 = time.time()
# _, memory_usage = tracemalloc.get_traced_memory()
# tracemalloc.stop()
# self.metrics["memory_usages"].append(memory_usage)
# time_forward = time_forward_1 - time_forward_0
# self.metrics["times_forward"].append(time_forward)
# next_token = torch.argmax(logits).item()
# #save the newly generated token
# tokens.append(next_token)
# tokens_new.append(next_token)
# time_end_to_end_1 = time.time()
# time_end_to_end = time_end_to_end_1 - time_end_to_end_0
# self.metrics["time_end_to_end"] = time_end_to_end
# decoded = self.tokenizer.decode(tokens)
# self.metrics["time_forward_average"] = np.mean(self.metrics["times_forward"])
# self.metrics["memory_usage_average"] = np.mean(self.metrics["memory_usage"])
# self.metrics['throughput'] = generation_steps / np.sum(self.metrics["times_forward"])
# return tokens_new, decoded
# def main():
# prompt = 'My name is'
# andromeda = AndromedaEval(path='checkpoints/step_44927_6656/pytorch_model.bin')
# num_params = andromeda.get_num_params()
# print(f'The model has {num_params} parameters')
# _, output = andromeda.generate(prompt)
# for metric, value in andromeda.metrics.items():
# print(f'{metric}: {value}\n')
# print('\n')
# print(output)
def main():
allow_ops_in_compiled_graph()
torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
parser = argparse.ArgumentParser(description="Generate text using Andromeda model")
parser.add_argument("prompt", type=str, help="Text prompt to generate text")
parser.add_argument(
"--seq_len", type=int, default=256, help="Sequence length for generated text"
)
parser.add_argument(
"--temperature", type=float, default=0.8, help="Sampling temperature"
)
parser.add_argument(
"--filter_thres", type=float, default=0.9, help="Filter threshold for sampling"
)
parser.add_argument(
"--model",
type=str,
default="andromeda-e-1",
help="Model to use for generation",
)
parser.add_argument(
"--dtype",
type=str,
default="fp32",
help="Data type for the model: 'bf16', or 'fp32'",
)
args = parser.parse_args()
dtype = torch.float32
if args.dtype == 'bf16':
dtype = torch.bfloat16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#need to submit to torch hub
model = torch.hub.load("apacai/andromeda", args.model).to(device).to(dtype)
opt_model = torch.compile(model, backend="hidet")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
encoded_text = tokenizer(args.prompt, return_tensors="pt")
output_tensor = opt_model.generate(
seq_len=args.seq_len,
prompt=encoded_text["input_ids"].to(device),
temperature=args.temperature,
filter_thres=args.filter_thres,
pad_value=0.0,
eos_token=tokenizer.eos_token_id,
return_seq_without_prompt=False,
use_tqdm=True,
)
decoded_output = tokenizer.batch_decode(output_tensor, skip_special_tokens=True)
return decoded_output
if __name__ == "__main__":
generated_text = main()
for text in generated_text:
print(f"{text}") |
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
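# Usage sketch (commented out): filter a toy logits row with top_k / top_p
# before sampling. Note the thres convention above: for top_k, thres=0.9 keeps
# ceil(0.1 * vocab) entries; for top_p, tokens are kept until roughly
# (1 - thres) of the probability mass is covered, so higher thres filters more.
#
# logits = torch.tensor([[2.0, 1.0, 0.5, 0.1, -1.0]])
# k_filtered = top_k(logits, thres=0.9)          # keeps ceil(0.1 * 5) = 1 logit
# p_filtered = top_p(logits, thres=0.9)          # keeps tokens covering ~10% of the mass (plus the one that crosses)
# probs = F.softmax(k_filtered / 0.8, dim=-1)    # temperature 0.8
# sample = torch.multinomial(probs, 1)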
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits
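# Usage sketch (commented out; toy dimensions only). Any net exposing
# `max_seq_len` and returning per-token logits can be wrapped; the tiny module
# below is an assumption for illustration, not part of the library.
#
# class ToyNet(nn.Module):
#     def __init__(self, num_tokens=256, dim=64, max_seq_len=128):
#         super().__init__()
#         self.max_seq_len = max_seq_len
#         self.emb = nn.Embedding(num_tokens, dim)
#         self.to_logits = nn.Linear(dim, num_tokens)
#     def forward(self, x, **kwargs):
#         return self.to_logits(self.emb(x))
#
# wrapper = AutoregressiveWrapper(ToyNet())
# seq = torch.randint(0, 256, (2, 32))
# logits, loss = wrapper(seq)                    # teacher-forced loss over shifted targets
# generated = wrapper.generate(seq[:, :4], 16)   # continue 4-token prompts by 16 tokens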
|
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
|
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
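# Sanity-check sketch (commented out): both constructions should produce the
# same upper-triangular "mask out the future" pattern, including for
# rectangular query/key lengths.
#
# i, j = 4, 6
# a = create_causal_mask(i, j, device=torch.device('cpu'))
# b = onnx_create_causal_mask(i, j, device=torch.device('cpu'))
# assert torch.equal(a, b)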
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
# handle grouped multi-query attention
if kv_heads == 1:
k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v))
elif kv_heads < heads:
k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v))
# handle zero kv, as means for allowing network to attend to nothing
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
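# minimal usage sketch for Attend, assuming the torch / einops imports at the top of this file
# shapes below are hypothetical: (batch, heads, seq len, dim head)
def _demo_attend_usage():
    attend = Attend(causal = True, dropout = 0.1, flash = False)
    q = torch.randn(2, 8, 1024, 64)
    k = torch.randn(2, 8, 1024, 64)
    v = torch.randn(2, 8, 1024, 64)
    out, intermediates = attend(q, k, v)
    return out.shape  # (2, 8, 1024, 64)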
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
        # now loop through each head, with the output of the previous head summed into the query of the next head
        # thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates |
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def divisible_by(num, den):
return (num % den) == 0
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
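# minimal sketch of how the attention z-loss above would be applied, assuming pre-softmax
# attention logits of hypothetical shape (batch, heads, queries, keys)
def _demo_calc_z_loss():
    attn_logits = torch.randn(2, 4, 8, 8)
    loss = calc_z_loss([attn_logits], weight = 1e-4)
    return loss  # scalar tensor penalizing large log-sum-exp of the logits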
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
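# minimal sketch of the structured sequence dropout above, with hypothetical shapes
def _demo_dropout_seq():
    seq = torch.randn(2, 10, 512)
    mask = torch.ones(2, 10, dtype = torch.bool)
    seq, mask = dropout_seq(seq, mask, dropout = 0.5)
    return seq.shape, mask.shape  # (2, 5, 512), (2, 5) - roughly half the tokens kept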
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert divisible_by(dim, 2)
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
        assert depth >= 1, 'depth for dynamic position bias MLP must be greater than or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
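# worked example of the ALiBi slope schedule above: for 8 heads (a power of two),
# the slopes form a geometric sequence starting at 2 ** -1 and halving per head
def _demo_alibi_slopes():
    slopes = AlibiPositionalBias._get_slopes(8)
    # slopes == [0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625, 0.0078125, 0.00390625]
    return slopes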
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
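# minimal sketch of applying rotary embeddings, assuming queries / keys shaped
# (batch, heads, seq len, dim head); values here are hypothetical
def _demo_rotary():
    rotary = RotaryEmbedding(dim = 32)
    q = torch.randn(1, 8, 128, 32)
    freqs, xpos_scale = rotary(q.shape[-2], q.device)
    q_rotated = apply_rotary_pos_emb(q, freqs, scale = xpos_scale)
    return q_rotated.shape  # unchanged: (1, 8, 128, 32)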
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
def scale_fn(t):
return t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
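# minimal sketch of token shifting: half of the features are shifted one position
# into the past before the wrapped block runs (hypothetical dimensions)
def _demo_shift_tokens():
    block = ShiftTokens(range(0, 2), nn.Linear(64, 64))
    x = torch.randn(2, 16, 64)
    return block(x).shape  # (2, 16, 64)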
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
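# minimal sketch of the FeedForward block above, using the GLU variant (hypothetical sizes)
def _demo_feedforward():
    ff = FeedForward(dim = 512, mult = 4, glu = True, dropout = 0.1)
    x = torch.randn(2, 128, 512)
    return ff(x).shape  # (2, 128, 512)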
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
kv_heads = None,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both'
value_dim_head = default(value_dim_head, dim_head)
kv_heads = default(kv_heads, heads)
kv_heads = 1 if one_kv_head else kv_heads
assert divisible_by(heads, kv_heads)
self.kv_heads = kv_heads
q_dim = dim_head * heads
k_dim = dim_head * kv_heads
v_dim = value_dim_head * kv_heads
out_dim = value_dim_head * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
            assert 2 <= attn_mask.ndim <= 4, 'attention mask must have at least 2 and at most 4 dimensions'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
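# minimal sketch of the Attention module above as causal self attention with a
# boolean padding mask (hypothetical sizes)
def _demo_attention_module():
    attn = Attention(dim = 512, heads = 8, causal = True)
    x = torch.randn(2, 128, 512)
    mask = torch.ones(2, 128, dtype = torch.bool)
    out, intermediates = attn(x, mask = mask)
    return out.shape  # (2, 128, 512)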
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
        assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than or equal to the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
            assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than or equal to the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be greater than 0 and less than or equal to the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
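# minimal sketch wiring an Encoder and a cross-attending Decoder together
# (hypothetical sizes; inputs are already-embedded token representations)
def _demo_encoder_decoder():
    enc = Encoder(dim = 512, depth = 6, heads = 8)
    dec = Decoder(dim = 512, depth = 6, heads = 8, cross_attend = True)
    src = torch.randn(1, 64, 512)
    tgt = torch.randn(1, 32, 512)
    context = enc(src)
    return dec(tgt, context = context).shape  # (1, 32, 512)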
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
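# minimal sketch of the ViT wrapper above: 256x256 images, 32x32 patches,
# 1000-way classification (all sizes hypothetical)
def _demo_vit():
    vit = ViTransformerWrapper(
        image_size = 256,
        patch_size = 32,
        num_classes = 1000,
        attn_layers = Encoder(dim = 512, depth = 6, heads = 8)
    )
    img = torch.randn(1, 3, 256, 256)
    return vit(img).shape  # (1, 1000)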
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
            # auto-handle masking after prepending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out |
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
L,
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on,
Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
IS_CAUSAL: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
qvk_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(BLOCK_DMODEL, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1)
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0)
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# scale sm_scale by log_2(e) and use
# 2^x instead of exp in the loop because CSE and LICM
# don't work as expected with `exp` in the loop
qk_scale = sm_scale * 1.44269504
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(tl.float16)
# loop over k, v and update accumulator
lo = 0
hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX
for start_n in range(lo, hi, BLOCK_N):
# -- load k, v --
k = tl.load(K_block_ptr)
v = tl.load(V_block_ptr)
# -- compute qk ---
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if IS_CAUSAL:
qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf"))
qk += tl.dot(q, k)
# -- compute scaling constant ---
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
# -- scale and update acc --
acc_scale = l_i * 0 + alpha # workaround some compiler bug
acc *= acc_scale[:, None]
acc += tl.dot(p.to(tl.float16), v)
# -- update m_i and l_i --
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
# update pointers
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
# write back l and m
acc = acc / l_i[:, None]
l_ptrs = L + off_hz * N_CTX + offs_m
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
# write back O
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
tl.store(O_block_ptr, acc.to(tl.float16))
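# sanity check of the exp2 rewrite used in the kernel above: softmax needs
# exp(qk * sm_scale), which equals 2 ** (qk * sm_scale * log2(e)) with
# log2(e) ~= 1.44269504; plain torch, no GPU required (values are hypothetical)
def _demo_exp2_trick():
    qk = torch.randn(4, 4)
    sm_scale = 0.125
    reference = torch.exp(qk * sm_scale)
    rewritten = torch.exp2(qk * sm_scale * 1.44269504)
    return torch.allclose(reference, rewritten, atol = 1e-5)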
@triton.jit
def _bwd_preprocess(
Out, DO,
Delta,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
# load
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
# compute
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_m, delta)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L,
D,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
Z, H, N_CTX,
num_block,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
CAUSAL: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
qk_scale = sm_scale * 1.44269504
# offset pointers for batch/head
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
if CAUSAL:
lo = start_n * BLOCK_M
else:
lo = 0
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
l_ptrs = L + off_hz * N_CTX
# initialize dv amd dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# k and v stay in SRAM throughout
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
if CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), float(0.), float("-inf"))
else:
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
qk *= qk_scale
l_i = tl.load(l_ptrs + offs_m_curr)
p = tl.math.exp2(qk - l_i[:, None])
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
            # compute ds = p * dp * sm_scale (dp already includes the -delta term from above)
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
# compute dq
dq = tl.load(dq_ptrs)
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq)
# increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
empty = torch.empty(128, device="cuda")
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, causal, sm_scale):
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
o = torch.empty_like(q)
BLOCK_M = 128
BLOCK_N = 64
grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q, k, v, sm_scale,
L,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q.shape[0], q.shape[1], q.shape[2],
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
IS_CAUSAL=causal,
num_warps=num_warps,
num_stages=4)
ctx.save_for_backward(q, k, v, o, L)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
BLOCK = 128
q, k, v, o, L = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
delta = torch.empty_like(L)
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )](
o, do,
delta,
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do,
dq, dk, dv,
L, delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2],
ctx.grid[0],
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
CAUSAL=ctx.causal,
num_stages=1,
)
return dq, dk, dv, None, None
attention = _attention.apply
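# minimal usage sketch for the fused attention op above; per the asserts in
# _attention.forward, head dim must be one of {16, 32, 64, 128}, and a CUDA
# device is required (shapes and scale below are hypothetical)
def _demo_triton_attention():
    q = torch.randn(1, 8, 1024, 64, device = 'cuda', dtype = torch.float16)
    k = torch.randn(1, 8, 1024, 64, device = 'cuda', dtype = torch.float16)
    v = torch.randn(1, 8, 1024, 64, device = 'cuda', dtype = torch.float16)
    out = attention(q, k, v, True, 0.125)  # causal = True, sm_scale = 0.125
    return out.shape  # (1, 8, 1024, 64)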
|
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1 |
# from palm_rlhf_pytorch.palm import LayerNorm
# from palm.utils import print_main
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float = 0.1,
beta_1: float = 0.90,
beta_2: float = 0.95,
optimizer_type: str = "adamw",
use_fsdp: bool = True,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
        optimizer_type (str): The type of the optimizer. Can be 'adamw' or 'stable_adamw'.
        use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
Returns:
Optimizer: The initialized optimizer.
Raises:
        ValueError: If the optimizer type is not 'adamw' or 'stable_adamw'.
"""
print_main(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
print_main(param_name)
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
        for module_type in [LayerNorm, torch.nn.Embedding]:
            if isinstance(module, module_type):
                if module_name == exclude_module:
                    # The token embedding: exclude its weight from weight decay.
                    no_decay.append(f"{module_name}.weight")
                else:
                    # A LayerNorm module: its scale parameter is named 'gamma' and gets no weight decay.
                    no_decay.append(f"{module_name}.gamma")
                # Exit the inner loop since the desired module has been found.
                break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
        for module_type in [torch.nn.Linear]:
            if isinstance(module, module_type):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "adamw":
optimizer = AdamW(
grouped_params,
lr=learning_rate,
betas=(beta_1, beta_2),
)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params,
lr=learning_rate,
betas=(beta_1, beta_2),
)
else:
        raise ValueError(
            "Invalid optimizer_type. Expected 'adamw' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer |
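# Illustrative sketch of the parameter-grouping idea used by decoupled_optimizer above
# (assumptions: a plain nn.Sequential, no FSDP, standard PyTorch AdamW; the real function
# additionally special-cases the token embedding and the to_logits head).
import torch
from torch.optim import AdamW
_m = torch.nn.Sequential(torch.nn.Embedding(10, 8), torch.nn.Linear(8, 8), torch.nn.LayerNorm(8))
_decay, _no_decay = [], []
for _name, _param in _m.named_parameters():
    # Linear weights get weight decay; embeddings, norms and biases do not.
    if isinstance(_m[int(_name.split(".")[0])], torch.nn.Linear) and _name.endswith("weight"):
        _decay.append(_param)
    else:
        _no_decay.append(_param)
_grouped = [{"params": _decay, "weight_decay": 0.1}, {"params": _no_decay, "weight_decay": 0.0}]
_opt = AdamW(_grouped, lr=3e-4, betas=(0.9, 0.95))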
#helpers
def exists(val):
return val is not None
#decorators
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def defaults(val, d):
return val if exists(val) else d
#tensor helpers
def log(t, eps=1e-20):
return torch.log(t.clamp(min = eps))
def masked_mean(seq, mask=None, dim=1, keepdim=True):
if not exists(mask):
return seq.mean(dim=dim)
if seq.ndim == 3:
mask = rearrange(mask, 'b n -> b n 1')
masked_seq = seq.masked_fill(~mask, 0.)
numer = masked_seq.sum(dim=dim, keepdim=keepdim)
denom = mask.sum(dim=dim, keepdim=keepdim)
masked_mean = numer / denom.clamp(min = 1e-3)
masked_mean = masked_mean.masked_fill(denom == 0, 0.)
return masked_mean
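# Quick worked example of masked_mean above (illustrative values):
# masked_mean(torch.tensor([[1., 2., 3.]]), torch.tensor([[True, True, False]]))
# -> tensor([[1.5]])  (the masked 3. is zeroed out; 1. and 2. are averaged over denom=2)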
#sampling helpers
def gumbel_noise(t):
    noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim=-1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim)
def top_p(logits, thres=0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float("-inf")
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres=0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
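# Illustrative sampling sketch (not part of the original file): filter logits with top_k,
# then draw one token id per row with gumbel_sample. Shapes and thresholds are assumptions.
# _logits = torch.randn(1, 100)                       # (batch, vocab)
# _filtered = top_k(_logits, thres=0.9)               # keeps ceil(10% of vocab) logits, rest -> -inf
# _token = gumbel_sample(_filtered, temperature=0.8)  # shape (1,), the sampled token id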
class LoRA(nn.Module):
def __init__(
self,
dim,
dim_out,
r=8,
alpha=None
):
super().__init__()
alpha = defaults(alpha, r)
self.scale = alpha / r
        self.A = nn.Parameter(torch.randn(dim, r))
        self.B = nn.Parameter(torch.zeros(r, dim_out))
    @property
    def weight(self):
        # Low-rank delta W = (A @ B) * scale, the standard LoRA parameterization.
        return (self.A @ self.B) * self.scale
    def forward(self, x):
        return x @ self.weight
#reward model
@beartype
class RewardModel(nn.Module):
def __init__(
self,
model: Andromeda,
dropout=0.1,
num_binned_output = 0.,
use_lora = True,
lora_r = 8,
reward_lora_scope = 'reward',
):
super().__init__()
        self.model = copy.deepcopy(model)
self.model.set_dropout(dropout)
        self.reward_lora_scope = reward_lora_scope if use_lora else None
if exists(self.reward_lora_scope):
self.model.add_finetune_params(reward_lora_scope, lora_r = lora_r)
dim = model.dim
self.binned_output = num_binned_output > 1
self.prompt_embed = nn.Parameter(torch.zeros(1, 1, dim))
self.response_embed = nn.Parameter(torch.zeros(1, 1, dim))
if self.binned_output:
self.to_pred = nn.Linear(dim, num_binned_output)
else:
self.to_pred = nn.Sequential(
nn.Linear(dim, 1, bias=False),
Rearrange('... 1 -> ...')
)
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def finetune_parameters(self):
return (
*self.to_pred.parameters(),
            *(self.model.finetune_parameters(self.reward_lora_scope) if exists(self.reward_lora_scope) else self.model.parameters())
)
def forward(
self,
x,
mask=None,
prompt_mask=None,
prompt_lengths=None,
labels=None,
sample=False,
sample_temperature=1.,
disable_lora=False
):
assert not (exists(prompt_mask) and exists(prompt_lengths))
#derive prompt mask from prompt lengths
if exists(prompt_lengths):
batch, seq_len = x.shape
arange = torch.arange(seq_len, device = x.device)
            prompt_mask = repeat(arange, 'n -> b n', b=batch) < rearrange(prompt_lengths, 'b -> b 1')
        # the reward model should have an understanding of which section is the prompt and which is the response
extra_embed = None
if exists(prompt_mask):
extra_embed = torch.where(
rearrange(prompt_mask, 'b n -> b n 1'),
self.prompt_embed,
self.response_embed
)
embeds = self.model(
x,
) |
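# Illustrative sketch of the prompt-mask derivation used in RewardModel.forward above
# (assumes the corrected form, where positions before prompt_lengths count as prompt).
import torch
from einops import rearrange, repeat
_prompt_lengths = torch.tensor([2, 4])     # batch of 2 sequences
_arange = torch.arange(6)                  # seq_len = 6
_prompt_mask = repeat(_arange, 'n -> b n', b=2) < rearrange(_prompt_lengths, 'b -> b 1')
# _prompt_mask:
# tensor([[ True,  True, False, False, False, False],
#         [ True,  True,  True,  True, False, False]])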
def print_num_params(model):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    if dist.is_available() and dist.is_initialized():
if dist.get_rank() == 0:
print(f"Number of parameters in model: {n_params}")
else:
print(f"Number of parameters in model: {n_params}")
def print_main(msg):
    if dist.is_available() and dist.is_initialized():
if dist.get_rank() == 0:
print(msg)
else:
print(msg) |
class TestDatasetBuilder(unittest.TestCase):
def setUp(self):
self.builder = DatasetBuilder(dataset_name="tiiuae/falcon-refinedweb")
def test_initialization(self):
self.assertEqual(self.builder.dataset_name, "tiiuae/falcon-refinedweb", "Dataset name is not correctly set.")
self.assertEqual(self.builder.seq_len, 8192, "Sequence length is not correctly set.")
self.assertEqual(self.builder.tokenizer, "EleutherAI/gpt-neox-20b", "Tokenizer is not correctly set.")
def test_build_dataset(self):
dataset = self.builder.build_dataset()
self.assertIsNotNone(dataset, "Dataset is not built.")
self.assertTrue(hasattr(dataset, "map"), "Dataset does not have a map method.")
def test_tokenize_function(self):
example = {"text": ["Hello, world!", "Andromeda is great."]}
tokenized_example = self.builder.tokenize_function(example)
self.assertIsInstance(tokenized_example, dict, "Tokenized example is not a dictionary.")
self.assertTrue(all(isinstance(t, list) for t in tokenized_example.values()), "Tokenized example values are not lists.")
def test_group_texts(self):
examples = {"input_ids": [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]] * 10}
grouped_examples = self.builder.group_texts(examples)
self.assertIsInstance(grouped_examples, dict, "Grouped examples is not a dictionary.")
self.assertTrue(all(isinstance(t, list) for t in grouped_examples.values()), "Grouped example values are not lists.")
self.assertTrue(all(len(t) == self.builder.seq_len for t in grouped_examples["input_ids"]), "Grouped example sequences are not the correct length.")
if __name__ == '__main__':
unittest.main() |
class TestAndromeda(unittest.TestCase):
def setUp(self):
self.model = Andromeda()
def test_initialization(self):
        self.assertIsNotNone(self.model.Andromeda, "Transformer is not initialized.")
self.assertIsNotNone(self.model.decoder, "AutoregressiveWrapper is not initialized.")
def test_forward_pass(self):
input_tokens = torch.randint(0, 50432, (1, 8192))
output = self.model(input_tokens)
self.assertIsInstance(output, torch.Tensor, "Output is not a PyTorch tensor.")
self.assertEqual(output.shape[0], input_tokens.shape[0], "Output batch size does not match input.")
def test_error_handling(self):
with self.assertRaises(Exception):
self.model.forward(None)
def test_model_parameters(self):
self.assertEqual(self.model.Andromeda.num_tokens, 50432, "Number of tokens is not correctly set.")
self.assertEqual(self.model.Andromeda.max_seq_len, 8192, "Max sequence length is not correctly set.")
def test_model_output(self):
input_tokens = torch.randint(0, 50432, (1, 8192))
output1 = self.model(input_tokens)
output2 = self.model(input_tokens)
self.assertTrue(torch.allclose(output1, output2), "Model does not produce consistent output.")
class TestAndromedaExtended(unittest.TestCase):
def setUp(self):
self.model = Andromeda()
def test_input_size(self):
for seq_len in [512, 1024, 2048, 4096]:
input_tokens = torch.randint(0, 50432, (1, seq_len))
output = self.model(input_tokens)
self.assertEqual(output.shape[1], seq_len, f"Output sequence length does not match input for seq_len={seq_len}.")
def test_batch_size(self):
for batch_size in [2, 4, 8, 16]:
input_tokens = torch.randint(0, 50432, (batch_size, 8192))
output = self.model(input_tokens)
self.assertEqual(output.shape[0], batch_size, f"Output batch size does not match input for batch_size={batch_size}.")
def test_token_range(self):
for token in [0, 50431]:
input_tokens = torch.full((1, 8192), fill_value=token)
output = self.model(input_tokens)
self.assertIsInstance(output, torch.Tensor, f"Output is not a PyTorch tensor for token={token}.")
def test_model_depth(self):
for depth in [16, 32, 64]:
model = Andromeda(depth=depth)
self.assertEqual(model.Andromeda.attn_layers.depth, depth, f"Model depth is not correctly set for depth={depth}.")
def test_model_dim(self):
for dim in [1280, 2560, 5120]:
model = Andromeda(dim=dim)
self.assertEqual(model.Andromeda.attn_layers.dim, dim, f"Model dimension is not correctly set for dim={dim}.")
def test_model_heads(self):
for heads in [12, 24, 48]:
model = Andromeda(heads=heads)
self.assertEqual(model.Andromeda.attn_layers.heads, heads, f"Number of heads is not correctly set for heads={heads}.")
def test_model_dim_head(self):
for dim_head in [64, 128, 256]:
model = Andromeda(dim_head=dim_head)
self.assertEqual(model.Andromeda.attn_layers.dim_head, dim_head, f"Head dimension is not correctly set for dim_head={dim_head}.")
def test_model_alibi_num_heads(self):
for alibi_num_heads in [6, 12, 24]:
model = Andromeda(alibi_num_heads=alibi_num_heads)
self.assertEqual(model.Andromeda.attn_layers.alibi_num_heads, alibi_num_heads, f"Number of alibi heads is not correctly set for alibi_num_heads={alibi_num_heads}.")
def test_model_shift_tokens(self):
for shift_tokens in [0, 1, 2]:
model = Andromeda(shift_tokens=shift_tokens)
self.assertEqual(model.Andromeda.attn_layers.shift_tokens, shift_tokens, f"Number of shift tokens is not correctly set for shift_tokens={shift_tokens}.")
def test_model_use_abs_pos_emb(self):
for use_abs_pos_emb in [True, False]:
model = Andromeda(use_abs_pos_emb=use_abs_pos_emb)
self.assertEqual(model.Andromeda.use_abs_pos_emb, use_abs_pos_emb, f"Use absolute position embedding flag is not correctly set for use_abs_pos_emb={use_abs_pos_emb}.")
def test_model_alibi_pos_bias(self):
for alibi_pos_bias in [True, False]:
model = Andromeda(alibi_pos_bias=alibi_pos_bias)
self.assertEqual(model.Andromeda.attn_layers.alibi_pos_bias, alibi_pos_bias, f"Alibi position bias flag is not correctly set for alibi_pos_bias={alibi_pos_bias}.")
def test_model_rotary_xpos(self):
for rotary_xpos in [True, False]:
model = Andromeda(rotary_xpos=rotary_xpos)
self.assertEqual(model.Andromeda.attn_layers.rotary_xpos, rotary_xpos, f"Rotary position flag is not correctly set for rotary_xpos={rotary_xpos}.")
def test_model_attn_flash(self):
for attn_flash in [True, False]:
model = Andromeda(attn_flash=attn_flash)
self.assertEqual(model.Andromeda.attn_layers.attn_flash, attn_flash, f"Attention flash flag is not correctly set for attn_flash={attn_flash}")
if __name__ == '__main__':
unittest.main() |
class TestAndromedaTokenizer(unittest.TestCase):
def setUp(self):
self.tokenizer = AndromedaTokenizer()
def test_initialization(self):
self.assertIsNotNone(self.tokenizer.tokenizer, "Tokenizer is not initialized.")
self.assertEqual(self.tokenizer.tokenizer.eos_token, "<eos>", "EOS token is not correctly set.")
self.assertEqual(self.tokenizer.tokenizer.pad_token, "<pad>", "PAD token is not correctly set.")
self.assertEqual(self.tokenizer.tokenizer.model_max_length, 8192, "Model max length is not correctly set.")
def test_tokenize_texts(self):
texts = ["Hello, world!", "Andromeda is great."]
tokenized_texts = self.tokenizer.tokenize_texts(texts)
self.assertEqual(tokenized_texts.shape[0], len(texts), "Number of tokenized texts does not match input.")
self.assertTrue(all(isinstance(t, torch.Tensor) for t in tokenized_texts), "Not all tokenized texts are PyTorch tensors.")
def test_decode(self):
texts = ["Hello, world!", "Andromeda is great."]
tokenized_texts = self.tokenizer.tokenize_texts(texts)
decoded_texts = [self.tokenizer.decode(t) for t in tokenized_texts]
self.assertEqual(decoded_texts, texts, "Decoded texts do not match original texts.")
def test_len(self):
num_tokens = len(self.tokenizer)
self.assertIsInstance(num_tokens, int, "Number of tokens is not an integer.")
self.assertGreater(num_tokens, 0, "Number of tokens is not greater than 0.")
if __name__ == '__main__':
unittest.main() |
# from Andromeda.model import Andromeda
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class AccuracyMetrics:
def __init__(self):
self.rouge = Rouge()
def calculate_perplexity(self, model, data_loader):
model.eval()
total_loss = 0
with torch.no_grad():
for batch in data_loader:
input_ids, labels = batch
output = model(input_ids)
loss = F.cross_entropy(output.view(-1, output.size(-1)), labels.view(-1))
total_loss += loss.item()
return torch.exp(torch.tensor(total_loss / len(data_loader)))
def calculate_bleu(self, references, hypotheses):
return corpus_bleu(references, hypotheses)
def calculate_rouge(self, references, hypotheses):
scores = self.rouge.get_scores(hypotheses, references, avg=True)
return scores
def calculate_f1(self, true_labels, pred_labels):
return f1_score(true_labels, pred_labels, average="weighted")
#mock test dataset
test_dataset = datasets.FakeData(size=1000, transform=transforms.ToTensor())
#model
model = Andromeda(
num_tokens=50304,
dim=1024,
depth=24,
dim_head=128,
heads=8,
alibi_num_heads=4
)
# Usage:
accuracy_metrics = AccuracyMetrics()
# Calculate Perplexity
# (Note: data_loader, references, hypotheses, true_labels and pred_labels are assumed to be
# defined elsewhere before this usage block runs.)
perplexity = accuracy_metrics.calculate_perplexity(model, data_loader)
print('Perplexity:', perplexity)
# Calculate BLEU
bleu = accuracy_metrics.calculate_bleu(references, hypotheses)
print('BLEU Score:', bleu)
# Calculate ROUGE
rouge_scores = accuracy_metrics.calculate_rouge(references, hypotheses)
print('ROUGE Scores:', rouge_scores)
# Calculate F1 Score
f1 = accuracy_metrics.calculate_f1(true_labels, pred_labels)
print('F1 Score:', f1)
# Add at the bottom of your file
if __name__ == "__main__":
AccuracyMetrics() |
# from Andromeda.model import Andromeda
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class AndromedaModelTest:
def __init__(self):
        self.model = Andromeda().cuda()
        self.optimizer = StableAdamWUnfused(self.model.parameters())
self.loss_function = torch.nn.CrossEntropyLoss()
self.test_input = torch.randint(0, 256, (1, 1024)).cuda()
def test_forward_pass(self):
output = self.model(self.test_input)
assert output.shape == (1, 1024, 64007), "Forward pass output shape mismatch"
def test_backward_pass(self):
self.optimizer.zero_grad()
output = self.model(self.test_input)
        loss = self.loss_function(output.view(-1, output.size(-1)), self.test_input.view(-1))
loss.backward()
for name, parameter in self.model.named_parameters():
            assert not torch.isnan(parameter.grad).any(), f"Gradient for {name} contains NaNs"
            assert not torch.isinf(parameter.grad).any(), f"Gradient for {name} contains Infs"
def test_optimizer_step(self):
        initial_params = [param.clone() for param in self.model.parameters()]
output = self.model(self.test_input)
        loss = self.loss_function(output.view(-1, output.size(-1)), self.test_input.view(-1))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
for initial_param, param in zip(initial_params, self.model.parameters()):
assert not torch.equal(initial_param, param), "Model Parameters did not change after an optimizer step"
class SpeedMetrics:
def __init__(self, model):
self.model = model.to(device)
def forward_pass_time(self):
start_time = time.time()
self.model.decoder.forward(torch.randint(0, 50304, (1, 8192), device=device, dtype=torch.long))[0]
end_time = time.time()
return end_time - start_time
def backward_pass_time(self):
model_input = self.model.decoder.forward(torch.randint(0, 50304, (1, 8192), device=device, dtype=torch.long))[0]
start_time = time.time()
loss = torch.nn.CrossEntropyLoss()(model_input, torch.randint(0, 50304, (1, 8192), device=device, dtype=torch.long))
loss.backward()
end_time = time.time()
return end_time - start_time
def end_to_end_latency(self):
start_time = time.time()
self.model.forward(torch.randint(0, 50304, (1, 8192), device=device, dtype=torch.long))
end_time = time.time()
return end_time - start_time
class ScalabilityMetrics:
def __init__(self, model, dataset):
self.model = model
self.dataset = dataset
self.dataloader = DataLoader(dataset, batch_size=32)
def throughput(self):
start_time = time.time()
for i, data in enumerate(self.dataloader, 0):
self.model.forward(data)
end_time = time.time()
return len(self.dataset) / (end_time - start_time)
class ConsistencyMetrics:
def __init__(self, model):
self.model = model
def consistency_over_time(self):
consistency_times = []
outputs_list = []
for _ in range(10):
start_time = time.time()
outputs = self.model.forward(torch.randint(0, 50304, (1, 8192)))
end_time = time.time()
consistency_times.append(end_time - start_time)
outputs_list.append(outputs.detach().numpy())
initial_output = outputs_list[0]
consistency_score = 0
for output in outputs_list[1:]:
if np.array_equal(initial_output, output):
consistency_score += 1
consistency_score = consistency_score / len(outputs_list) * 100
return consistency_times, consistency_score
class MemoryMetrics:
def __init__(self, model):
self.model = model
def memory_footprint(self):
tracemalloc.start()
self.model.forward(torch.randint(0, 50304, (1, 8192)))
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return current, peak
class SequenceMetrics:
def __init__(self, model):
self.model = model
def sequence_length_impact(self):
seq_lengths = [1024, 2048, 4096, 8192]
seq_impact_times = []
for length in seq_lengths:
start_time = time.time()
self.model.forward(torch.randint(0, 50304, (1, length)))
end_time = time.time()
seq_impact_times.append(end_time - start_time)
return seq_lengths, seq_impact_times
class FlopsBenchmark:
def __init__(self, model, bsz=32, d_model=1024, num_heads=8, sequence_lengths=list(range(500, 32001, 500))):
self.bsz = bsz
self.d_model = d_model
self.num_heads = num_heads
self.sequence_lengths = sequence_lengths
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.dtype=torch.float32
self.model = model.to(self.device)
def benchmark(self):
time_taken = []
tflops_per_s = []
for seq_len in self.sequence_lengths:
x = torch.randn(self.bsz, seq_len, self.d_model).to(self.device).type(self.dtype)
torch.cuda.synchronize()
start = time.time()
self.model(x)
torch.cuda.synchronize()
elapsed = time.time() - start
time_taken.append(elapsed)
total_flops = 4 * seq_len **2 * (self.d_model // self.num_heads) * self.num_heads
tflops_per_s.append(total_flops / elapsed / 1e12) # Convert to TFLOPs
for seq_len, elapsed, tflops in zip(self.sequence_lengths, time_taken, tflops_per_s):
print(f"Sequence length: {seq_len}, Time elapsed: {elapsed} s, TFLOPs/s: {tflops}")
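# Worked example for the attention-only estimate above (illustrative): with seq_len=2048,
# d_model=1024 and num_heads=8, head_dim = 1024 // 8 = 128, so
# total_flops = 4 * 2048**2 * 128 * 8, which is about 1.72e10 FLOPs; a 1-second forward pass
# would therefore correspond to roughly 0.017 TFLOPs/s.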
#mock test dataset
test_dataset = datasets.FakeData(size=1000, transform=transforms.ToTensor())
#model
model = Andromeda(
num_tokens=50304,
dim=1024,
depth=24,
dim_head=128,
heads=8,
alibi_num_heads=4
)
# speed test metrics test
speed_metrics = SpeedMetrics(model)
forward_pass_time = speed_metrics.forward_pass_time()
backward_pass_time = speed_metrics.backward_pass_time()
end_to_end_latency = speed_metrics.end_to_end_latency()
#scalability metrics test
scalability_metrics = ScalabilityMetrics(model, test_dataset)
throughput = scalability_metrics.throughput()
#consistency metrics test
consistency_metrics = ConsistencyMetrics(model)
consistency_times, consistency_score = consistency_metrics.consistency_over_time()
#memory metrics test
memory_metrics = MemoryMetrics(model)
current, peak = memory_metrics.memory_footprint()
#sequence metrics test
sequence_metrics = SequenceMetrics(model)
seq_lengths, seq_impact_times = sequence_metrics.sequence_length_impact()
#flops
flops_benchmark = FlopsBenchmark(model)
flops_benchmark.benchmark()
# Graphical Interface
fig, axs = plt.subplots(3)
axs[0].bar(["Forward Pass Time", "Backward Pass Time", "End-to-End Latency"], [forward_pass_time, backward_pass_time, end_to_end_latency])
axs[0].set_title('Speed Metrics')
axs[0].set_xlabel('Metrics')
axs[0].set_ylabel('Time (seconds)')
axs[1].bar(seq_lengths, seq_impact_times)
axs[1].set_title('Sequence Length Impact')
axs[1].set_xlabel('Sequence Length')
axs[1].set_ylabel('Time (seconds)')
axs[2].plot(list(range(1, 11)), consistency_times)
axs[2].set_title('Consistency Over Time')
axs[2].set_xlabel('Run Number')
axs[2].set_ylabel('Time (seconds)')
plt.tight_layout()
plt.show()
print(f"Throughput: {throughput} instances/second")
print(f"Memory used: {current / 10**6}MB; Peak: {peak / 10**6}MB")
# Add at the bottom of your file
if __name__ == "__main__":
model_test = AndromedaModelTest()
model_test.test_forward_pass()
model_test.test_backward_pass()
model_test.test_optimizer_step() |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Run pytest using MCP."""
from mcli.sdk import (RunConfig, RunStatus, create_run, follow_run_logs,
                      stop_run, wait_for_run_status)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name',
type=str,
default='mcp-pytest',
help='Base name of run')
parser.add_argument('--cluster',
type=str,
default='r1z4',
help='Cluster to use')
parser.add_argument('--gpu_type',
type=str,
default='a100_40gb',
help='Type of GPU to use')
parser.add_argument('--gpu_num',
type=int,
default=2,
help='Number of the GPU to use')
parser.add_argument('--image',
type=str,
default='mosaicml/pytorch:latest',
help='Docker image to use')
parser.add_argument('--git_branch',
type=str,
help='Git branch to check out')
parser.add_argument(
'--git_commit',
type=str,
help='Git commit to check out. Overrides git_branch if specified')
parser.add_argument(
'--pr_number',
type=int,
help=
'PR number to check out. Overrides git_branch/git_commit if specified')
parser.add_argument('--pytest_markers',
type=str,
help='Markers to pass to pytest')
parser.add_argument('--pytest_command',
type=str,
help='Command to run pytest')
parser.add_argument('--timeout',
type=int,
default=1800,
help='Timeout for run (in seconds)')
args = parser.parse_args()
name = args.name
git_integration = {
'integration_type': 'git_repo',
'git_repo': 'mosaicml/llm-foundry',
'ssh_clone': 'False',
}
if args.git_branch is not None and args.git_commit is None:
name += f'-branch-{args.git_branch}'
git_integration['git_branch'] = args.git_branch
if args.git_commit is not None:
name += f'-commit-{args.git_commit}'
git_integration['git_commit'] = args.git_commit
command = 'cd llm-foundry'
# Checkout a specific PR if specified
if args.pr_number is not None:
name += f'-pr-{args.pr_number}'
command += f'''
git fetch origin pull/{args.pr_number}/head:pr_branch
git checkout pr_branch
'''
# Shorten name if too long
if len(name) > 56:
name = name[:56]
command += f'''
pip install --upgrade --user .[all]
export COMMON_ARGS="-v --durations=20 -m '{args.pytest_markers}'"
make test PYTEST='{args.pytest_command}' EXTRA_ARGS="$COMMON_ARGS --codeblocks"
make test-dist PYTEST='{args.pytest_command}' EXTRA_ARGS="$COMMON_ARGS" WORLD_SIZE=2
python -m coverage combine
python -m coverage report
'''
config = RunConfig(
name=name,
cluster=args.cluster,
gpu_type=args.gpu_type,
gpu_num=args.gpu_num,
image=args.image,
integrations=[git_integration],
command=command,
)
# Create run
run = create_run(config)
print(f'[GHA] Run created: {run.name}')
# Wait until run starts before fetching logs
run = wait_for_run_status(run, status='running')
start_time = time.time()
print('[GHA] Run started. Following logs...')
# Print logs
for line in follow_run_logs(run):
print(line, end='')
# Check if args.timeout seconds have elapsed
if time.time() - start_time > args.timeout:
print(
f'[GHA] Run timed out and did not complete in {args.timeout/60} minutes.'
)
run = stop_run(run)
print('[GHA] Run stopped.')
break
print('[GHA] Run completed. Waiting for run to finish...')
run = wait_for_run_status(run, status='completed')
# Fail if command exited with non-zero exit code or timed out
assert run.status == RunStatus.COMPLETED
|
# from pdfminer.high_level import extract_text
def search_science_books(query, num_results=1000):
s = LibgenSearch()
results = s.search_title(query)
science_books = [book for book in results if "science" in book["Title"].lower()]
return science_books[:num_results]
def download_book(url, save_path, max_retries=3, timeout=10):
retries = 0
while retries < max_retries:
try:
response = requests.get(url, timeout=timeout)
with open(save_path, 'wb') as f:
f.write(response.content)
return
except RequestException:
print(f"Download failed, retrying {retries + 1}/{max_retries}")
retries += 1
time.sleep(2)
print(f"Failed to download {url} after {max_retries} retries.")
def extract_epub_content(file_path):
book = epub.read_epub(file_path)
content = []
for item in book.get_items():
if item.get_type() == epub.ITEM_DOCUMENT:
content.append(BeautifulSoup(item.get_content(), 'html.parser').get_text())
return '\n'.join(content)
def extract_pdf_content(file_path):
with open(file_path, 'rb') as f:
pdf = PdfReader(f)
content = []
for i in range(len(pdf.pages)):
content.append(pdf.pages[i].extract_text())
print(content)
return ''.join(content)
def extract_epub_metadata(file_path):
book = epub.read_epub(file_path)
return {
'title': book.get_metadata('DC', 'title')[0][0],
'authors': [author[0] for author in book.get_metadata('DC', 'creator')],
'language': book.get_metadata('DC', 'language')[0][0],
'publisher': book.get_metadata('DC', 'publisher')[0][0] if book.get_metadata('DC', 'publisher') else '',
'published': book.get_metadata('DC', 'date')[0][0] if book.get_metadata('DC', 'date') else '',
'content': extract_epub_content(file_path),
}
def extract_pdf_metadata(file_path):
with open(file_path, 'rb') as f:
pdf = PdfReader(f)
        info = pdf.metadata
return {
'title': info.title,
'authors': [info.author] if info.author else [],
'language': '',
'publisher': info.producer if info.producer else '',
        'published': str(info.creation_date) if info.creation_date else '',
'content': extract_pdf_content(file_path),
}
def extract_metadata(file_path, extension):
if extension == 'epub':
return extract_epub_metadata(file_path)
elif extension == 'pdf':
return extract_pdf_metadata(file_path)
else:
return {}
def save_structured_data(structured_data, file_path):
with gzip.open(file_path, 'wt', encoding='utf-8') as f:
json.dump(structured_data, f, ensure_ascii=False, indent=4)
def process_book(book, download_directory, structured_data_directory):
s = LibgenSearch()
download_links = s.resolve_download_links(book)
download_url = download_links.get("GET")
if download_url:
save_path = os.path.join(download_directory, f"{book['ID']}.{book['Extension']}")
download_book(download_url, save_path)
print(f"Downloaded {book['Title']} by {book['Author']} to {save_path}")
metadata = extract_metadata(save_path, book["Extension"])
structured_data = {
"ID": book["ID"],
"Title": book["Title"],
"Authors": book["Author"].split(', '),
"Publisher": book["Publisher"],
"Year": book["Year"],
"Pages": book["Pages"],
"Language": book["Language"],
"Size": book["Size"],
"Extension": book["Extension"],
**metadata
}
print(structured_data)
structured_data_file = os.path.join(structured_data_directory, f"{book['ID']}.json.gz")
save_structured_data(structured_data, structured_data_file)
print(f"Saved structured data for {book['Title']} by {book['Author']} to {structured_data_file}")
s = LibgenSearch()
books = search_science_books("science", num_results=10)
download_directory = "downloads"
os.makedirs(download_directory, exist_ok=True)
structured_data_directory = "structured_data"
os.makedirs(structured_data_directory, exist_ok=True)
# Use threading to speed up the downloading and processing of books
threads = []
for book in books:
thread = threading.Thread(target=process_book, args=(book, download_directory, structured_data_directory))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print("Finished processing all books.")
|
#example
#download books -> analyze them -> organize in a structured format[title, authors, content, metadata, published] -> each book with all data formats => json
def search_science_books(query, num_results=100):
s = LibgenSearch()
results = s.search_title(query)
print(results)
science_books = [book for book in results if "science" in book["Title"].lower()]
print(science_books)
return science_books[:num_results]
# books = search_science_books("science")
# download_directory = "downloads"
# os.makedirs(download_directory, exist_ok=True)
# for book in books:
# download_links = s.resolve_download_links(book)
# download_url = download_links.get("GET")
# if download_url:
# save_path = os.path.join(download_directory, f"{book['ID']}.{book['Extension']}")
# download_book(download_url, save_path)
# print(f"Downloaded {book['Title']} by {book['Author']} to {save_path}")
# zip_filename = "science_books.zip"
# with zipfile.ZipFile(zip_filename, 'w') as zipf:
# for root, _, files in os.walk(download_directory):
# for file in files:
# file_path = os.path.join(root, file)
# zipf.write(file_path, os.path.relpath(file_path, download_directory))
# harvestor
def download_book(url, save_path, max_retries=3, timeout=10):
retries = 0
while retries < max_retries:
try:
response = requests.get(url, timeout=timeout)
with open(save_path, 'wb') as f:
f.write(response.content)
return
except RequestException:
print(f"Download failed, retrying {retries + 1}/{max_retries}")
retries += 1
time.sleep(2) #wait for 2 seconds before retrying
print(f"Failed to download {url} after {max_retries} retries.")
def extract_epub_content(file_path):
book = epub.read_epub(file_path)
content = []
    for item in book.get_items():
if item.get_type() == epub.ITEM_DOCUMENT:
content.append(item.get_content().decode('utf-8'))
return ''.join(content)
def extract_pdf_content(file_path):
with open(file_path, 'rb') as f:
        pdf = PdfReader(f)
content = []
        for i in range(len(pdf.pages)):
            content.append(pdf.pages[i].extract_text())
print(content)
return ''.join(content)
print(extract_pdf_content)
def extract_epub_metadata(file_path):
book = epub.read_epub(file_path)
return {
'title': book.get_metadata('DC', 'title')[0][0],
'authors': [author[0] for author in book.get_metadata('DC', 'creator')],
'language': book.get_metadata('DC', 'language')[0][0],
'publisher': book.get_metadata('DC', 'publisher')[0][0] if book.get_metadata('DC', 'publisher') else '',
'published': book.get_metadata('DC', 'date')[0][0] if book.get_metadata('DC', 'date') else '',
'content': extract_epub_content(file_path),
}
print(extract_epub_metadata)
def extract_pdf_metadata(file_path):
with open(file_path, 'rb') as f:
        pdf = PdfReader(f)
        info = pdf.metadata
return {
'title': info.title,
'authors': [info.author] if info.author else [],
'language': '',
'publisher': info.producer if info.producer else '',
        'published': str(info.creation_date) if info.creation_date else '',
'content': extract_pdf_content(file_path),
}
print(extract_pdf_metadata)
def extract_metadata(file_path, extension):
if extension == 'epub':
return extract_epub_metadata(file_path)
elif extension == 'pdf':
return extract_pdf_metadata(file_path)
else:
return {}
s = LibgenSearch()
books = search_science_books("science")
download_directory = "downloads"
os.makedirs(download_directory, exist_ok=True)
structured_data = []
structured_data_directory = "structured_data"
os.makedirs(structured_data_directory, exist_ok=True)
for book in books:
download_links = s.resolve_download_links(book)
download_url = download_links.get("GET")
if download_url:
save_path = os.path.join(download_directory, f"{book['ID']}.{book['Extension']}")
download_book(download_url, save_path)
print(f"Downloaded {book['Title']} by {book['Author']} to {save_path}")
metadata = extract_metadata(save_path, book["Extension"])
structured_data = {
"ID": book["ID"],
"Title": book["Title"],
"Authors": book["Author"].split(', '),
"Publisher": book["Publisher"],
"Year": book["Year"],
"Pages": book["Pages"],
"Language": book["Language"],
"Size": book["Size"],
"Extension": book["Extension"],
**metadata
}
print(structured_data)
structured_data_file = os.path.join(structured_data_directory, f"{book['ID']}.json")
with open(structured_data_file, 'w', encoding='utf-8') as f:
json.dump(structured_data, f, ensure_ascii=False, indent=4)
print(f"Saved structured data for {book['Title']} by {book['Author']} to {structured_data_file}") |
title = "Pride and Prejudice"
author = "Agatha Christie"
ls = LibgenSearch()
class TestBasicSearching:
def test_title_search(self):
titles = ls.search_title(title)
first_result = titles[0]
assert title in first_result["Title"]
def test_author_search(self):
titles = ls.search_author(author)
first_result = titles[0]
assert author in first_result["Author"]
def test_title_filtering(self):
title_filters = {"Year": "2007", "Extension": "epub"}
titles = ls.search_title_filtered(title, title_filters, exact_match=True)
first_result = titles[0]
assert (title in first_result["Title"]) & fields_match(
title_filters, first_result
)
def test_author_filtering(self):
author_filters = {"Language": "German", "Year": "2009"}
titles = ls.search_author_filtered(author, author_filters, exact_match=True)
first_result = titles[0]
assert (author in first_result["Author"]) & fields_match(
author_filters, first_result
)
# explicit test of exact filtering
# should return no results as they will all get filtered out
def test_exact_filtering(self):
exact_filters = {"Extension": "PDF"}
# if exact_match = True, this will filter out all results as
# "pdf" is always written lower case on Library Genesis
titles = ls.search_author_filtered(author, exact_filters, exact_match=True)
assert len(titles) == 0
def test_non_exact_filtering(self):
non_exact_filters = {"Extension": "PDF"}
titles = ls.search_author_filtered(author, non_exact_filters, exact_match=False)
first_result = titles[0]
assert (author in first_result["Author"]) & fields_match(
non_exact_filters, first_result, exact=False
)
def test_non_exact_partial_filtering(self):
partial_filters = {"Extension": "p", "Year": "200"}
titles = ls.search_title_filtered(title, partial_filters, exact_match=False)
first_result = titles[0]
assert (title in first_result["Title"]) & fields_match(
partial_filters, first_result, exact=False
)
def test_exact_partial_filtering(self):
exact_partial_filters = {"Extension": "p"}
titles = ls.search_title_filtered(
title, exact_partial_filters, exact_match=True
)
assert len(titles) == 0
def test_resolve_download_links(self):
titles = ls.search_author(author)
title_to_download = titles[0]
dl_links = ls.resolve_download_links(title_to_download)
# ensure each host is in the results and that they each have a url
assert (["GET", "Cloudflare", "IPFS.io", "Infura"] == list(dl_links.keys())) & (
False not in [len(link) > 0 for key, link in dl_links.items()]
)
# should return an error if search query is less than 3 characters long
def test_raise_error_on_short_search(self):
with pytest.raises(Exception):
titles = ls.search_title(title[0:2])
####################
# Helper Functions #
####################
# Check object fields for equality -
# -> Returns True if they match.
# -> Returns False otherwise.
#
# when exact=True, fields are checked strictly (==).
#
# when exact=False, fields are normalized to lower case,
# and we check whether the filter value is a substring of the response.
def fields_match(filter_obj, response_obj, exact=True):
for key, value in filter_obj.items():
if exact is False:
value = value.lower()
response_obj[key] = response_obj[key].lower()
if value not in response_obj[key]:
return False
elif response_obj[key] != value:
return False
return True
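# Illustrative checks for fields_match above (values are made up):
# _result = {"Extension": "pdf", "Year": "2007"}
# fields_match({"Extension": "pdf"}, _result)               # True  (strict equality)
# fields_match({"Extension": "PDF"}, _result)               # False (case differs and exact=True)
# fields_match({"Extension": "PDF"}, _result, exact=False)  # True  (case-insensitive substring check)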
|
"""
Basic testing script for libgen-api.
Runs through a number of searches using different parameters, outputs results to terminal.
Run -
python3 test.py
"""
title = "Pride and Prejudice"
author = "Agatha Christie"
# helper function to print first title if it exists.
def print_results(titles_array):
print(json.dumps(titles_array[0], indent=1) if len(titles_array) else "No results.")
print("\n\n--- END OF OUTPUT ---\n\n")
# test title search
# should print a result for the book specified at the top of the file.
t = LibgenSearch()
print("\n>>>\tSearching for title: " + title)
titles = t.search_title(title)
print_results(titles)
# test author search
# should print a result for the author specified at the top of the file.
a = LibgenSearch()
print("\n>>>\tSearching for author: " + author)
titles = a.search_author(author)
print_results(titles)
# test title filtering
# should print a result for the book specified at the top of the file,
# conforming to the title_filters below.
tf = LibgenSearch()
title_filters = {"Year": "2007", "Extension": "epub"}
print(
"\n>>>\tSearching for title: "
+ title
+ " with filters --- "
+ ", ".join([":".join(i) for i in title_filters.items()])
)
titles = tf.search_title_filtered(title, title_filters, exact_match=True)
print_results(titles)
# test author filtering
# should print a result for the author specified at the top of the file,
# conforming to the author_filters below.
af = LibgenSearch()
author_filters = {"Language": "German", "Year": "2009"}
print(
"\n>>>\tSearching for author: "
+ author
+ " with filters --- "
+ ", ".join([":".join(i) for i in author_filters.items()])
)
titles = af.search_author_filtered(author, author_filters, exact_match=True)
print_results(titles)
# test exact filtering explicitly (using an Author search)
# should print no results as the filters exclude all results.
afe = LibgenSearch()
exact_filters = {
"Extension": "PDF"
} # if exact_match = True, all results get filtered as "pdf" is always written lower case
print(
"\n>>>\tSearching for author: "
+ author
+ " with filters --- "
+ ", ".join([":".join(i) for i in exact_filters.items()])
+ " & exact_match == True"
)
titles = afe.search_author_filtered(author, exact_filters, exact_match=True)
print_results(titles)
# test non-exact filtering (using an Author search)
# should print a result for the author specified at the top of the file,
# conforming to the non_exact_filters below.
afne = LibgenSearch()
non_exact_filters = {
"Extension": "PDF"
} # if exact_match = True, all results get filtered as "pdf" is always written lower case
print(
"\n>>>\tSearching for author: "
+ author
+ " with filters --- "
+ ", ".join([":".join(i) for i in non_exact_filters.items()])
+ " & exact_match == FALSE"
)
titles = afne.search_author_filtered(author, non_exact_filters, exact_match=False)
print_results(titles)
# test partial filtering (using a Title)
# should print a result for the title specified at the top of the file,
# conforming to the partial_filters below, with non-exact matching.
tfpne = LibgenSearch()
partial_filters = {"Extension": "p", "Year": "200"}
print(
"\n>>>\tSearching for title: "
+ title
+ " with filters --- "
+ ", ".join([":".join(i) for i in partial_filters.items()])
+ " & exact_match == False"
)
titles = tfpne.search_title_filtered(title, partial_filters, exact_match=False)
print_results(titles)
# test exact partial filtering (using a Title)
# should return nothing as the extension is not an exact match to an existing one (ie. "pdf")
tfpe = LibgenSearch()
exact_partial_filters = {"Extension": "p"}
print(
"\n>>>\tSearching for title: "
+ title
+ " with filters --- "
+ ", ".join([":".join(i) for i in exact_partial_filters.items()])
+ " & exact_match == True"
)
titles = tfpe.search_title_filtered(title, exact_partial_filters, exact_match=True)
print_results(titles)
# test resolving of mirror links
# should print a populated hash of source:download_link pairs
arml = LibgenSearch()
print("\n>>>\tSearching for title: " + title + " and resolving download links")
# Author hard-coded so that it pairs with title (currently pride and prejudice)
titles = arml.search_author("Jane Austen")
item_to_download = titles[0]
download_links = arml.resolve_download_links(item_to_download)
print_results([download_links])
|
MIRROR_SOURCES = ["GET", "Cloudflare", "IPFS.io", "Infura"]
class LibgenSearch:
def search_title(self, query):
search_request = SearchRequest(query, search_type="title")
return search_request.aggregate_request_data()
def search_author(self, query):
search_request = SearchRequest(query, search_type="author")
return search_request.aggregate_request_data()
def search_title_filtered(self, query, filters, exact_match=True):
search_request = SearchRequest(query, search_type="title")
results = search_request.aggregate_request_data()
filtered_results = filter_results(
results=results, filters=filters, exact_match=exact_match
)
return filtered_results
def search_author_filtered(self, query, filters, exact_match=True):
search_request = SearchRequest(query, search_type="author")
results = search_request.aggregate_request_data()
filtered_results = filter_results(
results=results, filters=filters, exact_match=exact_match
)
return filtered_results
def resolve_download_links(self, item):
mirror_1 = item["Mirror_1"]
page = requests.get(mirror_1)
soup = BeautifulSoup(page.text, "html.parser")
links = soup.find_all("a", string=MIRROR_SOURCES)
download_links = {link.string: link["href"] for link in links}
return download_links
def filter_results(results, filters, exact_match):
"""
Returns a list of results that match the given filter criteria.
When exact_match = true, we only include results that exactly match
the filters (ie. the filters are an exact subset of the result).
    When exact_match = false,
we run a case-insensitive check between each filter field and each result.
exact_match defaults to TRUE -
this is to maintain consistency with older versions of this library.
"""
filtered_list = []
if exact_match:
for result in results:
# check whether a candidate result matches the given filters
if filters.items() <= result.items():
filtered_list.append(result)
else:
filter_matches_result = False
for result in results:
for field, query in filters.items():
if query.casefold() in result[field].casefold():
filter_matches_result = True
else:
filter_matches_result = False
break
if filter_matches_result:
filtered_list.append(result)
return filtered_list
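# Illustrative example of filter_results above (toy records, not real search output):
# _results = [{"Title": "A Book", "Extension": "pdf"}, {"Title": "B Book", "Extension": "epub"}]
# filter_results(_results, {"Extension": "pdf"}, exact_match=True)   # -> the first record only
# filter_results(_results, {"Extension": "PDF"}, exact_match=True)   # -> [] (case must match exactly)
# filter_results(_results, {"Extension": "PDF"}, exact_match=False)  # -> the first record only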
|
# WHY
# The SearchRequest module contains all the internal logic for the library.
#
# This encapsulates the logic,
# ensuring users can work at a higher level of abstraction.
# USAGE
# req = search_request.SearchRequest("[QUERY]", search_type="[title]")
class SearchRequest:
col_names = [
"ID",
"Author",
"Title",
"Publisher",
"Year",
"Pages",
"Language",
"Size",
"Extension",
"Mirror_1",
"Mirror_2",
"Mirror_3",
"Mirror_4",
"Mirror_5",
"Edit",
]
def __init__(self, query, search_type="title"):
self.query = query
self.search_type = search_type
if len(self.query) < 3:
raise Exception("Query is too short")
def strip_i_tag_from_soup(self, soup):
subheadings = soup.find_all("i")
for subheading in subheadings:
subheading.decompose()
def get_search_page(self):
query_parsed = "%20".join(self.query.split(" "))
if self.search_type.lower() == "title":
search_url = (
f"http://gen.lib.rus.ec/search.php?req={query_parsed}&column=title"
)
elif self.search_type.lower() == "author":
search_url = (
f"http://gen.lib.rus.ec/search.php?req={query_parsed}&column=author"
)
search_page = requests.get(search_url)
return search_page
def aggregate_request_data(self):
search_page = self.get_search_page()
soup = BeautifulSoup(search_page.text, "lxml")
self.strip_i_tag_from_soup(soup)
# Libgen results contain 3 tables
# Table2: Table of data to scrape.
information_table = soup.find_all("table")[2]
# Determines whether the link url (for the mirror)
# or link text (for the title) should be preserved.
        # Both the book title and mirror links have a "title" attribute,
        # but only the mirror links have it filled. (title vs title="libgen.io")
raw_data = [
[
td.a["href"]
if td.find("a")
and td.find("a").has_attr("title")
and td.find("a")["title"] != ""
else "".join(td.stripped_strings)
for td in row.find_all("td")
]
for row in information_table.find_all("tr")[
1:
] # Skip row 0 as it is the headings row
]
output_data = [dict(zip(self.col_names, row)) for row in raw_data]
return output_data
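# Illustrative sketch (not part of the library): how the href-vs-text heuristic above behaves
# on a hand-written table row. Only bs4 is assumed to be installed.
from bs4 import BeautifulSoup as _BS
_row_html = (
    "<table><tr>"
    "<td><a href='book.php?id=1'>Some Title</a></td>"             # title link: empty/absent title attr -> keep text
    "<td><a href='http://mirror' title='libgen.io'>[1]</a></td>"  # mirror link: filled title attr -> keep href
    "</tr></table>"
)
_row = _BS(_row_html, "html.parser").find("tr")
_cells = [
    td.a["href"]
    if td.find("a") and td.find("a").has_attr("title") and td.find("a")["title"] != ""
    else "".join(td.stripped_strings)
    for td in _row.find_all("td")
]
# _cells == ["Some Title", "http://mirror"]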
|
min_transformers_version = "4.29.2"
def get_alpaca_farm_model_names():
api = HfApi()
models = api.list_models(author="tatsu-lab", search="alpaca-farm")
model_names = [model.modelId for model in models]
model_names = [name.replace("tatsu-lab/alpaca-farm-", "").replace("-wdiff", "") for name in model_names]
return model_names
def build_argparse(model_names):
parser = argparse.ArgumentParser("Download AlpacaFarm models")
parser.add_argument("--llama-7b-hf-dir", type=str, required=True)
parser.add_argument("--alpaca-farm-model-name", choices=model_names + ["all"], default="all", required=True)
parser.add_argument("--models-save-dir", default="./pretrained_models", type=str)
parser.add_argument("--device", default="cpu", type=str)
parser.add_argument("--path-to-sft10k", type=str, help="Necessary for reconstructing reward models.")
args = parser.parse_args()
if args.path_to_sft10k is None:
args.path_to_sft10k = os.path.join(args.models_save_dir, "sft10k")
return args
def load_weight_diff(hf_hub_name, is_reward_model=False, device="cpu", path_to_sft10k=None):
if is_reward_model:
model_tuned = RewardModel.from_pretrained(
hf_hub_name,
device_map={"": torch.device(device)},
torch_dtype=torch.float32,
flash_attn=False,
config=RewardConfig(backbone_model_name_or_path=path_to_sft10k),
)
else:
model_tuned = transformers.AutoModelForCausalLM.from_pretrained(
hf_hub_name, device_map={"": torch.device(device)}, torch_dtype=torch.float32
)
tokenizer_tuned = transformers.AutoTokenizer.from_pretrained(hf_hub_name)
return model_tuned.eval(), tokenizer_tuned
def load_raw_model(model_dir, device="cpu"):
config_path = os.path.join(model_dir, "config.json")
config = json.load(open(config_path, "r"))
transformers_version = config["transformers_version"]
if transformers_version < min_transformers_version:
logging.warning(
f"Your base LLaMA checkpoint is converted with transformers=={transformers_version}, "
f"but transformers>={min_transformers_version} is expected. "
f"This may produce a corrupted checkpoint and lead to unexpected behavior. "
f"Please regenerate your base LLaMA checkpoint with transformers>={min_transformers_version}."
)
model_raw = transformers.AutoModelForCausalLM.from_pretrained(
model_dir, device_map={"": torch.device(device)}, torch_dtype=torch.float32
)
tokenizer_raw = transformers.AutoTokenizer.from_pretrained(model_dir)
if tokenizer_raw.pad_token is None:
stable_resize_token_embeddings_and_tokenizer(
model=model_raw, tokenizer=tokenizer_raw, special_tokens_dict=dict(pad_token="[PAD]")
)
return model_raw.eval(), tokenizer_raw
def reconstruct_tuned_model(model_tuned, model_raw, is_reward_model=False):
# modifies model_tuned in-place
state_dict_diff = model_tuned.state_dict()
state_dict_raw = model_raw.state_dict()
if is_reward_model:
# reward model adds nesting to main transformer
state_dict_raw = {f"backbone_model.{k}": v for k, v in state_dict_raw.items()}
for key in state_dict_raw:
if state_dict_raw[key].size() != state_dict_diff[key].size():
# weights with a size mismatch are not diff'd in the upload
continue
state_dict_diff[key].add_(state_dict_raw[key])
def integrity_check(model_tuned, hf_hub_name):
model_sum = sum(param.sum() for param in model_tuned.state_dict().values()).item()
model_sum_file = hf_hub_download(repo_id=hf_hub_name, filename="model_sum.txt")
with open(model_sum_file, "r") as f:
model_sum_hf_hub = float(f.read())
return np.isclose(model_sum_hf_hub, model_sum)
if __name__ == "__main__":
model_names = get_alpaca_farm_model_names()
args = build_argparse(model_names)
model_names = model_names if args.alpaca_farm_model_name == "all" else [args.alpaca_farm_model_name]
for model_name in model_names:
print("Downloading", model_name)
hf_hub_name = f"tatsu-lab/alpaca-farm-{model_name}-wdiff"
is_reward_model = "reward-model" in model_name
save_dir = os.path.join(args.models_save_dir, model_name)
model_tuned, tokenizer_tuned = load_weight_diff(hf_hub_name, is_reward_model, args.device, args.path_to_sft10k)
model_raw, tokenizer_raw = load_raw_model(args.llama_7b_hf_dir, args.device)
reconstruct_tuned_model(model_tuned, model_raw, is_reward_model)
if not integrity_check(model_tuned, hf_hub_name):
print("Model weights integrity check failed. Did you use the latest llama-7b HF weights?")
model_tuned.save_pretrained(save_dir)
tokenizer_tuned.save_pretrained(save_dir)
print("Downloaded to", save_dir)
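# Minimal sketch of what reconstruct_tuned_model above does (illustrative, toy tensors only):
# the released "wdiff" weights plus the raw base weights recover the tuned weights in place.
import torch
_raw = {"w": torch.tensor([1.0, 2.0])}    # base model weights
_diff = {"w": torch.tensor([0.5, -0.5])}  # released diff = tuned - raw
for _k in _raw:
    _diff[_k].add_(_raw[_k])              # in-place add, mirroring state_dict_diff[key].add_(...)
assert torch.equal(_diff["w"], torch.tensor([1.5, 1.5]))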
|
def test_batch_select():
input = torch.tensor(
[
[0, 1, 2],
[3, 0, 9],
[6, 7, 8],
]
)
index = torch.tensor([[0, 1], [1, 0], [0, 0]])
actual = torch_ops.batch_select(input, index)
expected = torch.tensor([[0, 1], [0, 3], [6, 6]])
assert actual.eq(expected).all()
def test_pad_sequence_from_left():
sequences = [
torch.tensor([0.0, 1.0, 2.0]),
torch.tensor(
[
3.0,
]
),
torch.tensor(
[
6.0,
7.0,
]
),
]
expected = torch.tensor([[0.0, 1.0, 2.0], [-1.0, -1.0, 3.0], [-1.0, 6.0, 7.0]])
actual = torch_ops.pad_sequence_from_left(sequences, batch_first=True, padding_value=-1)
torch.testing.assert_close(actual, expected)
|
def test_stable_resize_token_embeddings():
model_name_or_paths = (
"gpt2", # Tied weights.
"/juice5/scr5/nlp/llama_model/llama_hf_latest/llama-teeny", # Untied weights.
)
for model_name_or_path in model_name_or_paths:
model: transformers.PreTrainedModel = transformers.AutoModelForCausalLM.from_pretrained(model_name_or_path)
utils.stable_resize_token_embeddings(
model, target_size=model.get_input_embeddings().weight.size(0) + 10, jitter_new_embeddings=True
)
|
logger = logging.get_logger(__name__)
# --- Include standard models to compare activation and help debug ---
class OPTDecoderLayerNF(modeling_opt.OPTDecoderLayer):
pass
class OPTDecoderNF(modeling_opt.OPTDecoder):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.layers = nn.ModuleList([OPTDecoderLayerNF(config) for _ in range(config.num_hidden_layers)])
self.post_init()
def forward(
self,
*args,
**kwargs,
):
out = super(OPTDecoderNF, self).forward(*args, **kwargs)
# print(out.past_key_values[0][0][:, :, -1].sum())
return out
class OPTModelNF(modeling_opt.OPTModel):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.decoder = OPTDecoderNF(config)
self.post_init()
class OPTForCausalLMNF(modeling_opt.OPTForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = OPTModelNF(config)
self.post_init()
# --- End of reckless repetition ---
@pytest.mark.parametrize("padding_side", ("left", "right"))
@pytest.mark.parametrize("dtype", (torch.float16, torch.bfloat16))
@torch.inference_mode()
def test_forward(dtype, padding_side):
    # For some reason, the intermediate tests pass (within each Transformer block the attention
    # outputs are asserted to be similar), but the final logit test doesn't pass.
model_name = "facebook/opt-125m"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = padding_side
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and "],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
model1 = flash_opt.OPTForCausalLM.from_pretrained(model_name).to(device).eval()
model2 = modeling_opt.OPTForCausalLM.from_pretrained(model_name).to(device).eval()
with torch.autocast(device_type="cuda", dtype=dtype, enabled=True):
out1 = model1(**tensors, output_hidden_states=True)
out2 = model2(**tensors, output_hidden_states=True)
# Outputs are only guaranteed to match at non-padding locations. Clear irrelevant values.
def clear_padded(tensor):
tensor = tensor.masked_fill(~tensors["attention_mask"][..., None].bool(), 0.0)
return tensor
# Error accumulates! The diff for later hidden states is much larger.
atol = 1e-2 if dtype == torch.float16 else 1e-1
rtol = 0
for h1, h2 in utils.zip_(out1.hidden_states, out2.hidden_states):
h1, h2 = tuple(clear_padded(tensor) for tensor in (h1, h2))
torch.testing.assert_close(h1, h2, atol=atol, rtol=rtol)
def all_test_forward(): # This function is not called by pytest.
for dtype in (torch.float16, torch.bfloat16):
for padding_side in ("left", "right"):
test_forward(dtype, padding_side)
@torch.inference_mode()
def test_decoding():
model_name = "facebook/opt-125m"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    # Batch decoding requires left pad: with right pad, your next-token logits could be based on the
    # embedding of a pad token, which is wrong (even though the OPT model increments the position id correctly).
    # In general, any decoder-only HF transformer requires left pad for batch decoding.
tokenizer.padding_side = "left"
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and "],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
model1: transformers.OPTForCausalLM = flash_opt.OPTForCausalLM.from_pretrained(model_name).to(device).eval()
model2: transformers.OPTForCausalLM = OPTForCausalLMNF.from_pretrained(model_name).to(device).eval()
with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=True):
# greedy
out1 = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out1, skip_special_tokens=True)
print(text)
out2 = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out2, skip_special_tokens=True)
print(text)
assert torch.eq(out1, out2).all().item()
# temperature
out = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
out = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
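# --- Illustrative sketch (not part of the original tests) ---
# Why batch decoding uses left padding: generation appends to the *last* column of the batch.
# With right padding, the last column of a short sequence is PAD, so that row's next-token
# logits would be conditioned on a pad embedding. With left padding, the last column always
# holds a real token. The function below is a hypothetical, model-free illustration.
def _toy_left_vs_right_padding_sketch():
    import torch

    pad_id = 0
    seqs = [[11, 12, 13], [21, 22]]  # two sequences of different lengths
    max_len = max(len(s) for s in seqs)

    right = torch.tensor([s + [pad_id] * (max_len - len(s)) for s in seqs])
    left = torch.tensor([[pad_id] * (max_len - len(s)) + s for s in seqs])

    # Generation conditions on the last column of the batch:
    print("right-padded last column:", right[:, -1].tolist())  # [13, 0] -> second row ends in PAD
    print("left-padded last column:", left[:, -1].tolist())  # [13, 22] -> both are real tokens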
@torch.inference_mode()
def profile_decoding():
# For short sequences, the mixed flash/non-flash approach is still slower.
model_name = "facebook/opt-1.3b"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, cache_dir=constants.DEFAULT_CACHE_DIR)
tokenizer.padding_side = "left"
text = [
"i have a good ",
"this is a very long sentence that is very long and ",
"this is a very long sentence ",
"this is a very",
] * 16
tensors = tokenizer(
text,
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
model1: transformers.OPTForCausalLM = flash_opt.OPTForCausalLM.from_pretrained(
model_name, cache_dir=constants.DEFAULT_CACHE_DIR
)
model2: transformers.OPTForCausalLM = OPTForCausalLMNF.from_pretrained(
model_name, cache_dir=constants.DEFAULT_CACHE_DIR
)
nbatches = 4
with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=True):
for model, msg in (
(model2, "native"),
(model1, "flash"),
):
torch.cuda.empty_cache()
model.to(device).eval()
model.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=500,
do_sample=False,
num_beams=1,
)
torch.cuda.synchronize()
with utils.Timer(msg):
for _ in tqdm.tqdm(range(nbatches)):
model.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=500,
do_sample=False,
num_beams=1,
)
torch.cuda.synchronize()
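# --- Illustrative sketch (not part of the original benchmark) ---
# The loop above follows the usual CUDA timing recipe: run the workload once to warm up
# (kernel compilation, memory pools), then call torch.cuda.synchronize() before and after the
# timed region so that asynchronously launched kernels are actually included in the wall-clock
# time. The matmul workload below is a stand-in; any GPU op is timed the same way.
def _toy_cuda_timing_sketch(size=1024, iters=10):
    import time

    import torch

    if not torch.cuda.is_available():
        return
    x = torch.randn(size, size, device="cuda")
    x @ x  # warm-up
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(iters):
        x @ x
    torch.cuda.synchronize()  # wait for queued kernels before reading the clock
    print(f"{iters} matmuls took {time.perf_counter() - start:.4f}s")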
def main(task, *args, **kwargs):
globals()[task](*args, **kwargs)
if __name__ == "__main__":
# Plain python run for hacking.
# python -m tests.test_flash_opt --task all_test_forward
# pytest for systematic testing.
# pytest -xs tests/test_flash_opt.py
fire.Fire(main)
|
class LLaMADecoderLayerNF(modeling_llama.LlamaDecoderLayer):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config)
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs)
class LLaMAModelNF(transformers.LlamaModel):
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList([LLaMADecoderLayerNF(config) for _ in range(config.num_hidden_layers)])
def forward(self, *args, **kwargs):
outputs = super().forward(*args, **kwargs)
print(outputs.past_key_values[0][0].sum())
return outputs
class LLaMAForCausalLMNF(transformers.LlamaForCausalLM):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config)
self.model = LLaMAModelNF(config)
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
def _prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(inputs_embeds.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
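# --- Illustrative sketch (not part of the original tests) ---
# The combined decoder mask is additive: 0 where attention is allowed and a very large negative
# number where it is not, with shape [bsz, 1, tgt_len, src_len]. The toy call below exercises the
# helpers above (assumed to be module-level, as written) on a batch of 2 where the second
# sequence has one padding position.
def _toy_decoder_mask_sketch():
    import torch

    attention_mask = torch.tensor([[1, 1, 1], [1, 1, 0]])
    inputs_embeds = torch.zeros(2, 3, 4, dtype=torch.float32)
    combined = _prepare_decoder_attention_mask(attention_mask, (2, 3), inputs_embeds, 0)
    print(combined.shape)  # torch.Size([2, 1, 3, 3])
    print((combined == 0).sum())  # number of (query, key) pairs that may attend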
@torch.inference_mode()
def test_llama_attention(dtype=torch.float16):
# Test flash and standard attention produce comparable results.
# Right pad only.
device = torch.device("cuda")
batch_size, original_seqlen, num_heads, head_dim = 4, 13, 8, 32
hidden_size = num_heads * head_dim
seqlens = torch.randint(low=1, high=original_seqlen, size=(batch_size,), device=device)
attention_mask = torch.arange(original_seqlen, device=device)[None, :] < seqlens[:, None]
# TODO(lxuechen): Test with past_key_values_length.
position_ids = attention_mask.long().cumsum(-1) - 1
is_selected = attention_mask == 1
flash_position_ids = torch.cat(
[
this_position_ids[this_is_selected]
for this_position_ids, this_is_selected in utils.zip_(position_ids, is_selected)
]
)
nonflash_position_ids = position_ids.masked_fill_(attention_mask == 0, 1)
hidden_states = torch.randn(batch_size, original_seqlen, hidden_size, device=device, dtype=dtype)
hidden_states_unpad, indices, cu_seqlens, max_s = unpad_input(hidden_states, attention_mask)
expanded_attention_mask = _prepare_decoder_attention_mask(
attention_mask, (batch_size, original_seqlen), hidden_states, 0
)
config = modeling_llama.LlamaConfig(
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_hidden_layers=1,
num_attention_heads=num_heads,
)
block = flash_llama.LlamaAttention(config=config).to(device)
# Create a small dummy model just for creating rotary tensors.
dummy_model = flash_llama.LlamaModel(config).to(device)
rotary_tensors = dummy_model._make_rotary_tensors(flash_position_ids)
with torch.cuda.amp.autocast(dtype=dtype):
out1, _, _ = block.forward(
hidden_states=hidden_states_unpad,
seqlens=seqlens,
cu_seqlens=cu_seqlens,
rotary_tensors=rotary_tensors,
)
out2, _, _ = super(flash_llama.LlamaAttention, block).forward(
hidden_states=hidden_states,
attention_mask=expanded_attention_mask,
position_ids=nonflash_position_ids,
)
out2, _, _, _ = unpad_input(out2, attention_mask)
torch.testing.assert_close(out1, out2, atol=1e-3, rtol=0.0)
print(".")
@torch.inference_mode()
def test_decoding():
# model_name = "/juice5/scr5/nlp/crfm/human-feedback/models/selfinstruct/llama-teeny"
model_name = "/self/nlp/scr-sync/nlp/crfm/human-feedback/models/selfinstruct/sft_v5_llama_7b_regen_v7_3ep/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    # Batch decoding requires left padding: with right padding, your next-token logits could be based on the
    # embedding of a pad token, which is wrong (even though the model increments the position ids correctly).
    # In general, any decoder-only HF transformer requires left padding for batch decoding.
tokenizer.padding_side = "left"
clone_tokenizer = copy.deepcopy(tokenizer)
model1 = flash_llama.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
model2 = transformers.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
if tokenizer.pad_token is None:
utils.stable_resize_token_embeddings_and_tokenizer(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=tokenizer,
model=model1,
)
utils.stable_resize_token_embeddings_and_tokenizer(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=clone_tokenizer,
model=model2,
)
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and "],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
print(f'input size: {tensors["input_ids"].shape}')
with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=True):
# greedy
out1 = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out1, skip_special_tokens=True)
print(text)
out2 = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=100,
do_sample=False,
num_beams=1,
)
text = tokenizer.batch_decode(out2, skip_special_tokens=True)
print(text)
print(torch.ne(out1, out2))
print(out1 - out2)
assert torch.eq(out1, out2).all().item()
# temperature
out = model1.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
out = model2.generate(
inputs=tensors["input_ids"],
attention_mask=tensors["attention_mask"],
max_new_tokens=20,
do_sample=True,
temperature=0.7,
top_p=0.9,
num_return_sequences=3,
)
text = tokenizer.batch_decode(out, skip_special_tokens=True)
print(text)
@torch.inference_mode()
def test_forward(dtype=torch.bfloat16, padding_side="left"):
model_name = "/self/nlp/scr-sync/nlp/huggingface_hub_llms/llama-7b/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = padding_side
clone_tokenizer = copy.deepcopy(tokenizer)
model1 = flash_llama.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
model2 = transformers.LlamaForCausalLM.from_pretrained(
model_name, device_map={"": device}, low_cpu_mem_usage=True
).eval()
if tokenizer.pad_token is None:
utils.smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=tokenizer,
model=model1,
)
utils.smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=clone_tokenizer,
model=model2,
)
tensors = tokenizer(
["i have a good ", "this is a very long sentence that is very long and ", "what type of food do you like?"],
return_tensors="pt",
padding=True,
)
tensors = {k: v.to(device) for k, v in tensors.items()}
with torch.cuda.amp.autocast(dtype=dtype):
out1 = model1(**tensors, output_hidden_states=True, return_dict=True)
out2 = model2(**tensors, output_hidden_states=True, return_dict=True)
def clear_padded(tensor):
tensor.masked_fill_(~tensors["attention_mask"][..., None].bool(), 0.0)
# tensor[:2, ...] = 0.
return tensor
# Error accumulates! The diff for later hidden states is much larger.
atol = 1e-2 if dtype == torch.float16 else 1e-1
rtol = 0
for layer_idx, (h1, h2) in enumerate(utils.zip_(out1.hidden_states, out2.hidden_states)):
h1, h2 = tuple(clear_padded(tensor) for tensor in (h1, h2))
if not torch.allclose(h1, h2, atol=atol, rtol=rtol):
print(
f"found large error for hidden states at layer: {layer_idx}. "
f"maximum diff: {(h1 - h2).abs().max().item()}. "
f"num entries with large diff: {((h1 - h2).abs() > 3).sum()}. "
f"norm of diff: {(h1 - h2).norm().item()}. "
)
def all_test_forward(): # This function is not called by pytest.
for dtype in (torch.float16, torch.bfloat16):
for padding_side in ("left", "right"):
test_forward(dtype, padding_side)
def test_fused_rms_norm():
device = torch.device("cuda")
norm = transformers.models.llama.modeling_llama.LlamaRMSNorm(256).to(device=device)
x = torch.randn(16, 128, 256, device=device)
y1 = norm(x)
y2 = apex_patch.apex_rmsnorm(norm, x)
torch.testing.assert_close(y2, y1)
def main(task, **kwargs):
# python -m models.flash_llama test_llama_attention
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_llama_attention
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_decoding
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_forward
# CUDA_VISIBLE_DEVICES=0 python -m tests.test_flash_llama test_fused_rms_norm
globals()[task](**kwargs)
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
MODEL_TO_PROMPTS = {
"text-davinci-003": "examples/prompts/v0_inputs_noinputs.json",
"text-davinci-001": "examples/prompts/v0_inputs_noinputs.json",
"gpt-3.5-turbo-0301": "examples/prompts/chatml_v0_char1k_inputs_noinputs.json",
"gpt-4-0314": "examples/prompts/chatml_v0_char500_inputs_noinputs.json",
}
# TODO: all of this could just use alpaca_eval
def main_oai_baselines(
all_instructions: Optional[types.AnyData] = None,
model_name: str = "text-davinci-003",
prompt_path: Optional[str] = None,
save_path: Optional[str] = "examples/data/all_outputs/eval_{model_name}.json",
decoding_args: Optional[openai_utils.OpenAIDecodingArguments] = None,
batch_size: Optional[int] = None,
num_procs: Optional[int] = None,
**kwargs,
) -> pd.DataFrame:
"""Run the OAI baselines.
Parameters
----------
all_instructions : list of dict or DataFrame or Dataset, optional
        The instructions to evaluate on. If None, uses AlpacaFarm's eval data.
model_name : str, optional
OpenAI model to use for completion.
prompt_path : str, optional
Path to the prompt dictionary. If None, uses the default prompt for the model.
save_path : str, optional
Path to save the outputs to. {model_name} will be formatted. If None, does not save.
kwargs:
Additional arguments to pass to `openai_utils.openai_completion`.
"""
prompt_path = prompt_path or MODEL_TO_PROMPTS[model_name]
if all_instructions is None:
all_instructions = datasets.load_dataset(
"tatsu-lab/alpaca_farm",
"alpaca_farm_evaluation",
cache_dir=constants.DEFAULT_CACHE_DIR,
)["eval"]
prompts, list_dict_data, _ = data_preprocessor.format_prompt_with_data_frame(
df=eval_utils.convert_to_dataframe(all_instructions),
prompt_dict=utils.jload(prompt_path),
)
if openai_utils.requires_chatml(model_name):
decoding_args = decoding_args or openai_utils.OpenAIDecodingArgumentsChat(temperature=0.7, max_tokens=300)
num_procs = num_procs or 5
batch_size = batch_size or 1
else:
decoding_args = decoding_args or openai_utils.OpenAIDecodingArguments(temperature=0.7, max_tokens=300)
num_procs = num_procs or 1
batch_size = batch_size or 10
completions = openai_utils.openai_completion(
prompts=prompts,
decoding_args=decoding_args, # not useful, openai_completion should initialize this if None
return_text=True,
batch_size=batch_size,
model_name=model_name,
num_procs=num_procs,
**kwargs,
)
df_data = eval_utils.convert_to_dataframe(list_dict_data)
df_data["output"] = completions
df_data["generator"] = model_name
columns_to_keep = [
"instruction",
"input",
"output",
"generator",
"dataset",
"datasplit",
]
if save_path is not None:
logger.info(f"Saving to {save_path.format(model_name=model_name)}")
df_data[columns_to_keep].to_json(save_path.format(model_name=model_name), orient="records", indent=2)
return df_data[columns_to_keep]
if __name__ == "__main__":
fire.Fire(main_oai_baselines)
|
logger = logging.get_logger(__name__)
def main():
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = transformers.HfArgumentParser((DataArguments, TrainingArguments))
data_args, training_args = parser.parse_args_into_dataclasses()
accelerator = accelerate_patch.MyAccelerator(
gradient_accumulation_steps=training_args.gradient_accumulation_steps,
log_with=["wandb"],
even_batches=True, # Make sure the batch size on each device is the same.
split_batches=False, # Don't break a batch into smaller chunks.
step_scheduler_with_optimizer=False, # Untie optimizer and scheduler step.
# Value model might not use all parameters (e.g., lm-head) in the forward pass.
kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)],
)
accelerator.init_trackers(
training_args.wandb_project,
init_kwargs={"wandb": {"name": training_args.run_name}},
config=training_args.__dict__,
)
    logger.warning(accelerator.state, main_process_only=False)  # Each process logs its own state.
tokenizer: transformers.PreTrainedTokenizer = make_tokenizer(args=training_args)
model_module: dict = make_models(tokenizer=tokenizer, args=training_args, accelerator=accelerator)
data_module: dict = data_utils.make_rl_data_module(
tokenizer=tokenizer, data_args=data_args, training_args=training_args
)
trainer = QuarkTrainer(
args=training_args,
accelerator=accelerator,
**data_module,
**model_module,
tokenizer=tokenizer,
)
trainer.train()
if __name__ == "__main__":
main()
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
default=None,
metadata={"help": "Name of or path to the base generative LM."},
)
@dataclass
class DataArguments:
dataset_path: str = field(default="tatsu-lab/alpaca_farm")
dataset_name: Literal["alpaca_human_preference", "alpaca_gpt4_preference", "alpaca_noisy_multi_preference"] = field(
default="alpaca_noisy_multi_preference",
metadata={"help": "Name of the dataset. Fetches the human or GPT-4 preference data."},
)
eval_size: int = field(
default=500,
metadata={"help": "Number of examples to split out from training to use for evaluation."},
)
prompt_dict_path: str = field(
default=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
pad_token: str = field(default=constants.DEFAULT_PAD_TOKEN)
cache_dir: str = field(default=constants.DEFAULT_CACHE_DIR)
wandb_project: str = field(default=constants.WANDB_PROJECT)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=512,
metadata={
"help": "Maximum sequence length. Sequences will be left padded to this length always during training."
},
)
label_names: List[str] = field(
default_factory=lambda: ["index_0", "index_1", "choice"],
metadata={
"help": "Names of the labels in the dataset. "
"This is needed to get transformers.Trainer to not throw those tensors away before `compute_loss`."
"By default, the trainer throws away columns it doesn't recognize when creating the "
"`train_dataloader` (see `_remove_unused_columns`). "
},
)
padding: Literal["max_length", "longest"] = field(
default="longest",
metadata={
"help": "Padding strategy. If 'max_length', pads to `model_max_length` always; this might lead to some "
"redundant compute. If 'longest', pads to the longest sequence in the batch, capped by `model_max_length`."
},
)
initialize_model_on_cpu: bool = field(
default=False,
metadata={
"help": "Whether to initialize the model on CPU. "
"If True, models on all processes will be first initialized on CPU; this is RAM-costly but faster."
},
)
end_sequence_with_eos: bool = field(
default=False,
metadata={
"help": "Whether to end sequences with EOS. "
"Ending with EOS might help the reward model realize it's time to predict."
},
)
resume_from_checkpoint: bool = field(default=False, metadata={"help": "If True, loads from last check point."})
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
def main():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
os.environ["WANDB_PROJECT"] = training_args.wandb_project
if training_args.deepspeed is not None:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = None
elif training_args.initialize_model_on_cpu:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = True
else:
ctx_mgr = common.staggered_object_creation(
local_rank=training_args.local_rank, world_size=training_args.world_size
)
device_map = {"": training_args.device.index}
low_cpu_mem_usage = True
with ctx_mgr:
config = reward_model.RewardConfig(backbone_model_name_or_path=model_args.model_name_or_path)
model = reward_model.RewardModel(
flash_attn=training_args.flash_attn,
fp16=training_args.fp16,
bf16=training_args.bf16,
low_cpu_mem_usage=low_cpu_mem_usage,
device_map=device_map,
config=config,
)
common.let_model_save_mem_when_zero_grad(model)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="left", # Ensure reward is always extracted at the last token embedding.
use_fast=training_args.use_fast_tokenizer,
)
tokenizer.padding = training_args.padding
data_module = data_utils.make_binary_reward_modeling_data_module(
tokenizer=tokenizer,
data_args=data_args,
training_args=training_args,
)
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=training_args,
compute_metrics=compute_reward_modeling_metrics,
**data_module,
)
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
logger.warning("hooray! training finished successfully! now on to model saving.", main_process_only=True)
trainer.evaluate()
trainer.save_state()
common.safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
logger.warning("hooray again! model saving worked.", main_process_only=True)
if __name__ == "__main__":
main()
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
        default=None, metadata={"help": "Name of a huggingface native pretrained model or path to a model on disk."}
)
@dataclass
class DataArguments:
dataset_path: str = field(
default="tatsu-lab/alpaca_farm",
metadata={
"help": "Path to the dataset. Either points to a location on Hugging Face hub or a local folder. "
"If the path points to a local folder, the folder must be structured properly "
"(see documentation for datasets.load_dataset)."
},
)
dataset_name: Optional[str] = field(
default="alpaca_instructions",
metadata={"help": "Name of the dataset to load -- the argument `name` passed to `datasets.load_dataset`."},
)
train_splits: List[str] = field(
default_factory=lambda: ["sft"],
metadata={"help": "Splits to use for training. This must not be an empty list."},
)
eval_splits: Optional[List[str]] = field(
default_factory=lambda: ["val"],
metadata={
"help": "Splits to use for evaluation. "
"If None, empty, or the splits are not found in the dataset, no evaluation is performed."
},
)
prompt_dict_path: str = field(
default=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
pad_token: str = field(default=constants.DEFAULT_PAD_TOKEN)
cache_dir: str = field(default=constants.DEFAULT_CACHE_DIR)
wandb_project: str = field(default=constants.WANDB_PROJECT)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=512,
metadata={
"help": "Maximum sequence length. Sequences will be right padded to this length (and possibly truncated)."
"Enforcing a consistent max length ensures memory usage is constant and predictable."
},
)
padding: Literal["max_length", "longest"] = field(
default="longest",
metadata={
"help": "Padding strategy. If 'max_length', pads to `model_max_length` always; this might lead to some "
"redundant compute. If 'longest', pads to the longest sequence in the batch, capped by `model_max_length`."
},
)
initialize_model_on_cpu: bool = field(
default=False,
metadata={
"help": "Whether to initialize the model on CPU. "
"If True, models on all processes will be first initialized on CPU; this is RAM-costly but faster."
},
)
resume_from_checkpoint: bool = field(default=False, metadata={"help": "If True, loads from last check point."})
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
def main():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
os.environ["WANDB_PROJECT"] = training_args.wandb_project
if training_args.deepspeed is not None:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = None
elif training_args.initialize_model_on_cpu:
ctx_mgr = contextlib.nullcontext()
device_map = None
low_cpu_mem_usage = True
else:
ctx_mgr = common.staggered_object_creation(
local_rank=training_args.local_rank, world_size=training_args.world_size
)
device_map = {"": training_args.device.index}
low_cpu_mem_usage = True
with ctx_mgr:
model: transformers.PreTrainedModel = common.make_generative_lm(
model_name_or_path=model_args.model_name_or_path,
flash_attn=training_args.flash_attn,
fp16=training_args.fp16,
bf16=training_args.bf16,
config=transformers.AutoConfig.from_pretrained(model_args.model_name_or_path),
cache_dir=training_args.cache_dir,
low_cpu_mem_usage=low_cpu_mem_usage,
device_map=device_map,
)
common.let_model_save_mem_when_zero_grad(model)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right", # Ensures properly masking out the source tokens.
use_fast=training_args.use_fast_tokenizer,
)
tokenizer.padding = training_args.padding
# Collect special tokens. Only add if non-existent.
special_tokens_dict = dict(additional_special_tokens=[])
if tokenizer.pad_token is None:
special_tokens_dict["pad_token"] = training_args.pad_token
if tokenizer.eos_token is None:
special_tokens_dict["eos_token"] = constants.DEFAULT_EOS_TOKEN
if tokenizer.bos_token is None:
special_tokens_dict["bos_token"] = constants.DEFAULT_BOS_TOKEN
if tokenizer.unk_token is None:
special_tokens_dict["unk_token"] = constants.DEFAULT_UNK_TOKEN
utils.stable_resize_token_embeddings_and_tokenizer(model, tokenizer, special_tokens_dict)
data_module: dict = data_utils.make_supervised_data_module(
tokenizer=tokenizer,
data_args=data_args,
training_args=training_args,
)
# Tokenizer is only supplied so that it gets saved; this makes loading easier.
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=training_args,
**data_module,
)
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
logger.warning("hooray! training finished successfully! now on to model saving.", main_process_only=True)
trainer.save_state()
common.safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
logger.warning("hooray again! model saving worked.", main_process_only=True)
if __name__ == "__main__":
main()
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
def main():
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = transformers.HfArgumentParser((DataArguments, TrainingArguments))
data_args, training_args = parser.parse_args_into_dataclasses()
accelerator = accelerate_patch.MyAccelerator(
gradient_accumulation_steps=training_args.gradient_accumulation_steps,
log_with=["wandb"],
even_batches=True, # Make sure the batch size on each device is the same.
split_batches=False, # Don't break a batch into smaller chunks.
step_scheduler_with_optimizer=False, # Untie optimizer and scheduler step.
# Value model might not use all parameters (e.g., lm-head) in the forward pass.
kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)],
)
accelerator.init_trackers(
training_args.wandb_project,
init_kwargs={"wandb": {"name": training_args.run_name}},
config=training_args.__dict__,
)
    logger.warning(accelerator.state, main_process_only=False)  # Each process logs its own state.
tokenizer: transformers.PreTrainedTokenizer = make_tokenizer(args=training_args)
model_module: dict = make_models(tokenizer=tokenizer, args=training_args, accelerator=accelerator)
data_module: dict = data_utils.make_rl_data_module(
tokenizer=tokenizer, data_args=data_args, training_args=training_args
)
trainer = PPOTrainer(
args=training_args,
accelerator=accelerator,
**data_module,
**model_module,
tokenizer=tokenizer,
)
trainer.train()
if __name__ == "__main__":
main()
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sample_mode_formatter = "temperature={temperature},max_new_tokens={max_new_tokens},seed={seed}"
def run_decode(
decoder_name_or_path: AnyPath,
dataset_path="tatsu-lab/alpaca_farm",
dataset_name: Optional[str] = "alpaca_farm_evaluation",
split="eval",
prompt_dict_path=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
output_path: AnyPathOrNone = None,
max_instances=sys.maxsize,
per_device_batch_size=4,
temperature=1.0,
max_new_tokens=300,
num_return_sequences=4,
mixed_precision=None,
tf32=False,
seed: Optional[int] = None,
):
"""Decode samples from the policy language model.
Args:
decoder_name_or_path: Name or path of the policy language model.
dataset_path: Path to the dataset for datasets.load_dataset.
dataset_name: Name of the dataset for datasets.load_dataset.
prompt_dict_path: Path to the prompt dictionary for formatting the instruction and input into a string.
output_path: Optional path to save the decoding results.
split: Split of the dataset to decode.
max_instances: Maximum number of instances to decode.
per_device_batch_size: Batch size for reranking for each device.
temperature: Temperature for decoding.
max_new_tokens: Maximum number of new tokens to generate.
seed: Random seed for decoding.
        num_return_sequences: Number of sequences to return for each prompt.
mixed_precision: Mixed precision mode for the reward model.
tf32: Whether to use tensorfloat32 for matrix multiplication.
Returns:
        List of dict data with keys 'instruction', 'input', 'output', 'prompt', 'decoder_name_or_path', 'sample_mode'.
        If num_return_sequences > 1, each 'output' is a list of strings. Otherwise, it is a string.
"""
dataset = datasets.load_dataset(dataset_path, dataset_name)
prompts, list_dict_data, metadata = data_preprocessor.format_prompt_with_data_frame(
df=pd.DataFrame(dataset[split]),
prompt_dict=utils.jload(prompt_dict_path),
)
prompts, list_dict_data = prompts[:max_instances], list_dict_data[:max_instances]
outputs = decode.decode_prompts_with_huggingface(
model_name_or_path=decoder_name_or_path,
prompts=prompts,
decoding_args=decode.HFDecodingArguments(
temperature=temperature, max_new_tokens=max_new_tokens, num_return_sequences=num_return_sequences
),
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
tf32=tf32,
seed=seed,
)
sample_mode = sample_mode_formatter.format(temperature=temperature, max_new_tokens=max_new_tokens, seed=seed)
return_list_dict_data = [
{
"instruction": dict_data["instruction"],
"input": dict_data["input"],
"output": output,
"prompt": prompt,
"decoder_name_or_path": decoder_name_or_path,
"sample_mode": sample_mode,
}
for dict_data, prompt, output in utils.zip_(list_dict_data, prompts, outputs)
]
if output_path is not None and distributed_utils.is_main_process():
utils.jdump(return_list_dict_data, output_path)
return return_list_dict_data
def run_rerank(
list_dict_data_or_path: Union[Sequence[Dict], AnyPath],
scorer_name_or_path: AnyPath,
output_path: AnyPathOrNone = None,
per_device_batch_size=4,
rerank_top_k=1,
mixed_precision=None,
tf32=False,
flash_attn=False,
):
"""Rerank sequences with reward model.
Args:
list_dict_data_or_path: Sequence of dict data or a path to it.
            Each dict should have a 'prompt' string and an 'output' list of candidate strings; the prompt and each
            candidate are concatenated before scoring.
scorer_name_or_path: Name or path of the reward model.
output_path: Optional path to save the rerank results.
per_device_batch_size: Batch size for reranking for each device.
rerank_top_k: Keep top k among the reranked sequences.
mixed_precision: Mixed precision mode for the reward model.
tf32: Whether to use tensorfloat32 for matrix multiplication.
flash_attn: Turns on flash_attn for the reward model if True.
Returns:
Rerank results as a list of dict data.
"""
if isinstance(list_dict_data_or_path, (str, pathlib.Path)):
list_dict_data_or_path = utils.jload(list_dict_data_or_path)
sequences = [
[dict_data["prompt"] + output for output in dict_data["output"]] for dict_data in list_dict_data_or_path
]
# TODO(lxuechen): FlashAttention reward model is not correctly loaded.
top_sequences, top_indices = score.rerank_sequences_with_huggingface(
sequences=sequences,
model_name_or_path=scorer_name_or_path,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
tf32=tf32,
flash_attn=flash_attn,
rerank_top_k=rerank_top_k,
)
return_list_dict_data = [
{
"instruction": dict_data["instruction"],
"input": dict_data["input"],
"output": dict_data["output"],
"top_sequence": top_sequence,
"top_index": top_index,
"scorer_name_or_path": scorer_name_or_path,
}
for top_sequence, top_index, dict_data in utils.zip_(top_sequences, top_indices, list_dict_data_or_path)
]
if output_path is not None and distributed_utils.is_main_process():
utils.jdump(return_list_dict_data, output_path)
return return_list_dict_data
def run_best_of_n(
decoder_name_or_path: AnyPath,
scorer_name_or_path: AnyPath,
output_path: AnyPathOrNone = None,
prompt_dict_path=pathlib.Path(__file__).parent / "prompts" / "v0_inputs_noinputs.json",
split="val",
per_device_batch_size=4,
max_instances=sys.maxsize,
temperature=1.0,
num_return_sequences=4,
max_new_tokens=300,
mixed_precision=None,
tf32=False,
flash_attn=False,
):
"""Chain together decoding and rerank."""
decode_return_list_dict_data = run_decode(
decoder_name_or_path=decoder_name_or_path,
prompt_dict_path=prompt_dict_path,
split=split,
max_instances=max_instances,
per_device_batch_size=per_device_batch_size,
temperature=temperature,
num_return_sequences=num_return_sequences,
max_new_tokens=max_new_tokens,
mixed_precision=mixed_precision,
tf32=tf32,
)
rerank_return_list_dict_data = run_rerank(
list_dict_data_or_path=decode_return_list_dict_data,
scorer_name_or_path=scorer_name_or_path,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
tf32=tf32,
flash_attn=flash_attn,
)
# Convert best-k-of-n into best-of-n.
return_list_dict_data = [
{
"instruction": rerank_dict_data["instruction"],
"input": rerank_dict_data["input"],
"output": rerank_dict_data["output"][rerank_dict_data["top_index"][0]],
"decoder_name_or_path": decoder_name_or_path,
"scorer_name_or_path": scorer_name_or_path,
"sample_mode": f"best_of_n_{decode_data_dict['sample_mode']}",
}
for decode_data_dict, rerank_dict_data in utils.zip_(decode_return_list_dict_data, rerank_return_list_dict_data)
]
if output_path is not None and distributed_utils.is_main_process():
utils.jdump(return_list_dict_data, output_path)
return return_list_dict_data
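# --- Illustrative sketch (not part of the original pipeline) ---
# run_best_of_n keeps, per prompt, the single candidate with the highest reward: run_rerank
# returns `top_index` (indices into the candidate list, best first), and indexing
# output[top_index[0]] converts best-k-of-n into best-of-1. The candidates and scores below are
# made up for illustration; no model is involved.
def _toy_best_of_n_sketch():
    candidates = ["draft a", "draft b", "draft c"]
    rewards = [0.1, 0.9, 0.4]  # hypothetical reward-model scores
    top_index = sorted(range(len(rewards)), key=lambda i: rewards[i], reverse=True)
    best_output = candidates[top_index[0]]
    print(top_index, best_output)  # [1, 2, 0] draft b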
def main(task, **kwargs):
globals()[task](**kwargs)
if __name__ == "__main__":
fire.Fire(main)
|
def update_version(file_path, new_version):
# Read in the file
with open(file_path, "r") as file:
filedata = file.read()
# Replace the target string
version_regex = r"__version__ = ['\"]([^'\"]*)['\"]"
filedata = re.sub(version_regex, f"__version__ = '{new_version}'", filedata)
# Write the file out again
with open(file_path, "w") as file:
file.write(filedata)
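# --- Illustrative sketch (not part of the original script) ---
# The substitution above rewrites whatever version string sits inside the quotes of a
# `__version__ = "..."` line; a quick in-memory check of the same regex (the '0.2.0' value is
# just an example).
def _toy_version_regex_sketch():
    import re

    old = '__version__ = "0.1.8"'
    new = re.sub(r"__version__ = ['\"]([^'\"]*)['\"]", "__version__ = '0.2.0'", old)
    print(new)  # __version__ = '0.2.0'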
if __name__ == "__main__":
# Get the version from command line arguments
new_version = sys.argv[1]
# Update the version
update_version("src/alpaca_farm/__init__.py", new_version)
|
# Copyright 2023 The Alpaca Team
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MultiProcessAdapter(logging.LoggerAdapter):
"""
An adapter to assist with logging in multiprocess.
`log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
or only the main executed one. Default is `main_process_only=True`.
This is almost like the logger in accelerate, but does not have annoying accelerate dependency.
"""
@staticmethod
def _should_log(main_process_only):
process_index_flag = is_main_process()
return not main_process_only or (main_process_only and process_index_flag)
def log(self, level, msg, *args, **kwargs):
"""
Delegates logger call after checking if we should log.
Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
        or only the main executed one. Default is `True` if not passed.
"""
main_process_only = kwargs.pop("main_process_only", True)
if self.isEnabledFor(level) and self._should_log(main_process_only):
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def get_logger(name: str, log_level: str = None):
"""
Returns a `logging.Logger` for `name` that can handle multiprocessing.
**By default, the logger only logs on the main process -- the process with env var LOCAL_RANK=0.**
If a log should be called on all processes, pass `main_process_only=False`
Args:
name (`str`):
The name for the logger, such as `__file__`
log_level (`str`, *optional*):
            The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if it is not set.
Example:
```python
>>> from alpaca_farm.logging import get_logger
>>> logger = get_logger(__name__)
>>> logger.info("My log", main_process_only=False)
>>> logger.debug("My log", main_process_only=True)
    >>> logger = get_logger(__name__, log_level="DEBUG")
>>> logger.info("My log")
>>> logger.debug("My second log")
```
"""
logger = logging.getLogger(name)
if log_level is not None:
logger.setLevel(log_level.upper())
return MultiProcessAdapter(logger, {})
class disable_logging(object):
def __enter__(self, *args, **kwargs):
logging.disable(logging.CRITICAL)
return self
def __exit__(self, *args, **kwargs):
logging.disable(logging.NOTSET)
def __call__(self, func):
def decorator(*args, **kwargs):
with self:
return func(*args, **kwargs)
return decorator
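# --- Illustrative usage sketch (not part of the original module) ---
# `disable_logging` works both as a context manager and as a decorator; both forms silence all
# logging for the wrapped region and restore it afterwards. The function below is hypothetical.
def _toy_disable_logging_sketch():
    logger = get_logger(__name__)

    with disable_logging():
        logger.warning("suppressed inside the context manager")

    @disable_logging()
    def _quiet_step():
        logger.warning("suppressed inside the decorated function")

    _quiet_step()
    logger.warning("visible again once logging is re-enabled")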
|
# Copyright 2023 The Alpaca Team
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
def create_optimizer(args, model: nn.Module, optimizer: Optional[optim.Optimizer] = None):
"""Create optimizer for trainer.
    This is a detached version of the `Trainer.create_optimizer` method.
We don't support sagemaker and fairscale for simplicity.
Reference:
https://github.com/huggingface/transformers/blob/main/src/transformers/trainer.py
"""
opt_model = model
if optimizer is None:
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)],
"weight_decay": args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)
optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if optimizer_cls.__name__ == "Adam8bit":
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
print(f"skipped {module}: {skipped / 2 ** 20}M params")
manager.register_module_override(module, "weight", {"optim_bits": 32})
logger.debug(f"bitsandbytes: will optimize {module} in fp32")
print(f"skipped: {skipped / 2 ** 20}M params")
return optimizer
def create_scheduler(args, optimizer, lr_scheduler, num_training_steps):
"""Create scheduler for trainer.
    This is a detached version of the `Trainer.create_scheduler` method.
Reference:
https://github.com/huggingface/transformers/blob/main/src/transformers/trainer.py
"""
if lr_scheduler is None:
lr_scheduler = get_scheduler(
args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return lr_scheduler
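# --- Illustrative usage sketch (not part of the original module) ---
# How the two helpers above are typically wired together. The TrainingArguments values below
# (output_dir, learning rate, warmup, scheduler type) are placeholders, and the tiny nn.Linear
# stands in for a real model.
def _toy_optimizer_scheduler_sketch():
    import torch.nn as nn
    import transformers

    model = nn.Linear(8, 2)
    args = transformers.TrainingArguments(
        output_dir="/tmp/toy_run",  # placeholder path
        learning_rate=1e-4,
        weight_decay=0.01,
        warmup_steps=10,
        lr_scheduler_type="cosine",
    )
    optimizer = create_optimizer(args, model)
    lr_scheduler = create_scheduler(args, optimizer, lr_scheduler=None, num_training_steps=100)
    print(type(optimizer).__name__, type(lr_scheduler).__name__)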
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "<s>"
DEFAULT_UNK_TOKEN = "<unk>"
DEFAULT_CACHE_DIR = None
WANDB_PROJECT = "alpaca_farm"
MODEL_NAME_TO_CONFIG = {
"llama-7b": {"model_type": "llama", "num_hidden_layers": 32, "hidden_size": 4096},
"llama-13b": {"model_type": "llama", "num_hidden_layers": 40, "hidden_size": 5120},
"llama-30b": {"model_type": "llama", "num_hidden_layers": 60, "hidden_size": 6656},
"llama-65b": {"model_type": "llama", "num_hidden_layers": 80, "hidden_size": 8192},
}
MODEL_NAME_TO_FAMILY = {
"distilgpt2": "gpt2",
"gpt2": "gpt2",
"gpt2-medium": "gpt2",
"gpt2-large": "gpt2",
"gpt2-xl": "gpt2",
"facebook/opt-iml-max-1.3b": "opt",
"facebook/opt-125m": "opt",
"facebook/opt-350m": "opt",
"facebook/opt-1.3b": "opt",
"facebook/opt-2.7b": "opt",
"facebook/opt-6.7b": "opt",
"facebook/opt-13b": "opt",
"facebook/opt-30b": "opt",
"llama-teeny": "llama",
"llama-7b": "llama",
"llama-13b": "llama",
"llama-30b": "llama",
"llama-65b": "llama",
"EleutherAI/pythia-2.8b-deduped": "pythia",
"EleutherAI/pythia-6.9b-deduped": "pythia",
"EleutherAI/pythia-12b-deduped": "pythia",
}
# Huggingface model naming convention.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.1.8'
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
AnyPath = Union[str, os.PathLike, pathlib.Path]
AnyPathOrNone = Optional[AnyPath]
AnyData = Union[Sequence[dict[str, Any]], pd.DataFrame, datasets.Dataset]
Numeric = Union[int, float]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
TensorList = List[Tensor]
StrOrStrs = Union[str, Sequence[str]]
if torch.__version__ < "2.0.0":
LRScheduler = torch.optim.lr_scheduler._LRScheduler # noqa
else:
LRScheduler = torch.optim.lr_scheduler.LRScheduler
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Trainer(transformers.Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
# input_ids, attention_mask each of size (bsz, num_candidates, seq_len).
# index_0, index_1 each of size (bsz, num_pairs); indexes into input_ids.
# choice of size (bsz, num_pairs); 1 if index_1's seq is chosen, 0 otherwise.
input_ids, attention_mask, index_0, index_1, choice = common.unpack_dict(
inputs, keys=("input_ids", "attention_mask", "index_0", "index_1", "choice")
)
num_candidates, num_pairs = input_ids.size(1), choice.size(1)
input_ids_flat, attention_mask_flat = tuple(
einops.rearrange(x, "b c l -> (b c) l") for x in (input_ids, attention_mask)
)
outputs = model(input_ids=input_ids_flat, attention_mask=attention_mask_flat)
rewards_flat = outputs.rewards
rewards = einops.rearrange(rewards_flat, "(b c) -> b c", c=num_candidates) # Size: (bsz, num_candidates).
rewards_0, rewards_1 = tuple(
torch_ops.batch_select(rewards, index) for index in (index_0, index_1)
) # Size: (bsz, num_pairs).
logits = rewards_1 - rewards_0 # Size: (bsz, num_pairs).
# Type casting of `choice` is due to amp.autocast context manager.
loss = F.binary_cross_entropy_with_logits(logits, choice.to(logits.dtype), reduction="mean")
return (loss, dict(logits=logits)) if return_outputs else loss
def compute_reward_modeling_metrics(eval_prediction: EvalPrediction) -> Dict:
# eval_prediction.label_ids is a tuple that matches up with `training_args.label_names`.
logits = torch.tensor(eval_prediction.predictions).squeeze(-1)
labels = torch.tensor(eval_prediction.label_ids[-1]).squeeze(-1)
predictions = (logits >= 0.0).long()
accuracy = predictions.eq(labels).float().mean().item()
label_positive_rate = (labels == 1).float().mean().item()
positive_rate = (predictions == 1).float().mean().item()
true_positive_rate = (predictions * labels).float().sum().item() / labels.sum().item()
false_positive_rate = (predictions * (1 - labels)).float().sum().item() / (1 - labels).sum().item()
return dict(
accuracy=accuracy,
label_positive_rate=label_positive_rate,
positive_rate=positive_rate,
true_positive_rate=true_positive_rate,
false_positive_rate=false_positive_rate,
)
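# --- Illustrative sketch (not part of the original trainer) ---
# The pairwise loss above scores every candidate, gathers the rewards of the two candidates in
# each annotated pair, and applies binary cross-entropy to their difference (label 1 means the
# second candidate was preferred). The toy numbers below stand in for model outputs; plain
# torch.gather replaces the `batch_select` helper.
def _toy_pairwise_reward_loss_sketch():
    import torch
    import torch.nn.functional as F

    rewards = torch.tensor([[0.2, 1.5, -0.3]])  # (bsz=1, num_candidates=3)
    index_0 = torch.tensor([[0, 2]])  # first element of each pair
    index_1 = torch.tensor([[1, 1]])  # second element of each pair
    choice = torch.tensor([[1, 1]])  # candidate at index_1 preferred in both pairs
    rewards_0 = torch.gather(rewards, 1, index_0)
    rewards_1 = torch.gather(rewards, 1, index_1)
    logits = rewards_1 - rewards_0  # (bsz, num_pairs)
    loss = F.binary_cross_entropy_with_logits(logits, choice.float())
    print(loss.item())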
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .data_preprocessor import (  # NOTE: the opening line of this import was missing; module path assumed from context.
    BinaryRewardModelingDataset,
DataCollatorForBinaryRewardModelingDataset,
DataCollatorForSFTDataset,
DataCollatorForStackableDataset,
QueryDataset,
SFTDataset,
split_train_into_train_and_eval,
)
logger = logging.get_logger(__name__)
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer,
data_args,
training_args,
):
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_instructions = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in data_args.train_splits])
train_dataset = SFTDataset(
df=train_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
)
eval_dataset = None
if data_args.eval_splits is not None:
found_splits = [
pd.DataFrame(alpaca_instructions[split]) for split in data_args.eval_splits if split in alpaca_instructions
]
if len(found_splits) > 0:
eval_df = pd.concat(found_splits)
eval_dataset = SFTDataset(
df=eval_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
)
if eval_dataset is None:
logger.warning("Didn't find evaluation dataset. Disabling evaluation.")
training_args.do_eval = False
data_collator = DataCollatorForSFTDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator)
def make_binary_reward_modeling_data_module(
tokenizer: transformers.PreTrainedTokenizer,
data_args,
training_args,
):
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_human_preference = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.DataFrame(alpaca_human_preference["preference"])
train_dataset = BinaryRewardModelingDataset(
df=train_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
end_sequence_with_eos=training_args.end_sequence_with_eos,
)
train_dataset, eval_dataset = split_train_into_train_and_eval(
train_dataset=train_dataset,
eval_size=data_args.eval_size,
seed=training_args.seed,
)
data_collator = DataCollatorForBinaryRewardModelingDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator)
def make_rl_data_module(
tokenizer: transformers.PreTrainedTokenizer,
data_args,
training_args,
):
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_instructions = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in data_args.train_splits])
eval_df = pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in data_args.eval_splits])
if getattr(training_args, "num_reward_tokens", 0) > 0 and not getattr(
training_args, "train_on_best_quantile", True
):
prompt_postprocessor = RewardConditioningPromptPostprocessor()
else:
prompt_postprocessor = None
train_dataset = QueryDataset(
df=train_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
query_len=training_args.query_len,
prompt_postprocessor=prompt_postprocessor,
)
eval_dataset = QueryDataset(
df=eval_df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
query_len=training_args.query_len,
prompt_postprocessor=prompt_postprocessor,
)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=DataCollatorForStackableDataset())
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light wrapper for OpenAI API.
Reference API:
https://beta.openai.com/docs/api-reference/completions/create
Internal map:
https://github.com/lxuechen/ml-swissknife/blob/main/ml_swissknife/openai_utils.py
"""
StrOrOpenAIObject = Union[str, openai_object.OpenAIObject]
openai_org = os.getenv("OPENAI_ORG")
if openai_org is not None:
openai.organization = openai_org
logging.warning(f"Switching to organization: {openai_org} for OAI API key.")
@dataclasses.dataclass
class OpenAIDecodingArgumentsBase(object):
max_tokens: int = 1800
temperature: float = 0.2
top_p: float = 1.0
n: int = 1
stream: bool = False
stop: Optional[Sequence[str]] = None
# Heuristic stop when about to generate next function.
# stop: Optional[Tuple[str, ...]] = ("}\n\nstatic", "}\n\n/*")
presence_penalty: float = 0.0
frequency_penalty: float = 0.0
# If you need these, pass them in as decoding_kwargs.
# best_of: int = 1
# logit_bias: dict = None
@dataclasses.dataclass
class OpenAIDecodingArguments(OpenAIDecodingArgumentsBase):
suffix: Optional[str] = None
logprobs: Optional[int] = None
echo: bool = False
@dataclasses.dataclass
class OpenAIDecodingArgumentsChat(OpenAIDecodingArgumentsBase):
# Currently there are no chat-specific arguments beyond the base class.
pass
def requires_chatml(model: str) -> bool:
"""Whether a model requires the ChatML format."""
# TODO: this should ideally be an OpenAI function... Maybe it already exists?
return "turbo" in model or "gpt-4" in model
def convert_dict_to_openai_object(data: dict) -> openai_object.OpenAIObject:
return_data = openai_object.OpenAIObject()
return_data.update(data)
return return_data
def _openai_completion_helper(
prompt_batch: Sequence[StrOrOpenAIObject],
is_chat: bool,
sleep_time: int,
openai_organization_ids: Optional[Sequence[str]] = None,
openai_api_key: Optional[str] = os.environ.get("OPENAI_API_KEY", None),
**shared_kwargs,
):
if openai_api_key is not None:
openai.api_key = openai_api_key
# randomly select orgs
if openai_organization_ids is not None:
openai.organization = random.choice(openai_organization_ids)
# copy shared_kwargs to avoid modifying it
shared_kwargs = copy.deepcopy(shared_kwargs)
while True:
try:
if is_chat:
completion_batch = openai.ChatCompletion.create(messages=prompt_batch[0], **shared_kwargs)
choices = completion_batch.choices
for choice in choices:
assert choice.message.role == "assistant"
if choice.message.content == "":
choice["text"] = " " # annoying doesn't allow empty string
else:
choice["text"] = choice.message.content
else:
completion_batch = openai.Completion.create(prompt=prompt_batch, **shared_kwargs)
choices = completion_batch.choices
for choice in choices:
choice["total_tokens"] = completion_batch.usage.total_tokens / len(prompt_batch)
break
except openai.error.OpenAIError as e:
logging.warning(f"OpenAIError: {e}.")
if "Please reduce your prompt" in str(e):
shared_kwargs["max_tokens"] = int(shared_kwargs["max_tokens"] * 0.8)
logging.warning(f"Reducing target length to {shared_kwargs['max_tokens']}, Retrying...")
else:
logging.warning("Hit request rate limit; retrying...")
if openai_organization_ids is not None and len(openai_organization_ids) > 1:
openai.organization = random.choice(
[o for o in openai_organization_ids if o != openai.organization]
)
logging.warning(f"Switching to organization: {openai.organization} for OAI API key.")
time.sleep(sleep_time) # Annoying rate limit on requests.
return choices
def _openai_completion(
prompts: Union[str, Sequence[str], Sequence[dict[str, str]], dict[str, str]],
decoding_args: OpenAIDecodingArguments,
model_name="text-davinci-003",
sleep_time=2,
batch_size=1,
max_instances=sys.maxsize,
max_batches=sys.maxsize,
return_text=False,
num_procs=1,
**decoding_kwargs,
) -> Union[StrOrOpenAIObject, Sequence[StrOrOpenAIObject], Sequence[Sequence[StrOrOpenAIObject]]]:
"""Decode with OpenAI API.
Args:
prompts: A string or a list of strings to complete. If it is a chat model the strings should be formatted
as explained here: https://github.com/openai/openai-python/blob/main/chatml.md. If it is a chat model
it can also be a dictionary (or list thereof) as explained here:
https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
decoding_args: Decoding arguments.
model_name: Model name. Can be either in the format of "org/model" or just "model".
sleep_time: Time to sleep once the rate-limit is hit.
batch_size: Number of prompts to send in a single request. Only used for non-chat models.
max_instances: Maximum number of prompts to decode.
max_batches: Maximum number of batches to decode. This argument will be deprecated in the future.
return_text: If True, return text instead of full completion object (which contains things like logprob).
decoding_kwargs: Additional decoding arguments. Pass in `best_of` and `logit_bias` if you need them.
Returns:
A completion or a list of completions.
Depending on return_text and decoding_args.n, the completion type can be one of
- a string (if return_text is True)
- an openai_object.OpenAIObject object (if return_text is False)
- a list of objects of the above types (if decoding_args.n > 1)
"""
logging.info(f"Decoding with OpenAI API model {model_name} and numproc == {num_procs}.")
is_single_prompt = isinstance(prompts, (str, dict))
if is_single_prompt:
prompts = [prompts]
# convert prompts to chat format
is_chat = requires_chatml(model_name)
is_chat_format = isinstance(prompts[0], dict)
if is_chat:
if batch_size > 1:
logging.warning("batch_size > 1 is not supported yet for chat models. Setting to 1")
batch_size = 1
if not is_chat_format:
prompts = [prompt_to_chatml(prompt) for prompt in prompts]
if max_batches < sys.maxsize:
logging.warning(
"`max_batches` will be deprecated in the future, please use `max_instances` instead."
"Setting `max_instances` to `max_batches * batch_size` for now."
)
max_instances = max_batches * batch_size
prompts = prompts[:max_instances]
num_prompts = len(prompts)
prompt_batches = [
prompts[batch_id * batch_size : (batch_id + 1) * batch_size]
for batch_id in range(int(math.ceil(num_prompts / batch_size)))
]
shared_kwargs = dict(
model=model_name,
**decoding_args.__dict__,
)
shared_kwargs.update(decoding_kwargs) # override default arguments if specified
with multiprocessing.Pool(num_procs) as p:
partial_completion_helper = functools.partial(
_openai_completion_helper, sleep_time=sleep_time, is_chat=is_chat, **shared_kwargs
)
completions = list(
tqdm.tqdm(
p.imap(partial_completion_helper, prompt_batches),
desc="prompt_batches",
total=len(prompt_batches),
)
)
# flatten the list
completions = [completion for completion_batch in completions for completion in completion_batch]
if return_text:
completions = [completion.text for completion in completions]
if decoding_args.n > 1:
# make completions a nested list, where each entry is a consecutive decoding_args.n of original entries.
completions = [completions[i : i + decoding_args.n] for i in range(0, len(completions), decoding_args.n)]
if is_single_prompt:
# Return non-tuple if only 1 input and 1 generation.
(completions,) = completions
return completions
def string_to_dict(to_convert):
"""Converts a string with equal signs to dictionary. E.g.
>>> string_to_dict(" name=user university=stanford")
{'name': 'user', 'university': 'stanford'}
"""
return {s.split("=", 1)[0]: s.split("=", 1)[1] for s in to_convert.split(" ") if len(s) > 0}
def prompt_to_chatml(prompt: str, start_token: str = "<|im_start|>", end_token: str = "<|im_end|>"):
"""Convert a text prompt to ChatML formal
Examples
--------
>>> prompt = "<|im_start|>system\nYou are a helpful assistant.\n<|im_end|>\n<|im_start|>system
name=example_user\nKnock knock.\n<|im_end|>\n<|im_start|>system name=example_assistant\nWho's
there?\n<|im_end|>\n<|im_start|>user\nOrange.\n<|im_end|>"
>>> print(prompt)
<|im_start|>system
You are a helpful assistant.
<|im_end|>
<|im_start|>system name=example_user
Knock knock.
<|im_end|>
<|im_start|>system name=example_assistant
Who's there?
<|im_end|>
<|im_start|>user
Orange.
<|im_end|>
>>> prompt_to_chatml(prompt)
[{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'Knock knock.'},
{'role': 'assistant', 'content': "Who's there?"},
{'role': 'user', 'content': 'Orange.'}]
"""
prompt = prompt.strip()
assert prompt.startswith(start_token)
assert prompt.endswith(end_token)
message = []
for p in prompt.split("<|im_start|>")[1:]:
newline_splitted = p.split("\n", 1)
role = newline_splitted[0].strip()
content = newline_splitted[1].split(end_token, 1)[0].strip()
if role.startswith("system") and role != "system":
# based on https://github.com/openai/openai-cookbook/blob/main/examples
# /How_to_format_inputs_to_ChatGPT_models.ipynb
# and https://github.com/openai/openai-python/blob/main/chatml.md it seems that system can specify a
# dictionary of other args
other_params = string_to_dict(role.split("system", 1)[-1])
role = "system"
else:
other_params = dict()
message.append(dict(content=content, role=role, **other_params))
return message
# Public alias of the private function, kept for backwards compatibility.
openai_completion = _openai_completion
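# Usage sketch (illustrative only; the model name and prompt are placeholders, and an
# OPENAI_API_KEY is assumed to be set in the environment):
#
#   decoding_args = OpenAIDecodingArguments(max_tokens=64, temperature=0.7)
#   texts = openai_completion(
#       prompts=["Write a one-line haiku about the sea."],
#       decoding_args=decoding_args,
#       model_name="text-davinci-003",
#       return_text=True,
#   )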
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
def apex_is_installed():
try:
import apex
return True
except ImportError as _:
return False
def flash_attn_is_installed():
try:
import flash_attn
return True
except ImportError as _:
return False
class staggered_object_creation(object):
"""
Object creation in a distributed setting can be very RAM-intensive.
This context manager staggers the creation of objects on odd and even ranks, so that not all objects
are created at once.
Assumes local_rank == -1 means no distributed training.
"""
def __init__(self, local_rank: int, world_size: int):
super().__init__()
self.local_rank = local_rank
self.world_size = world_size
def __enter__(self, *args, **kwargs):
del args, kwargs
if self.world_size > 1 and self.local_rank % 2 == 0:
dist.barrier()
return self
def __exit__(self, *args, **kwargs):
del args, kwargs
if self.world_size > 1:
if self.local_rank % 2 == 1:
dist.barrier()
dist.barrier() # Final safety barrier.
def __call__(self, func):
def decorator(*args, **kwargs):
with self:
return func(*args, **kwargs)
return decorator
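# Usage sketch (illustrative; `local_rank` and `world_size` are assumed to come from the
# launcher environment, e.g. torchrun):
#
#   with staggered_object_creation(local_rank=local_rank, world_size=world_size):
#       model = make_generative_lm(model_name_or_path, flash_attn=False)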
def make_generative_lm(
model_name_or_path: str,
flash_attn: bool,
fp16: Optional[bool] = None,
bf16: Optional[bool] = None,
mixed_precision: Optional[str] = None,
**kwargs,
):
if fp16 is None:
fp16 = mixed_precision == "fp16"
if bf16 is None:
bf16 = mixed_precision == "bf16"
if flash_attn and not fp16 and not bf16:
logger.warning(
"Flash attention does not support fp32. Reverting to standard attention.", main_process_only=True
)
flash_attn = False
if flash_attn and flash_attn_is_installed():
from .flash_models import flash_llama
model_cls = flash_llama.LlamaForCausalLM
else:
model_cls = transformers.LlamaForCausalLM
return model_cls.from_pretrained(model_name_or_path, **kwargs)
def let_model_save_mem_when_zero_grad(model: nn.Module):
def new_zero_grad(self, set_to_none: bool = True) -> None:
r"""Sets gradients of all model parameters to zero. See similar function
under :class:`torch.optim.Optimizer` for more context.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
See :meth:`torch.optim.Optimizer.zero_grad` for details.
"""
if getattr(self, "_is_replica", False):
warnings.warn(
"Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "
"The parameters are copied (in a differentiable manner) from the original module. "
"This means they are not leaf nodes in autograd and so don't accumulate gradients. "
"If you need gradients in your forward method, consider using autograd.grad instead."
)
for p in self.parameters():
if p.grad is not None:
if set_to_none:
p.grad = None
else:
if p.grad.grad_fn is not None:
p.grad.detach_()
else:
p.grad.requires_grad_(False)
p.grad.zero_()
# Make zero_grad `set_to_none=True` by default.
# Need this runtime method patching, since self is used within zero_grad.
model.zero_grad = types.MethodType(new_zero_grad, model)
return model
def safe_save_model_for_hf_trainer(
trainer: transformers.Trainer, output_dir: str, give_rw_access=True, rank0_only=True
):
"""Collects the state dict and dump to disk."""
now = time.perf_counter()
if trainer.fsdp is not None:
# NOTE(rtaori): technically should be rank0_only=True (otherwise duplicates model in RAM),
# but currently there seems to be a bug in FSDP that causes it to hang.
# Migration to Pytorch 2 should fix this.
# Once we migrate, we can also implement more efficient loading:
# https://github.com/pytorch/pytorch/blob/master/torch/distributed/fsdp/api.py#L286-L295
# NOTE(tianyi): tested on sphinx6, seems to work fine with rank0_only=False
cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
with FSDP.state_dict_type(trainer.model, StateDictType.FULL_STATE_DICT, cfg):
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
trainer._save(output_dir, state_dict=state_dict) # noqa
elif trainer.deepspeed is not None:
# --- The stuff below is almost a copy from transformers.trainer.Trainer.save_model (transformers==4.27.3) ---
# this takes care of everything as long as we aren't under zero3
if trainer.args.should_save:
trainer._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
# either use deepspeed checkpoint to resume or to recover full weights use
# zero_to_fp32.py stored in the checkpoint.
if trainer.args.should_save:
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
logger.warning(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_16bit_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
if not trainer.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
logger.warning(
"deepspeed.save_16bit_model didn't save the model, since"
" stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
" zero_to_fp32.py to recover weights"
)
trainer.deepspeed.save_checkpoint(output_dir)
# --- End of shameless copy ---
# Auto-convert the checkpoint to fp32 for easier downstream use.
# Only rank0 shall do the checkpoint conversion to prevent race conditions.
if trainer.args.should_save:
try:
os.system(
f"python {output_dir}/zero_to_fp32.py '{output_dir}' '{output_dir}/pytorch_model.bin'"
)
except Exception as e:
logger.fatal(f"Failed to convert zero3 checkpoint to fp32: {e}")
else: # Also support saving for non-FSDP models.
# NOTE(lxuechen): Saving and loading T5 has weird pickle issues due to device map.
# Wasn't able to exactly pinpoint. But saving to and loading from CPU seems to work.
# In principle, trainer.save_model() should do the same thing, but breaks in practice.
# We drop T5 support.
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
if trainer.args.should_save:
if give_rw_access:
try:
os.system(f"chmod -R a+xwr {output_dir}")
except Exception as e:
logger.fatal(f"Failed to give read-write access to {output_dir}: {e}")
logger.warning(f"Saving model took {time.perf_counter() - now:.2f} seconds.")
def flatten_dict(nested, sep=".", postprocess_fn=lambda x: x):  # Identity default; the old `lambda *args: args` wrapped values in 1-tuples.
def rec(nest, prefix, into):
for k, v in nest.items():
if sep in k:
raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'")
if isinstance(v, dict): # collections.Mapping fails in py3.10.
rec(v, prefix + k + sep, into)
else:
v = postprocess_fn(v)
into[prefix + k] = v
flat = {}
rec(nested, "", flat)
return flat
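# Illustrative example (passing the identity `postprocess_fn` explicitly):
#
#   >>> flatten_dict({"a": {"b": 1, "c": {"d": 2}}}, postprocess_fn=lambda x: x)
#   {'a.b': 1, 'a.c.d': 2}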
def unpack_dict(d: Dict, keys: Sequence[str], return_type: type = tuple) -> Union[Sequence, Dict]:
if return_type in (tuple, list):
return return_type(d[key] for key in keys)
elif return_type == dict:
return {key: d[key] for key in keys}
else:
raise ValueError(f"Unknown return_type: {return_type}")
def merge_dict(dicts: Sequence[dict], merge_fn: Callable = lambda x: x) -> dict:  # Identity default; `merge_fn` receives the list of per-dict values.
"""Merge a sequence of dicts (with the same set of keys) into a single dict."""
if len(dicts) == 0:
return dict()
return {key: merge_fn([dict_[key] for dict_ in dicts]) for key in dicts[0].keys()}
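# Illustrative example (`merge_fn` receives the list of per-dict values for each key):
#
#   >>> merge_dict([{"a": 1, "b": 2}, {"a": 3, "b": 4}], merge_fn=sum)
#   {'a': 4, 'b': 6}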
def model_name_or_path_exists(model_name_or_path: AnyPath) -> bool:
try:
transformers.PretrainedConfig.get_config_dict(model_name_or_path)
except OSError:
return os.path.exists(Path(model_name_or_path) / "trainer_state.json")
return True
def get_transformer_hidden_size(model: transformers.PreTrainedModel):
if isinstance(model, transformers.GPT2LMHeadModel):
hidden_size_attr_name = "n_embd"
elif isinstance(model, transformers.OPTForCausalLM):
hidden_size_attr_name = "word_embed_proj_dim"
elif isinstance(model, transformers.T5ForConditionalGeneration):
hidden_size_attr_name = "d_model"
else:
# Hack to deal with the fact that transformers library changed the LLaMA model name.
llama_cls = getattr(
transformers, "LLaMAForCausalLM" if hasattr(transformers, "LLaMAForCausalLM") else "LlamaForCausalLM"
)
if isinstance(model, llama_cls):
hidden_size_attr_name = "hidden_size"
else:
raise ValueError(f"Unknown base_model type: {type(model)}")
return getattr(model.config, hidden_size_attr_name)
from typing import Any, Mapping  # Used by `prepare_inputs` below; moved out of the function body above.
def prepare_inputs(data: Union[torch.Tensor, Any], device: Union[str, int, torch.device]) -> Union[torch.Tensor, Any]:
if isinstance(data, Mapping):
return type(data)({k: prepare_inputs(v, device) for k, v in data.items()}) # noqa
elif isinstance(data, (tuple, list)):
return type(data)(prepare_inputs(v, device) for v in data)
elif isinstance(data, torch.Tensor):
return data.to(device) # This can break with deepspeed.
return data
def cast_with_native_amp(func: Callable, mixed_precision: Optional[str] = None) -> Callable:
"""Almost like how huggingface accelerate cast `model.forward`."""
if mixed_precision not in ("fp16", "bf16"):
logger.warning(f"Unknown mixed precision mode: {mixed_precision}, falling back to fp32.")
return func
if mixed_precision == "fp16" and is_torch_version(">=", "1.10"):
output_func = torch.cuda.amp.autocast(dtype=torch.float16)(func)
else:
device_type = "cuda" if torch.cuda.is_available() else "cpu"
output_func = torch.autocast(device_type=device_type, dtype=torch.bfloat16)(func)
output_func = convert_outputs_to_fp32(output_func)
return output_func
def prepare_model_for_custom_fn(model: nn.Module, fn_name: str, accelerator: accelerate.Accelerator) -> nn.Module:
"""Wrap a custom function of a model with the right mixed precision context.
This function should be run on *raw* model, i.e., before wrapped into DDP or FSDP.
"""
if accelerator.native_amp:
# Store original function.
original_fn_name = f"_original_{fn_name}"
original_fn = getattr(model, fn_name)
setattr(model, original_fn_name, original_fn)
# Set the new, autocast-wrapped function.
wrapped_fn = cast_with_native_amp(original_fn, mixed_precision=accelerator.mixed_precision)
setattr(model, fn_name, wrapped_fn)
return model
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utility functions.
Internal map:
https://github.com/lxuechen/ml-swissknife/blob/main/ml_swissknife/utils.py
"""
logger = logging.get_logger(__name__)
home = os.path.expanduser("~")
home_data = os.path.join(home, "data")
join = os.path.join
pathexists = os.path.exists
makedirs = functools.partial(os.makedirs, exist_ok=True)
dirname = os.path.dirname
basename = os.path.basename
def alleq(l: Sequence, f: Optional[Callable] = lambda x, y: x == y):
"""Check all arguments in a sequence are equal according to a given criterion.
Args:
f: A bi-variate boolean function.
l: A list/tuple.
Returns:
True if everything is equal; otherwise False.
"""
return all(f(l[0], li) for li in l[1:])
def zip_(*args: Sequence):
"""Assert sequences of same length before zipping."""
if len(args) == 0:
return []
assert alleq(args, lambda x, y: len(x) == len(y))
return zip(*args)
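# Illustrative example: `zip_` behaves like `zip`, but asserts equal lengths first.
#
#   >>> list(zip_([1, 2], ["a", "b"]))
#   [(1, 'a'), (2, 'b')]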
def _make_w_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f_dirname = os.path.dirname(f)
if f_dirname != "":
makedirs(f_dirname)
f = open(f, mode=mode)
return f
def _make_r_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
return f
def jload(f, mode="r"):
"""Load a .json file into a dictionary."""
f = _make_r_io_base(f, mode)
jdict = json.load(f)
f.close()
return jdict
def jdump(obj: Union[str, dict, list], f, mode="w", indent=4, default=str):
"""Dump a str or dictionary to a file in json format.
Args:
obj: An object to be written.
f: A string path to the location on disk.
mode: Mode for opening the file.
indent: Indent for storing json dictionaries.
default: A function to handle non-serializable entries; defaults to `str`.
"""
f = _make_w_io_base(f, mode)
if isinstance(obj, (dict, list)):
json.dump(obj, f, indent=indent, default=default)
elif isinstance(obj, str):
f.write(obj)
else:
raise ValueError(f"Unexpected type: {type(obj)}")
f.close()
def jdumps(obj, indent=4, default=str):
return json.dumps(obj, indent=indent, default=default)
def mean(*seqs: Sequence[Numeric]) -> Union[Numeric, Sequence[Numeric]]:
singleton = len(seqs) == 1
means = [float(np.mean(seq)) for seq in seqs]
return means[0] if singleton else means
def stable_resize_token_embeddings_and_tokenizer(
model: transformers.PreTrainedModel,
tokenizer: transformers.PreTrainedTokenizer,
special_tokens_dict: dict,
):
"""Resize tokenizer and embedding together.
For new tokens, the embedding value is the average of all old embedding vectors.
"""
tokenizer.add_special_tokens(special_tokens_dict)
stable_resize_token_embeddings(model, len(tokenizer))
def stable_resize_token_embeddings(model: transformers.PreTrainedModel, target_size: int, jitter_new_embeddings=False):
num_new_tokens = target_size - model.get_input_embeddings().weight.size(0)
model.resize_token_embeddings(target_size)
if num_new_tokens > 0:
@torch.inference_mode()
def stable_init(embedding):
embedding_data = embedding.weight.data
embedding_avg = embedding_data[:-num_new_tokens].mean(dim=0, keepdim=True)
embedding_data[-num_new_tokens:] = embedding_avg
if jitter_new_embeddings:
embedding_std = embedding_data[:-num_new_tokens].std(dim=0, keepdim=True)
# The random tensor must be of the same shape as the new embeddings.
embedding_data[-num_new_tokens:] += torch.randn_like(embedding_data[-num_new_tokens:]) * embedding_std
input_embeddings = model.get_input_embeddings() # Must grab this again after resize.
output_embeddings = model.get_output_embeddings()
# It doesn't matter if there's weight sharing or not; with sharing, the second init will overwrite the first.
for embeddings in (input_embeddings, output_embeddings):
stable_init(embeddings)
def convert_str_dtype_to_torch_dtype(str_dtype: Optional[str]):
if str_dtype in ("single", "float32", "float", "fp32", None):
return torch.float
elif str_dtype in ("half", "float16", "fp16"):
return torch.float16
elif str_dtype in ("bfloat16", "bf16"):
return torch.bfloat16
elif str_dtype in ("double", "float64"):
return torch.float64
else:
raise ValueError(f"Unknown dtype: {str_dtype}")
def manual_seed(args_or_seed: Union[int, argparse.Namespace], fix_cudnn=False):
if hasattr(args_or_seed, "seed"):
args_or_seed = args_or_seed.seed
random.seed(args_or_seed)
np.random.seed(args_or_seed)
torch.manual_seed(args_or_seed)
torch.cuda.manual_seed_all(args_or_seed)
os.environ["PYTHONHASHSEED"] = str(args_or_seed)
if fix_cudnn:
torch.backends.cudnn.deterministic = True # noqa
torch.backends.cudnn.benchmark = False # noqa
class InfiniteLoader(object):
"""Wraps an existing loader so that it outputs stuff indefinitely; useful for semi-supervised learning."""
def __init__(self, loader: DataLoader):
super(InfiniteLoader, self).__init__()
self.loader = loader
self.iterator = iter(loader)
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
self.iterator = iter(self.loader)
return next(self.iterator)
def parallel_sort(*args: Sequence, key=None, reverse=False):
"""Parallel sort of multiple lists."""
if key is None:
# Parallel sort based on the order of the first list.
key = lambda inputs: inputs[0] # noqa
ret = sorted(zip_(*args), key=key, reverse=reverse)
return tuple([ret_i[j] for ret_i in ret] for j in range(len(args)))
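# Illustrative example: all lists are reordered by the sort order of the first one.
#
#   >>> parallel_sort([3, 1, 2], ["c", "a", "b"])
#   ([1, 2, 3], ['a', 'b', 'c'])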
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for PyTorch's distributed training.
Compatible with torchrun / elastic.
Internal map:
https://github.com/lxuechen/ml-swissknife/blob/main/ml_swissknife/distributed_utils.py
"""
def setup(rank: Optional[int] = None, world_size: Optional[int] = None):
if rank is None:
rank = get_local_rank()
if world_size is None:
world_size = get_world_size()
if world_size <= 1:
return rank, world_size
if not dist.is_initialized():
if sys.platform == "win32":
# Distributed package only covers collective communications with Gloo
# backend and FileStore on Windows platform. Set init_method parameter
# in init_process_group to a local file.
# Example init_method="file:///f:/libtmp/some_file"
init_method = "file:///f:/libtmp/dist-tmp"
dist.init_process_group(backend="gloo", init_method=init_method, rank=rank, world_size=world_size)
elif torch.cuda.is_available():
dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
else:
dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)
return rank, world_size
def cleanup():
dist.destroy_process_group()
def get_local_rank():
return int(os.getenv("LOCAL_RANK", 0))
def get_world_size():
return int(os.getenv("WORLD_SIZE", 1))
def should_save():
"""Return True if the current process is the main process."""
return get_local_rank() <= 0
def all_gather_and_cat(tensor: Tensor, dim=0):
if get_world_size() > 1:
tensor_list = [torch.empty_like(tensor) for _ in range(get_world_size())]
dist.all_gather(tensor_list, tensor)
tensor = torch.cat(tensor_list, dim=dim)
return tensor
is_main_process = should_save
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Postprocessors for prompts and data frames.
Internal map:
https://github.com/lxuechen/human-feedback/blob/main/instruction_following/postprocessor.py
"""
@dataclass
class SequentialPostProcessor(object):
operations: Sequence[Callable]
def __post_init__(self):
special_tokens = []
for operation in self.operations:
if hasattr(operation, "special_tokens"):
special_tokens.extend(operation.special_tokens)
self.special_tokens = special_tokens
def __call__(self, df: Union[pd.DataFrame, dict]) -> Union[pd.DataFrame, dict]:
for operation in self.operations:
df = operation(df)
return df
@dataclass
class RewardConditioningPromptPostprocessor(object):
injected_token = "<reward_0>"
def __call__(self, prompt: str, **kwargs):
return f"{self.injected_token}{prompt}"
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def batch_select(input: Tensor, index: Tensor):
"""Select elements from a batched tensor with a batched index tensor.
Example:
input = torch.tensor([
[0, 1, 2],
[3, 0, 9],
[6, 7, 8],
])
index = torch.tensor([[0, 1], [1, 0], [0, 0]])
batch_select(input, index) = tensor([
[0, 1],
[0, 3],
[6, 6]
])
"""
dummy_index = torch.arange(input.size(0), device=input.device).unsqueeze(-1)
return input[dummy_index, index]
def pad_sequence_from_left(
sequences: Sequence[Tensor],
batch_first: bool = False,
padding_value: float = 0.0,
):
"""Mirror of `torch.nn.utils.rnn.pad_sequence`, but pad from left."""
sequences = tuple(sequence.flip(0) for sequence in sequences)
padded_sequence = torch._C._nn.pad_sequence(sequences, batch_first, padding_value) # noqa
padded_sequence = padded_sequence.flip(int(batch_first))
return padded_sequence
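# Illustrative example (shorter sequences are padded on the left):
#
#   >>> import torch
#   >>> pad_sequence_from_left([torch.tensor([1, 2]), torch.tensor([3])], batch_first=True)
#   tensor([[1, 2],
#           [0, 3]])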
def compute_logprobs(logits: Tensor, labels: Tensor, ignore_index: int) -> Tensor:
"""Compute per-token logprobs, zeroing out places with ignore_index (padding)."""
return -F.cross_entropy(logits.permute(0, 2, 1), labels, reduction="none", ignore_index=ignore_index)
def whiten(values: Tensor, shift_mean=True, epsilon=1e-8) -> Tensor:
assert values.size(0) >= 8, f"Internal error: Minibatch size {values.size(0)} is insufficient for whitening."
mean, std = values.mean(), values.std(unbiased=False) # noqa
whitened = (values - mean) / (std + epsilon)
if not shift_mean:
whitened = whitened + mean
return whitened
def pad(inputs: Tensor, target_size: Union[torch.Size, Sequence[int]], value=0.0, left=True):
current_size = inputs.size()
diffs = tuple(ti - ci for ti, ci in utils.zip_(target_size, current_size))
pad_params = []
for diff in diffs:
pad_params = ([diff, 0] if left else [0, diff]) + pad_params
res = F.pad(inputs, pad=pad_params, value=value)
return res
def left_pad(inputs: Tensor, target_size: Union[torch.Size, Sequence[int]], value=0.0):
return pad(inputs=inputs, target_size=target_size, value=value, left=True)
def right_pad(inputs: Tensor, target_size: Union[torch.Size, Sequence[int]], value=0.0):
return pad(inputs=inputs, target_size=target_size, value=value, left=False)
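# Illustrative example of the left/right padding helpers above:
#
#   >>> import torch
#   >>> left_pad(torch.tensor([1, 2]), target_size=(4,))
#   tensor([0, 0, 1, 2])
#   >>> right_pad(torch.tensor([1, 2]), target_size=(4,))
#   tensor([1, 2, 0, 0])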
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MyAccelerator(accelerate.Accelerator):
"""Thin wrapper for accelerate.Accelerator."""
def __repr__(self):
return (
f"Accelerator(\n"
f" state={self.state}, \n"
f" gradient_accumulation_steps={self.gradient_accumulation_steps:.6f}, \n"
f" split_batches={self.split_batches}, \n"
f" step_scheduler_with_optimizer={self.step_scheduler_with_optimizer},\n"
f")"
)
def unwrap_optimizer(self, optimizer: accelerate.accelerator.AcceleratedOptimizer):
return optimizer.optimizer
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
def format_prompt(example: dict, prompt_dict: dict) -> str:
"""Formats a prompt with a prompt_dict formatter.
Args:
example: A dict-like object with required keys "instruction" and "input"
prompt_dict: Dictionary containing the keys "prompt_noinputs" and "prompt_inputs" which have
placeholders corresponding to the keys from `example`. E.g. "{instruction}".
Returns:
A formatted prompt string.
Examples
--------
>>> format_prompt(dict(instruction="test", input=""), prompt_dict=dict(prompt_noinputs="prompt {instruction}"))
'prompt test'
"""
assert "instruction" in example and "input" in example, "Internal error: example missing required keys."
if example["input"] is None or len(example["input"]) == 0:
formatted_prompt = prompt_dict["prompt_noinputs"].format_map(example)
else:
formatted_prompt = prompt_dict["prompt_inputs"].format_map(example)
return formatted_prompt
def format_output(example: dict, eos_token: Optional[str] = None, output_key="output") -> str:
if eos_token is None:
eos_token = ""
return f"{example[output_key]}{eos_token}"
def format_prompt_with_data_frame(
df: pd.DataFrame,
prompt_dict: dict,
df_postprocessor: Optional[Callable] = None,
return_dict=False,
):
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
prompts = [format_prompt(example, prompt_dict) for example in list_dict_data]
metadata = {"prompt_dict": prompt_dict}
if return_dict:
return dict(prompts=prompts, list_dict_data=list_dict_data, metadata=metadata)
return prompts, list_dict_data, metadata
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> dict:
"""Tokenize a list of strings and return the tokenized content as well metadata (e.g., truncation statistics)."""
padding = getattr(tokenizer, "padding", "max_length")
return_overflowing_tokens = transformers.__version__ <= "4.26.1"
# TODO(lxuechen): Until HF supports fast tokenizer for OPT, we can't make a joint call on the list of strings
# when `return_overflowing_tokens=True`.
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding=padding,
max_length=tokenizer.model_max_length,
truncation=True,
return_overflowing_tokens=return_overflowing_tokens,
)
for text in strings
]
if padding == "max_length":
input_ids = labels = torch.cat([tokenized.input_ids for tokenized in tokenized_list])
else: # "longest"
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
if return_overflowing_tokens:
input_ids_lens = labels_lens = [
tokenizer.model_max_length + tokenized.num_truncated_tokens.item() for tokenized in tokenized_list
]
# `num_truncated_tokens` can be negative, if no truncation occurred.
num_truncated_tokens = sum(max(tokenized.num_truncated_tokens.item(), 0) for tokenized in tokenized_list)
num_truncated_examples = sum(tokenized.num_truncated_tokens.item() > 0 for tokenized in tokenized_list)
else:
logger.warning(
"You are using a `transformers` version that does not support `return_overflowing_tokens=True`. "
"The tokenization metadata will not be recorded."
"In order to see truncation statistics, please downgrade to `transformers<=4.26.1`."
)
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
]
num_truncated_tokens = num_truncated_examples = -1
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
tokenization_metadata=dict(
num_examples=len(tokenized_list),
num_truncated_tokens=num_truncated_tokens,
num_truncated_examples=num_truncated_examples,
input_ids_avg_len=utils.mean(input_ids_lens),
input_ids_max_len=max(input_ids_lens),
input_ids_min_len=min(input_ids_lens),
labels_avg_len=utils.mean(labels_lens),
labels_max_len=max(labels_lens),
labels_min_len=min(labels_lens),
model_max_length=tokenizer.model_max_length,
),
)
def preprocess_for_sft(
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor=None,
verbose=True,
) -> dict[str, Union[torch.Tensor, Sequence[torch.Tensor]]]:
"""Tokenize each example and create the labels.
Args:
df: DataFrame containing the data. Must have columns 'instruction', 'input', and 'output'.
prompt_dict: Dictionary for formatting prompts.
tokenizer: Tokenizer to use. If None, use the tokenizer for the given model.
df_postprocessor: Function to apply to the DataFrame before tokenization.
verbose: Whether to print tokenization metadata.
Returns:
A dictionary mapping str to torch.Tensor.
"""
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
sources = [format_prompt(dict_data, prompt_dict) for dict_data in list_dict_data]
targets = [format_output(dict_data, eos_token=tokenizer.eos_token) for dict_data in list_dict_data]
examples = [s + t for s, t in utils.zip_(sources, targets)]
examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
input_ids = examples_tokenized["input_ids"]
labels = copy.deepcopy(input_ids)
for label, source_len in utils.zip_(labels, sources_tokenized["input_ids_lens"]):
label[:source_len] = constants.IGNORE_INDEX # Input context should not contribute to loss.
packaged_data = dict(
input_ids=input_ids,
labels=labels,
metadata=dict(),
tokenization_metadata=examples_tokenized["tokenization_metadata"],
)
if verbose:
logger.warning(f"Tokenization metadata:\n{utils.jdumps(packaged_data['tokenization_metadata'])}")
return packaged_data
def preprocess_for_reward_modeling(
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor: Optional[Callable] = None,
end_sequence_with_eos: bool = False,
verbose=True,
) -> dict[str, torch.Tensor]:
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
index_0, index_1 = tuple(
torch.full(size=(len(list_dict_data), 1), fill_value=fill_value, dtype=torch.long) for fill_value in (0, 1)
)
def _get_numeric_preference(example: dict):
# 1 vs 2 is stored in table, but for modeling we use 0 vs 1; remap here.
return {1: 0, 2: 1}[example["preference"]]
choice = torch.tensor([[_get_numeric_preference(dict_data)] for dict_data in list_dict_data])
def _get_text(example: dict, output_key: str):
source = format_prompt(example, prompt_dict=prompt_dict)
target = format_output(
example,
eos_token=tokenizer.eos_token if end_sequence_with_eos else None,
output_key=output_key,
)
return source + target
text_list_0, text_list_1 = tuple(
[_get_text(dict_data, key) for dict_data in list_dict_data] for key in ("output_1", "output_2")
)
def _merge_tokenization_metadata(metadata_list: Sequence[dict]) -> dict:
num_examples = sum(metadata["num_examples"] for metadata in metadata_list)
num_truncated_tokens = sum(metadata["num_truncated_tokens"] for metadata in metadata_list)
num_truncated_examples = sum(metadata["num_truncated_examples"] for metadata in metadata_list)
input_ids_avg_lens = (
sum([metadata["input_ids_avg_len"] * metadata["num_examples"] for metadata in metadata_list]) / num_examples
)
input_ids_max_len = max(metadata["input_ids_max_len"] for metadata in metadata_list)
input_ids_min_len = min(metadata["input_ids_min_len"] for metadata in metadata_list)
labels_avg_lens = (
sum([metadata["labels_avg_len"] * metadata["num_examples"] for metadata in metadata_list]) / num_examples
)
labels_max_len = max(metadata["labels_max_len"] for metadata in metadata_list)
labels_min_len = min(metadata["labels_min_len"] for metadata in metadata_list)
return dict(
num_examples=num_examples,
num_truncated_tokens=num_truncated_tokens,
num_truncated_examples=num_truncated_examples,
input_ids_avg_len=input_ids_avg_lens,
input_ids_max_len=input_ids_max_len,
input_ids_min_len=input_ids_min_len,
labels_avg_len=labels_avg_lens,
labels_max_len=labels_max_len,
labels_min_len=labels_min_len,
)
logger.warning(f"Tokenizing {len(list_dict_data)} pairs...")
tokenized_0, tokenized_1 = tuple(_tokenize_fn(text_list, tokenizer) for text_list in (text_list_0, text_list_1))
# "size" (bsz, 2, seq_len)
input_ids = [list(pair) for pair in utils.zip_(tokenized_0["input_ids"], tokenized_1["input_ids"])]
labels = [list(pair) for pair in utils.zip_(tokenized_0["labels"], tokenized_1["labels"])]
tokenization_metadata = _merge_tokenization_metadata(
[tokenized_0["tokenization_metadata"], tokenized_1["tokenization_metadata"]]
)
packaged_data = dict(
input_ids=input_ids,
labels=labels,
index_0=index_0,
index_1=index_1,
choice=choice,
tokenization_metadata=tokenization_metadata,
metadata=dict(mean_choice=choice.float().mean().item()),
)
if verbose:
logger.warning(f"Tokenization metadata:\n{utils.jdumps(packaged_data['tokenization_metadata'])}")
return packaged_data
def _get_generator(seed: int) -> torch.Generator:
rng = torch.Generator()
rng.manual_seed(seed)
return rng
def split_train_into_train_and_eval(train_dataset: Dataset, eval_size: int, seed: int) -> tuple[Dataset, Dataset]:
assert eval_size < len(
train_dataset # noqa
), "Requested eval_size cannot be equal/larger than original train data size."
new_train_size = len(train_dataset) - eval_size # noqa
train_dataset, eval_dataset = torch.utils.data.random_split(
train_dataset, [new_train_size, eval_size], generator=_get_generator(seed)
)
return train_dataset, eval_dataset
class SFTDataset(Dataset):
def __init__(
self,
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor: Optional[Callable] = None,
):
super(SFTDataset, self).__init__()
data_dict = preprocess_for_sft(
df=df, prompt_dict=prompt_dict, tokenizer=tokenizer, df_postprocessor=df_postprocessor
)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
self.metadata = data_dict["metadata"]
self.tokenization_metadata = data_dict["tokenization_metadata"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, Tensor]:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
@dataclasses.dataclass
class DataCollatorForSFTDataset(object):
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
)
labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=constants.IGNORE_INDEX)
# When sequences are right padded, `attention_mask` is only useful for T5 training.
attention_mask = input_ids.ne(self.tokenizer.pad_token_id).long()
return dict(
input_ids=input_ids,
labels=labels,
attention_mask=attention_mask,
)
class BinaryRewardModelingDataset(Dataset):
def __init__(
self,
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
df_postprocessor: Optional[Callable] = None,
end_sequence_with_eos: bool = False,
):
super(BinaryRewardModelingDataset, self).__init__()
data_dict = preprocess_for_reward_modeling(
df=df,
prompt_dict=prompt_dict,
tokenizer=tokenizer,
df_postprocessor=df_postprocessor,
end_sequence_with_eos=end_sequence_with_eos,
)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
self.index_0 = data_dict["index_0"]
self.index_1 = data_dict["index_1"]
self.choice = data_dict["choice"]
self.metadata = data_dict["metadata"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, Tensor]:
return dict(
input_ids=self.input_ids[i],
labels=self.labels[i],
index_0=self.index_0[i],
index_1=self.index_1[i],
choice=self.choice[i],
)
@dataclasses.dataclass
class DataCollatorForBinaryRewardModelingDataset(object):
"""
This collation assumes data preprocessing converts text into *padded* tensors of the same length.
For autoregressive models like OPT and GPT2, `input_ids` alone is sufficient to produce the rewards.
For enc-dec models like T5, we need `labels`.
`input_ids` and `labels` are tensors of size (bsz, num_candidates, max_seq_len), i.e., each batch instance has
`num_candidates` generations/completions.
`index_0` and `index_1` are tensors of size (bsz, num_pairs), and are used to index into `input_ids` and
`labels` to find the first and second sequences in the pair.
`choice` is a binary int/long tensor of size (bsz, num_pairs) indicating which sequence in the pair is better,
i.e., 0 means the first sequence is preferred, and 1 means otherwise.
"""
tokenizer: transformers.PreTrainedTokenizer
def _left_pad_helper(self, instances: Sequence[dict], key: str):
# TODO(lxuechen): Potentially replace with `transformers.PretrainedTokenizerBase.prepare_for_model`.
# `instances` is a list of dicts; each dict maps `key` to a list of tensors, possibly of unequal length.
input_ids = [seq for instance in instances for seq in instance[key]] # Flatten.
input_ids = torch_ops.pad_sequence_from_left(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id,
)
input_ids = einops.rearrange(
input_ids,
"(bsz num_candidates) max_seq_len -> bsz num_candidates max_seq_len",
num_candidates=len(instances[0][key]),
)
return input_ids
def __call__(self, instances: Sequence[Dict]) -> Dict[str, Tensor]:
index_0, index_1, choice = tuple(
torch.stack([instance[key] for instance in instances]) for key in ("index_0", "index_1", "choice")
)
input_ids = self._left_pad_helper(instances, "input_ids")
attention_mask = input_ids.ne(self.tokenizer.pad_token_id).long()
return dict(
input_ids=input_ids,
attention_mask=attention_mask,
index_0=index_0,
index_1=index_1,
choice=choice,
)
class QueryDataset(Dataset):
"""Dataset that emits tokenized left-padded queries."""
def __init__(
self,
df: pd.DataFrame,
prompt_dict: dict,
tokenizer: transformers.PreTrainedTokenizer,
query_len: int,
df_postprocessor: Optional[Callable] = None,
prompt_postprocessor: Optional[Callable] = None,
):
super(QueryDataset, self).__init__()
if df_postprocessor is not None:
df = df_postprocessor(df)
list_dict_data = df.to_dict(orient="records")
prompts = [format_prompt(example=dict_data, prompt_dict=prompt_dict) for dict_data in list_dict_data]
if prompt_postprocessor is not None:
prompts = [prompt_postprocessor(prompt) for prompt in prompts]
queries = [tokenizer(prompt, return_tensors="pt", truncation=False).input_ids[0] for prompt in prompts]
filtered_queries = [query for query in queries if len(query) <= query_len]
logger.warning(
f"Filtered out {len(queries) - len(filtered_queries)} instances out of {len(queries)} that "
f"exceed length limit. These examples are not used for training, but will still be used in evaluation. "
)
queries = torch.stack(
[
torch_ops.left_pad(query, target_size=(query_len,), value=tokenizer.pad_token_id)
for query in filtered_queries
]
)
self.queries = queries
self.query_attn_masks = queries.ne(tokenizer.pad_token_id).long()
# Auxiliary data.
self.prompts = prompts
self.list_dict_data = list_dict_data
def __getitem__(self, i):
return dict(queries=self.queries[i], query_attn_masks=self.query_attn_masks[i])
def __len__(self):
return len(self.queries)
class QueryResponseDataset(Dataset):
def __init__(
self,
tokenizer: transformers.PreTrainedTokenizer,
queries: Sequence[str],
responses: Sequence[str],
query_len: int,
response_len: int,
):
super(QueryResponseDataset, self).__init__()
def tokenize_without_truncation(strings):
return [tokenizer(string, return_tensors="pt", truncation=False).input_ids[0] for string in strings]
sequences = [query + response for query, response in utils.zip_(queries, responses)]
queries = tokenize_without_truncation(queries)
sequences = tokenize_without_truncation(sequences)
responses = [sequence[len(query) :] for sequence, query in utils.zip_(sequences, queries)]
filtered_pairs = [
(query, response)
for query, response in utils.zip_(queries, responses)
if len(query) <= query_len and len(response) <= response_len
]
filtered_queries = [query for query, _ in filtered_pairs]
filtered_responses = [response for _, response in filtered_pairs]
logger.warning(
f"Filtered out {len(queries) - len(filtered_queries)} instances out of {len(queries)} that "
f"exceed length limit... "
f"These examples are not used for training. "
f"However they won't be ignored if this is eval set that is used in `RLTrainer.evaluate`."
)
def left_pad_and_stack(list_of_tensors: Sequence[torch.Tensor], target_len: int):
return torch.stack(
[
torch_ops.left_pad(tensor, target_size=(target_len,), value=tokenizer.pad_token_id)
for tensor in list_of_tensors
]
)
queries = left_pad_and_stack(filtered_queries, query_len)
responses = left_pad_and_stack(filtered_responses, response_len)
self.queries = queries
self.responses = responses
self.query_attn_masks = queries.ne(tokenizer.pad_token_id).long()
def __getitem__(self, i):
return dict(queries=self.queries[i], responses=self.responses[i], query_attn_masks=self.query_attn_masks[i])
def __len__(self):
return len(self.queries)
@dataclasses.dataclass
class DataCollatorForStackableDataset(object):
def __call__(self, instances: Sequence[Dict]) -> Dict[str, Tensor]:
return {key: torch.stack([instance[key] for instance in instances]) for key in instances[0].keys()}
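# Illustrative example: instances with same-shaped tensors are simply stacked along a new batch dim.
#
#   >>> import torch
#   >>> collator = DataCollatorForStackableDataset()
#   >>> batch = collator([{"queries": torch.tensor([1, 2])}, {"queries": torch.tensor([3, 4])}])
#   >>> batch["queries"]
#   tensor([[1, 2],
#           [3, 4]])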
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes that are shared across different algorithms.
WARNING:
Do not tamper with the state_dict function for any of these classes.
If you tamper, make sure the keys are the same, otherwise FSDP will get confused.
"""
logger = logging.get_logger(__name__)
class Policy(nn.Module, abc.ABC):
def __init__(
self, args, base_model: transformers.PreTrainedModel, base_tokenizer: transformers.PreTrainedTokenizer
):
super().__init__()
self.args = args
self.base_model = base_model
self.base_tokenizer = base_tokenizer
@abc.abstractmethod
def forward(
self,
queries: Tensor,
query_attn_masks: Tensor,
responses: Tensor,
temperature: Optional[float] = None,
) -> Dict[str, Tensor]:
raise NotImplementedError
def respond(
self,
queries: Tensor,
query_attn_masks: Tensor,
temperature: Optional[float] = None,
num_return_sequences=1,
) -> Dict[str, Tensor]:
assert not self.training, "Policy must be in eval mode for generation."
return self._post_respond(self._respond(queries, query_attn_masks, temperature, num_return_sequences))
@abc.abstractmethod
def _respond(
self, queries: Tensor, query_attn_masks: Tensor, temperature: Optional[float] = None, num_return_sequences=1
) -> Dict[str, Tensor]:
raise NotImplementedError
def _post_respond(self, respond_outputs: Dict[str, Tensor]) -> Dict[str, Tensor]:
return respond_outputs
class AutoregressivePolicy(Policy):
def forward(
self,
queries: Tensor,
query_attn_masks: Tensor,
responses: Tensor,
temperature: Optional[float] = None,
) -> Dict[str, Tensor]:
# TODO(lxuechen): Refactor attention mask. Here query_attn_masks overrides padding-based attention mask.
if temperature is None:
temperature = self.args.temperature
input_ids = torch.cat([queries, responses], dim=1)
attention_mask = input_ids.ne(self.base_tokenizer.pad_token_id)
attention_mask[:, : queries.size(1)] = query_attn_masks
# Fix position id issues and ensure consistency with `respond` for GPT and OPT.
inputs = self.base_model.prepare_inputs_for_generation(
input_ids=input_ids,
attention_mask=attention_mask,
use_cache=False,
)
outputs = self.base_model(**inputs, output_hidden_states=True)
original_logits = outputs.logits[:, -self.args.response_len - 1 : -1]
logits = original_logits / temperature
labels = input_ids[:, -self.args.response_len :]
logprobs = torch_ops.compute_logprobs(logits, labels, ignore_index=self.base_tokenizer.pad_token_id)
entropies = -(logits.softmax(dim=-1) * logits.log_softmax(dim=-1)).sum(dim=-1)
last_hidden_state = outputs.hidden_states[-1][:, -self.args.response_len - 1 : -1]
return dict(
original_logits=original_logits,
logits=logits,
logprobs=logprobs,
entropies=entropies,
last_hidden_state=last_hidden_state,
)
def _respond(
self,
queries: Tensor,
query_attn_masks: Tensor,
temperature: Optional[float] = None,
num_return_sequences=1,
) -> Dict[str, Tensor]:
if temperature is None:
temperature = self.args.temperature
sequences = self.base_model.generate(
inputs=queries,
attention_mask=query_attn_masks,
do_sample=True,
max_new_tokens=self.args.response_len,
pad_token_id=self.base_tokenizer.pad_token_id,
top_p=1.0,
top_k=0,
temperature=temperature,
num_return_sequences=num_return_sequences,
synced_gpus=True,
)
responses = torch_ops.right_pad(
sequences[:, queries.size(1) :],
target_size=(sequences.size(0), self.args.response_len),
value=self.base_tokenizer.pad_token_id,
)
return dict(responses=responses) # Size (bsz * num_return_sequences, response_len).
class Value(nn.Module, abc.ABC):
def __init__(
self, args, base_model: transformers.PreTrainedModel, base_tokenizer: transformers.PreTrainedTokenizer
):
super().__init__()
self.args = args
self.base_model = base_model
self.base_tokenizer = base_tokenizer
hidden_size = common.get_transformer_hidden_size(base_model)
value_head = torch.nn.Linear(hidden_size, 1)
value_head.weight.data.zero_()
value_head.bias.data.zero_()
self.value_head = value_head.to(next(base_model.parameters()).device)
@abc.abstractmethod
def forward(self, queries: Tensor, query_attn_masks: Tensor, responses: Tensor) -> Dict[str, Tensor]:
raise NotImplementedError
class AutoregressiveValue(Value):
def forward(self, queries: Tensor, query_attn_masks: Tensor, responses: Tensor) -> Dict[str, Tensor]:
sequences = torch.cat([queries, responses], dim=1)
sequence_attn_masks = sequences.ne(self.base_tokenizer.pad_token_id)
inputs = self.base_model.prepare_inputs_for_generation(
input_ids=sequences,
attention_mask=sequence_attn_masks,
use_cache=False,
)
outputs = self.base_model.model(**inputs, return_dict=True)
# value[t]: \hat{V}(sequences_{:t-1}); must align with `_estimate_advantage`.
last_hidden_state = outputs.last_hidden_state[:, queries.size(1) - 1 : -1]
values = self.value_head(last_hidden_state).squeeze(-1)
return dict(values=values)
class ActorCritic(nn.Module):
def __init__(self, policy: Policy, value_model: Value):
super(ActorCritic, self).__init__()
self.policy = policy
self.value_model = value_model
def forward(
self,
queries: Tensor,
query_attn_masks: Tensor,
responses: Tensor,
temperature: Optional[float] = None,
) -> Dict[str, Tensor]:
# Assume the policy and value model share the same tokenizer.
o1 = self.policy(queries, query_attn_masks, responses, temperature)
o2 = self.value_model(queries, query_attn_masks, responses)
return {**o1, **o2}
def respond(
self, queries: Tensor, query_attn_masks: Tensor, temperature: Optional[float] = None
) -> Dict[str, Tensor]:
return self.policy.respond(queries=queries, query_attn_masks=query_attn_masks, temperature=temperature)
def make_policy_with_base_model(
args, base_model: transformers.PreTrainedModel, base_tokenizer: transformers.PreTrainedTokenizer
) -> Policy:
if base_model.config.is_encoder_decoder:
raise NotImplementedError
else:
return AutoregressivePolicy(args, base_model, base_tokenizer)
def make_value_with_base_model(
args,
base_model: transformers.PreTrainedModel,
base_tokenizer: transformers.PreTrainedTokenizer,
) -> Value:
if base_model.config.is_encoder_decoder:
raise NotImplementedError
else:
return AutoregressiveValue(args, base_model, base_tokenizer)
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RewardConfig(transformers.PretrainedConfig):
model_type = "reward_model"
    # Hugging Face requires config `__init__` methods to accept keyword arguments only.
def __init__(self, backbone_model_name_or_path=None, **kwargs):
        super().__init__(**kwargs)
self.backbone_model_name_or_path = backbone_model_name_or_path
self._name_or_path = backbone_model_name_or_path
class RewardModelOutput(ModelOutput):
rewards: Tensor = None
class RewardModel(transformers.PreTrainedModel):
config_class = RewardConfig
def __init__(self, config: RewardConfig, **kwargs):
        super().__init__(config)
self.backbone_model = common.make_generative_lm(config.backbone_model_name_or_path, **kwargs)
hidden_size = common.get_transformer_hidden_size(self.backbone_model)
reward_head = nn.Linear(hidden_size, 1)
torch.nn.init.zeros_(reward_head.bias)
self.reward_head = reward_head.to(next(self.backbone_model.parameters()).device)
def forward(self, input_ids, attention_mask=None, return_dict=True, **kwargs):
# We only compute the rewards and don't compute the logistic regression loss in this function so that it's
# easier to use for later stages of reranking / RL training.
outputs = self.backbone_model.model(
input_ids=input_ids, attention_mask=attention_mask, return_dict=True, **kwargs
)
last_hidden_state = outputs.last_hidden_state
last_hidden_state_at_the_end = last_hidden_state[:, -1, :]
# TODO(lxuechen): Make returning rewards at all positions and last_hidden_state an option.
rewards = self.reward_head(last_hidden_state_at_the_end).squeeze(-1)
return RewardModelOutput(rewards=rewards) if return_dict else (rewards,)
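# Hedged usage sketch (not part of the original file): scoring a batch of texts with the
# reward head. `backbone_name_or_path` is a placeholder checkpoint, and the tokenizer is
# assumed to match the backbone (including a pad token).
def _example_score_texts(backbone_name_or_path, tokenizer, texts):
    config = RewardConfig(backbone_model_name_or_path=backbone_name_or_path)
    model = RewardModel(config=config).eval()
    batch = tokenizer(texts, return_tensors="pt", padding=True)
    with torch.inference_mode():
        return model(input_ids=batch.input_ids, attention_mask=batch.attention_mask).rewards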
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
__all__ = ["alpaca_leaderboard", "PairwiseAutoAnnotator"]
CURRENT_DIR = Path(__file__).parent
ANNOTATORS_CONFIG_DIR = CURRENT_DIR / "annotators"
PRECOMPUTED_LEADERBOARD = {
"annotator_pool_v0/configs.yaml": {
# Internal codename: rlhf_llama_7b_regen_v7_3ep_v12_ckpt_20
"RLHF PPO": {
"n_draws": 9.0,
"n_total": 805.0,
"n_wins": 392.0,
"n_wins_base": 404.0,
"standard_error": 1.753281981205392,
"win_rate": 49.25465838509317,
},
# Internal codename: sft_v6_52k_llama_7b_regen_v7_3ep_recover
"SFT 52k (Alpaca 7B)": {
"n_draws": 16.0,
"n_total": 805.0,
"n_wins": 312.0,
"n_wins_base": 477.0,
"standard_error": 1.707927043869429,
"win_rate": 39.75155279503105,
},
# Internal codename: sft_v6_llama_7b_regen_v7_3ep
"SFT 10k": {
"n_draws": 19.0,
"n_total": 802.0,
"n_wins": 278.00,
"n_wins_base": 505.00,
"standard_error": 1.67,
"win_rate": 35.85,
},
"Davinci001": {
"n_draws": 0.0,
"n_total": 805.0,
"n_wins": 201.0,
"n_wins_base": 604.0,
"standard_error": 1.5264851835334794,
"win_rate": 24.96894409937888,
},
"ChatGPT": {
"n_draws": 9.0,
"n_total": 805.0,
"n_wins": 503.0,
"n_wins_base": 293.0,
"standard_error": 1.6920642123984606,
"win_rate": 63.04347826086957,
},
"LLaMA 7B": {
"n_draws": 0.0,
"n_total": 775.0,
"n_wins": 98.0,
"n_wins_base": 677.0,
"standard_error": 1.1946348760380694,
"win_rate": 12.645161290322582,
},
"GPT4": {
"n_draws": 17.0,
"n_total": 804.0,
"n_wins": 631.0,
"n_wins_base": 156.0,
"standard_error": 1.4002932714785454,
"win_rate": 79.53980099502488,
},
}
}
# TODO: alpaca_leaderboard could also be replaced with alpaca_eval functions
def alpaca_leaderboard(
path_or_all_outputs: Union[eval_utils.AnyData, eval_utils.AnyPath],
annotators_config: eval_utils.AnyPath = "annotator_pool_v0/configs.yaml",
name: str = "Current method",
is_add_reference_methods: bool = True,
is_print_metrics: bool = False,
**kwargs,
) -> pd.DataFrame:
"""Add the given model to the Alpaca leaderboard.
Parameters
----------
path_or_all_outputs : str or list of dict
The outputs of the model to add to the leaderboard as a list of dictionaries, or a path to list of JSON. Each
dictionary (or row) should contain the following keys: `instruction`, `input`, and `output`.
annotators_config : str, optional
The path to the annotator's config file. For details see the docstring of `PairwiseAutoAnnotator`.
name : str, optional
The name of the model to add to the leaderboard.
is_add_reference_methods : bool, optional
Whether to add the Alpaca reference methods to the leaderboard.
is_print_metrics : bool, optional
Whether to print the metrics.
kwargs :
Additional arguments to pass to `PairwiseAutoAnnotator`.
"""
try:
with open(path_or_all_outputs) as f:
all_outputs = json.load(f)
logging.info(f"Loaded outputs from {path_or_all_outputs}.")
    except (TypeError, OSError):  # Not a readable path; assume the outputs were passed in directly.
all_outputs = path_or_all_outputs
if is_add_reference_methods:
all_metrics = PRECOMPUTED_LEADERBOARD[annotators_config]
else:
all_metrics = dict()
outputs_baseline = datasets.load_dataset(
"tatsu-lab/alpaca_farm",
"alpaca_farm_evaluation",
cache_dir=constants.DEFAULT_CACHE_DIR,
)["eval"]
if len(all_outputs) != 805:
logging.warning(
f"""You gave {len(all_outputs)} outputs, but there are 805 examples in Alpaca Eval.
We are computing the metrics on all examples you gave."""
)
outputs_1 = eval_utils.load_or_convert_to_dataframe(outputs_baseline)
outputs_2 = eval_utils.load_or_convert_to_dataframe(all_outputs)
annotator = PairwiseAutoAnnotator(annotators_config=annotators_config, **kwargs)
annotated = annotator.annotate_head2head(outputs_1=outputs_1, outputs_2=outputs_2)
all_metrics[name] = metrics.pairwise_to_winrate(preferences=[a["preference"] for a in annotated])
df_results = pd.DataFrame(all_metrics).T.sort_values(by="win_rate", ascending=False)
if is_print_metrics:
print(df_results.to_string(float_format="%.2f"))
else:
return df_results
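# Hedged usage sketch (not part of the original file): adding one method's outputs to the
# leaderboard. `outputs_path` is a placeholder for a JSON file whose records contain the
# keys `instruction`, `input`, and `output`.
def _example_add_to_leaderboard(outputs_path):
    return alpaca_leaderboard(
        path_or_all_outputs=outputs_path,
        name="My method",
        is_add_reference_methods=True,
        is_print_metrics=False,
    )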
class PairwiseAutoAnnotator(eval_annotators.PairwiseAnnotator):
def __init__(
self,
annotators_config: Union[eval_utils.AnyPath, list[dict[str, Any]]] = "annotator_pool_v0",
input_keys: Sequence[str] = ("instruction", "input"),
p_label_flip: Optional[float] = None,
base_dir: eval_utils.AnyPath = ANNOTATORS_CONFIG_DIR,
other_keys_to_keep: Sequence[str] = tuple(),
**kwargs,
):
super().__init__(
annotators_config=annotators_config,
input_keys=input_keys,
p_label_flip=p_label_flip,
base_dir=base_dir,
other_keys_to_keep=other_keys_to_keep,
**kwargs,
)
@property
def SingleAnnotator(self):
return SinglePairwiseAutoAnnotator
class SinglePairwiseAutoAnnotator(eval_annotators.SinglePairwiseAnnotator):
def _get_prompt_template(self, prompt_template: dict[str, str]):
        # prompt_template is a dictionary of two prompt templates: one for examples with an input and one for examples without.
_get_prompt_template = super()._get_prompt_template
return {k: _get_prompt_template(prompt) for k, prompt in prompt_template.items()}
def make_prompts(self, df_to_annotate, prompt_template=None):
if prompt_template is None:
prompt_template = self.prompt_template
arr_is_inputs = (df_to_annotate["input"] != "") & (df_to_annotate["input"].notnull())
df_with_inputs = df_to_annotate[arr_is_inputs]
df_without_inputs = df_to_annotate[~arr_is_inputs]
prompts, df = super().make_prompts(
df_without_inputs,
prompt_template=prompt_template["without_inputs"],
)
if arr_is_inputs.any():
prompts_i, df_i = super().make_prompts(
df_with_inputs,
prompt_template=prompt_template["with_inputs"],
)
prompts += prompts_i
df = pd.concat([df, df_i], axis=0, ignore_index=True)
return prompts, df
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.getLogger(__name__)
def rotate_half(x):
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_embedding(q, k, cos, sin):
cos, sin = cos.to(q.dtype), sin.to(q.dtype)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
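# Hedged example (not part of the original file): rotate_half splits the last dimension
# into two halves and swaps them with a sign flip, the standard building block behind
# rotary position embeddings.
def _example_rotate_half():
    x = torch.tensor([0.0, 1.0, 2.0, 3.0])
    return rotate_half(x)  # tensor([-2., -3., 0., 1.])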
class LlamaAttention(modeling_llama.LlamaAttention):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config=config)
def forward( # noqa
self,
hidden_states: torch.Tensor, # (total_nnz, hidden_size).
seqlens: torch.Tensor, # (bsz,).
cu_seqlens: torch.Tensor, # (bsz+1,).
rotary_tensors: tuple[torch.Tensor, torch.Tensor],
# position_ids is only used for non-flash version, when past_key_value is not None. For flash version,
# rotary_tensors already takes positions into account.
position_ids: Optional[torch.Tensor] = None,
# Crucial loop invariant: We assume past_key_value (input/output) is always in padded format.
# More precisely, each tensor is of size (bsz, num_heads, seqlen, head_dim).
# Otherwise we can't extend it with the current key/value embedding through torch.cat easily.
past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
use_cache=False,
attention_mask_k: Optional[torch.Tensor] = None,
pad_back: Optional[Callable] = None,
):
if past_key_value is None:
# (total_nnz, hidden_size) -> (total_nnz, num_heads, head_dim).
query_states, key_states, value_states = [
einops.rearrange(func(hidden_states), "t (h d) -> t h d", h=self.num_heads)
for func in (self.q_proj, self.k_proj, self.v_proj)
]
query_states, key_states = apply_rotary_embedding(query_states, key_states, *rotary_tensors)
qkv = torch.stack([query_states, key_states, value_states], dim=1)
assert qkv.dtype in (
torch.float16,
torch.bfloat16,
), f"Flash attention expected mixed precision. But found qkv dtype: {qkv.dtype}"
attn_output = flash_attn_unpadded_qkvpacked_func(
qkv=qkv,
cu_seqlens=cu_seqlens,
max_seqlen=seqlens.max(),
dropout_p=0.0,
causal=True,
softmax_scale=self.head_dim**-0.5,
)
attn_output = einops.rearrange(attn_output, "t h d -> t (h d)")
attn_output = self.o_proj(attn_output)
if use_cache:
key_states, value_states = tuple(
einops.rearrange(pad_back(tensor), "b s h d -> b h s d") for tensor in (key_states, value_states)
)
past_key_value = (key_states, value_states)
return attn_output, None, past_key_value
else:
return super(LlamaAttention, self).forward( # noqa
hidden_states=hidden_states,
attention_mask=attention_mask_k,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
)
class LlamaDecoderLayer(modeling_llama.LlamaDecoderLayer):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config=config)
del self.self_attn
self.self_attn = LlamaAttention(config=config)
def forward( # noqa
self,
hidden_states: torch.Tensor,
seqlens: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_tensors: tuple[torch.Tensor, torch.Tensor],
position_ids: torch.Tensor,
past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
use_cache=False,
attention_mask_k: Optional[torch.Tensor] = None,
pad_back: Optional[Callable] = None,
):
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights, present_key_value = self.self_attn( # noqa
hidden_states=hidden_states,
seqlens=seqlens,
cu_seqlens=cu_seqlens,
rotary_tensors=rotary_tensors,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
attention_mask_k=attention_mask_k,
pad_back=pad_back,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = apex_patch.apex_rmsnorm(self.post_attention_layernorm, hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value,)
return outputs
class LlamaModel(modeling_llama.LlamaModel):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config=config)
self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self._cache_rotary_embeddings()
def _cache_rotary_embeddings(self, max_position_embeddings=2048, base=10000):
dim = self.config.hidden_size // self.config.num_attention_heads
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # The layout differs from the paper, but this permutation yields the same rotation.
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos(), persistent=False) # (seqlen, head_dim).
self.register_buffer("sin_cached", emb.sin(), persistent=False) # (seqlen, head_dim).
def _make_rotary_tensors(self, position_ids: torch.Tensor):
# position_ids only affects the cos and sin applied to the query and key embeddings.
# flash path: position_ids size = (total_nnz,); cos sin size = (total_nnz, 1, head_dim)
# nonflash path: we don't create rotary tensors here, and rely on the builtin RotaryEmbedding.
# this assumes position_ids size = (bsz, seqlen).
assert position_ids.dim() == 1
# (total_nnz, 1, head_dim)
cos, sin = [tensor[position_ids].unsqueeze(1) for tensor in (self.cos_cached, self.sin_cached)]
return cos, sin
def forward( # noqa
self,
input_ids: torch.LongTensor,
attention_mask: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
assert not output_attentions
assert inputs_embeds is None
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.embed_tokens(input_ids)
execute_flash = past_key_values is None
if execute_flash:
if position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
is_selected = attention_mask == 1
position_ids = torch.cat([t[i] for t, i in utils.zip_(position_ids, is_selected)])
rotary_tensors = self._make_rotary_tensors(position_ids)
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = tensor_ops.unpad_input(hidden_states, attention_mask)
attention_mask_k = None
else:
if position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids[:, -1].unsqueeze(-1)
rotary_tensors = None
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = hidden_states, lambda x: x, None, None
# Broadcast assumes query_len == 1.
attention_mask_k = torch.zeros(
size=attention_mask.size(), dtype=hidden_states.dtype, device=hidden_states.device
).masked_fill(~attention_mask.bool(), torch.tensor(torch.finfo(hidden_states.dtype).min))[:, None, None, :]
all_hidden_states = () if output_hidden_states else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (pad_back(hidden_states),)
past_key_value = past_key_values[idx] if past_key_values is not None else None
layer_outputs = decoder_layer(
hidden_states=hidden_states,
seqlens=attention_mask.sum(dim=1),
cu_seqlens=cu_seqlens_q,
rotary_tensors=rotary_tensors,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
attention_mask_k=attention_mask_k,
pad_back=pad_back,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[1],)
hidden_states = apex_patch.apex_rmsnorm(self.norm, hidden_states)
hidden_states = pad_back(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if return_dict:
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
)
return tuple(v for v in (hidden_states, next_cache, all_hidden_states) if v is not None)
class LlamaForCausalLM(modeling_llama.LlamaForCausalLM):
def __init__(self, config: modeling_llama.LlamaConfig):
super().__init__(config)
self.model = LlamaModel(config)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
if past_key_values is None: # flash path
position_ids = attention_mask.long().cumsum(-1) - 1
is_selected = attention_mask == 1
position_ids = torch.cat(
[
this_position_ids[this_is_selected]
for this_position_ids, this_is_selected in utils.zip_(position_ids, is_selected)
]
)
else: # non-flash path
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pad_to_multiples_of_x(tensor: torch.Tensor, x: int = 8):
"""Pad a tensor along the batch dimension to a multiple of x."""
total_nnz, hidden_size = tensor.size()
pad_len = (x - total_nnz % x) % x
if pad_len != 0:
tensor = torch.cat(
[
tensor,
torch.zeros([pad_len, hidden_size], device=tensor.device, dtype=tensor.dtype),
],
dim=0,
)
def unpad_x(padded_tensor):
return padded_tensor[:-pad_len] if pad_len > 0 else padded_tensor
return tensor, unpad_x
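# Hedged example (not part of the original file): round-trip through pad_to_multiples_of_x.
# A (13, 4) tensor gets 3 zero rows appended to reach 16 rows; the returned closure strips
# them again.
def _example_pad_round_trip():
    t = torch.randn(13, 4)
    padded, unpad = pad_to_multiples_of_x(t, x=8)
    assert padded.size(0) == 16 and torch.equal(unpad(padded), t)
    return padded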
def unpad_input(padded: torch.Tensor, attention_mask: torch.Tensor) -> tuple[torch.Tensor, Callable, torch.Tensor, int]:
"""Wrapper for unpad_input in official flash-attn."""
batch_size, padded_seqlen = padded.shape[:2]
unpadded, indices, cu_seqlens, max_seqlen = bert_padding.unpad_input(padded, attention_mask)
def pad_back(unpadded: torch.Tensor):
return bert_padding.pad_input(unpadded, indices, batch_size, padded_seqlen)
return unpadded, pad_back, cu_seqlens, max_seqlen
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
class OPTDecoderLayer(modeling_opt.OPTDecoderLayer):
def forward( # noqa
self,
# (bsz x seqlen, hidden_size) or (bsz, 1, hidden_size) if past_key_value is not None.
hidden_states: torch.Tensor,
pad_back: Callable,
cu_seqlens_q: Optional[torch.Tensor] = None,
max_seqlen_q: Optional[int] = None,
# Crucial loop invariant: We assume past_key_value (input/output) is always in padded format.
# More precisely, each tensor is of size (bsz, seqlen, hidden_size).
# Otherwise we can't extend it with the current key/value embedding through torch.cat easily.
past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask_k: Optional[torch.Tensor] = None, # (bsz, seqlen+1,).
use_cache=False,
):
residual = hidden_states
hidden_states = apex_patch.apex_layernorm(self.self_attn_layer_norm, hidden_states)
query = self.self_attn.q_proj(hidden_states)
key = self.self_attn.k_proj(hidden_states)
value = self.self_attn.v_proj(hidden_states)
num_heads, head_dim = self.self_attn.num_heads, self.self_attn.head_dim
if past_key_value is None: # hidden_states should be in unpadded format to run flash-attn.
query, key, value = tuple(
einops.rearrange(tensor, "nnz (h d) -> nnz h d", h=num_heads, d=head_dim)
for tensor in (query, key, value)
)
hidden_states = flash_attn_unpadded_func(
q=query,
k=key,
v=value,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_q,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_q,
dropout_p=(self.self_attn.dropout if self.training else 0.0),
causal=True,
softmax_scale=self.self_attn.scaling,
)
hidden_states = einops.rearrange(hidden_states, "nnz h d -> nnz (h d)")
else: # hidden_states should be in padded format.
query = query * self.self_attn.scaling
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
query_states = einops.rearrange(query, "b s (h d) -> (b h) s d", h=num_heads, d=head_dim)
key_states = einops.rearrange(key, "b l (h d) -> (b h) l d", h=num_heads, d=head_dim)
value_states = einops.rearrange(value, "b l (h d) -> (b h) l d", h=num_heads, d=head_dim)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
attn_weights = (
# attention_mask_k broadcast correctness assumes query_len == 1.
einops.rearrange(attn_weights, "(b h) s l -> b h s l", h=num_heads)
+ attention_mask_k[:, None, None, :]
)
attn_weights = einops.rearrange(attn_weights, "b h s l -> (b h) s l")
if attn_weights.dtype == torch.float16:
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16)
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
hidden_states = torch.bmm(attn_probs, value_states)
hidden_states = einops.rearrange(hidden_states, "(b h) s d -> b s (h d)", h=num_heads, d=head_dim)
# Below requires pytorch 2.0. Installing pytorch 2.0 however may break other packages.
# Only migrate when things become more stable.
# hidden_states = F.scaled_dot_product_attention(
# query=query,
# key=key,
# value=value,
# attn_mask=attention_mask_k[:, None, None, :].bool(), # This assumes query_len == 1.
# dropout_p=(self.self_attn.dropout if self.training else 0.0),
# causal=False,
# )
# hidden_states = einops.rearrange(hidden_states, "b h s d -> b s (h d)")
hidden_states = self.self_attn.out_proj(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = apex_patch.apex_layernorm(self.final_layer_norm, hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if use_cache:
if past_key_value is None:
key, value = tuple(
einops.rearrange(pad_back(tensor), "b s h d -> b s (h d)", h=num_heads, d=head_dim)
for tensor in (key, value)
)
present_key_value = (key, value) # (bsz, seqlen+1, hidden_size).
outputs += (present_key_value,)
return outputs
class OPTDecoder(modeling_opt.OPTDecoder):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self.post_init()
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, transformers.models.opt.modeling_opt.BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# This simplified fast implementation only supports a subset of configurations.
        # use_cache is also ignored, but we don't assert on it: it defaults to True at training time
        # (even though it isn't actually used there), and there is no clean way to disable it only for training.
# We can add support for specific configurations as needed.
assert attention_mask is not None
assert output_attentions is False
assert head_mask is None
assert self.gradient_checkpointing is False
assert inputs_embeds is None
assert self.final_layer_norm is not None
assert self.project_in is None
assert self.project_out is None
assert self.layerdrop == 0
for layer in self.layers:
assert layer.do_layer_norm_before is True
# past_key_values is a list of tuples (key, value). key/value each of size (bsz, seqlen, hidden_size).
past_key_values_length = past_key_values[0][0].shape[1] if past_key_values is not None else 0
# Embed inputs and positions
input_ids = input_ids.view(-1, input_ids.shape[-1])
inputs_embeds = self.embed_tokens(input_ids)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
assert (
inputs_embeds.size() == pos_embeds.size()
), "Internal error: inputs_embeds and pos_embeds not of same shape."
hidden_states = inputs_embeds + pos_embeds
if past_key_values_length == 0:
# Unpad hidden states: (bsz, seqlen, hidden_size) -> (total_nnz, hidden_size)
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = tensor_ops.unpad_input(hidden_states, attention_mask)
attention_mask_k = None
else:
hidden_states, pad_back, cu_seqlens_q, max_seqlen_q = hidden_states, lambda x: x, None, None
attention_mask_k = torch.zeros(
size=attention_mask.size(), dtype=inputs_embeds.dtype, device=inputs_embeds.device
).masked_fill(~attention_mask.bool(), torch.tensor(torch.finfo(inputs_embeds.dtype).min))
next_decoder_cache = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
for idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (pad_back(hidden_states),)
past_key_value = past_key_values[idx] if past_key_values is not None else None
layer_outputs = layer(
hidden_states=hidden_states,
pad_back=pad_back,
cu_seqlens_q=cu_seqlens_q,
max_seqlen_q=max_seqlen_q,
past_key_value=past_key_value,
attention_mask_k=attention_mask_k,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[1],)
hidden_states = apex_patch.apex_layernorm(self.final_layer_norm, hidden_states)
hidden_states = pad_back(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if return_dict:
return transformers.models.opt.modeling_opt.BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
)
return tuple(v for v in (hidden_states, next_cache, all_hidden_states) if v is not None)
class OPTModel(modeling_opt.OPTModel):
def __init__(self, config: modeling_opt.OPTConfig):
super().__init__(config)
self.decoder = OPTDecoder(config)
self.post_init()
class OPTForCausalLM(modeling_opt.OPTForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = OPTModel(config)
self.post_init()
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
try:
import apex
apex_is_installed = True
logger.warning("`apex` is installed. Using fused operators.")
except ImportError:
apex_is_installed = False
logger.warning("`apex` is not installed. Reverting to non-fused operators.")
def apex_layernorm(ln_module, input_):
if apex_is_installed:
return apex.normalization.fused_layer_norm.FusedLayerNormAffineFunction.apply(
input_, ln_module.weight, ln_module.bias, ln_module.normalized_shape, ln_module.eps
)
else:
return ln_module(input_)
def apex_rmsnorm(ln_module, input_):
if apex_is_installed:
return apex.normalization.fused_layer_norm.FusedRMSNormAffineFunction.apply(
input_, ln_module.weight, ln_module.weight.size(), ln_module.variance_epsilon
)
else:
return ln_module(input_)
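# Hedged example (not part of the original file): both helpers fall back to calling the
# module itself when apex is unavailable, so they can wrap any LayerNorm-style module.
# The local torch import keeps this sketch self-contained.
def _example_layernorm_fallback():
    import torch
    ln = torch.nn.LayerNorm(8)
    return apex_layernorm(ln, torch.randn(2, 8))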
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
@dataclasses.dataclass
class NullCharCleanUp(object):
def __call__(self, string: str):
return string.replace("\x00", "")
def __repr__(self):
return "NullCharCleanUp cleans up the NULL chars to prevent db write failures due to encoding discrepancy."
def load_model_and_tokenizer_for_inference(
model_name_or_path: str,
cache_dir=constants.DEFAULT_CACHE_DIR,
model_cls=transformers.AutoModelForCausalLM,
model_kwargs: Optional[dict] = None,
tokenizer_kwargs: Optional[dict] = None,
resize_token_embeddings_if_mismatch=True,
) -> Tuple[transformers.PreTrainedModel, transformers.PreTrainedTokenizer]:
"""Load huggingface model and tokenizer from path or with name for inference.
This function should only be used for decoding or reward scoring.
Notes:
- This function is only guaranteed to work correctly when loading admissible model families,
i.e., opt and llama.
- Loaded models are in eval mode.
- By default, this function internally shrinks the model embedding size to avoid generating out of vocab tokens.
Models like OPT are by default created with embedding size that's divisible by 64, even though the vocab
size is not. This is to help with training speed, but can be problematic when generating, i.e., there is
a low probability of generating out of vocab ids (especially for untrained models).
- By default, loaded models are on the device specified by LOCAL_RANK or cpu.
- This behavior can be overridden by passing `device_map` to model_kwargs.
- By default, loaded tokenizers are slow tokenizers in left padding mode.
- This behavior can be overridden by passing `use_fast` and `padding_side` to tokenizer_kwargs.
"""
logger.warning(f"Loading model for inference: {model_name_or_path}")
local_rank, world_size = distributed_utils.setup()
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else torch.device("cpu")
default_model_kwargs = dict(low_cpu_mem_usage=True, device_map={"": device}, cache_dir=cache_dir)
if model_kwargs is None:
model_kwargs = default_model_kwargs
else:
default_model_kwargs.update(model_kwargs) # Make possible overriding default_model_kwargs.
model_kwargs = default_model_kwargs
default_tokenizer_kwargs = dict(padding_side="left", use_fast=False, cache_dir=cache_dir)
if tokenizer_kwargs is None:
tokenizer_kwargs = default_tokenizer_kwargs
else:
default_tokenizer_kwargs.update(tokenizer_kwargs)
tokenizer_kwargs = default_tokenizer_kwargs
model = model_cls.from_pretrained(model_name_or_path, **model_kwargs).eval()
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path, **tokenizer_kwargs)
if tokenizer.pad_token is None:
        # Base LLaMA ships without a pad token; the same may be true of other pretrained models.
tokenizer.add_special_tokens({"pad_token": constants.DEFAULT_PAD_TOKEN})
if isinstance(model, (transformers.OPTForCausalLM, transformers.LlamaForCausalLM)):
input_embedding_size = model.get_input_embeddings().weight.size(0)
num_tokens = len(tokenizer)
if input_embedding_size != num_tokens and resize_token_embeddings_if_mismatch:
logger.warning(
f"Model embedding size {input_embedding_size} is not equal to vocab size {num_tokens}. "
f"Shrinking/growing embedding size. "
"This is okay if your previous embeddings were inflated to a multiple of 64 for faster computation. "
"But generally, be cautious! This may cause unexpected behavior!!!"
)
utils.stable_resize_token_embeddings(model, num_tokens)
return model, tokenizer
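# Hedged usage sketch (not part of the original file): loading a decoder-only checkpoint in
# half precision for decoding. `checkpoint_dir` is a placeholder; any LLaMA/OPT-style
# directory that also stores its tokenizer should work.
def _example_load_for_inference(checkpoint_dir):
    return load_model_and_tokenizer_for_inference(
        model_name_or_path=checkpoint_dir,
        model_kwargs=dict(torch_dtype=torch.float16),
    )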
@dataclasses.dataclass
class HFDecodingArguments:
"""Only the core args for decoding with HF models."""
top_p: float = 0.9
top_k: int = 0
temperature: float = 1.0
do_sample: bool = True
num_beams: int = 1
max_new_tokens: int = 100 # This is aligned with `openai_utils.OpenAIDecodingArguments`.
num_return_sequences: int = 1
@torch.inference_mode()
def decode_prompts_with_huggingface_given_model(
model: transformers.PreTrainedModel,
tokenizer: transformers.PreTrainedTokenizer,
prompts: Sequence[str],
decoding_args: HFDecodingArguments,
per_device_batch_size=20,
mixed_precision: Optional[str] = None,
max_instances=sys.maxsize,
pad_to_length=2048, # Force pad to this length for distributed communication to work.
tf32=True,
force_multisample_format: bool = False,
cleanup_funcs: Optional[Sequence[Callable]] = (NullCharCleanUp(),),
divide_work: bool = True,
internal_batch_return_sequences: Optional[int] = None,
seed: Optional[int] = None,
communication_num_chunks=1,
tokenization_batch_size=1000,
**decoding_kwargs,
) -> Union[List[List[str]], List[str]]:
"""Decode from a given model a sequence of string prompts."""
if seed is not None:
utils.manual_seed(seed)
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = tf32 # noqa
local_rank, world_size = distributed_utils.setup()
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else torch.device("cpu")
model.generate = common.cast_with_native_amp(model.generate, mixed_precision=mixed_precision)
logger.warning(f"mixed_precision = {mixed_precision}")
generate_kwargs = copy.deepcopy(decoding_args.__dict__)
generate_kwargs.update(
dict(eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, synced_gpus=world_size > 1)
)
generate_kwargs.update(decoding_kwargs) # Possibly overwrite default values for `pad_token_id` and `eos_token_id`.
prompts = prompts[:max_instances]
ori_data_size = len(prompts)
# Make the prompts set a multiple of world_size * per_device_batch_size by padding with the last prompt.
if world_size > 1 and divide_work:
multiple_of = world_size * per_device_batch_size
else:
multiple_of = per_device_batch_size
new_data_size = multiple_of * int(math.ceil(ori_data_size / multiple_of))
new_prompts = list(prompts) + [prompts[-1]] * (new_data_size - ori_data_size)
if world_size > 1 and divide_work: # divide into chunks
per_worker_size = new_data_size // world_size
new_prompts = new_prompts[local_rank * per_worker_size : (local_rank + 1) * per_worker_size]
# TODO(lxuechen): Refactor to tokenize upfront. This way we can pad with tokenizer, and not worry ourselves.
completions = []
for batch_idx, start_idx in tqdm.tqdm(
enumerate(range(0, len(new_prompts), per_device_batch_size)), # Increase the index by the actual batch size.
desc="decoding batches",
total=len(new_prompts) // per_device_batch_size,
disable=not distributed_utils.is_main_process(),
):
batch = new_prompts[start_idx : start_idx + per_device_batch_size]
source = tokenizer(batch, return_tensors="pt", padding=True)
source = common.prepare_inputs(source, device=device)
inputs, attention_mask = source.input_ids, source.attention_mask
if batch_idx == 0: # FSDP is buggy; we do a forward pass first to make it happy
model(input_ids=inputs, attention_mask=attention_mask)
if (
internal_batch_return_sequences is not None
and internal_batch_return_sequences < decoding_args.num_return_sequences
):
# we batch along the num_return_sequences dimension to avoid OOM errors
# usually, return_sequences is dimension (NxR, L) where N is the batch size and R is the number of
# return sequences
# we split this into batches of size (NxR', L) where R' is the number of return sequences in each batch
batch_generate_kwargs = copy.deepcopy(generate_kwargs)
# initialize the list of return sequences for each prompt
sequences = []
for internal_start_idx in range(
0, generate_kwargs["num_return_sequences"], internal_batch_return_sequences
):
internal_batch_size = batch_generate_kwargs["num_return_sequences"] = min(
internal_batch_return_sequences, generate_kwargs["num_return_sequences"] - internal_start_idx
)
internal_batch_sequences = model.generate(
inputs=inputs,
attention_mask=attention_mask,
**batch_generate_kwargs,
)
if not model.config.is_encoder_decoder:
internal_batch_sequences = internal_batch_sequences[:, inputs.shape[1] :]
internal_batch_sequences = torch_ops.right_pad(
internal_batch_sequences,
(internal_batch_sequences.size(0), pad_to_length),
value=tokenizer.pad_token_id,
)
                # einops rearrange: (n d) l -> n d l
internal_batch_sequences = einops.rearrange(
internal_batch_sequences, "(n d) l -> n d l", d=internal_batch_size
)
# append the return sequences for each prompt
sequences.append(internal_batch_sequences)
# concatenate the return sequences for each prompt
sequences = torch.cat(sequences, dim=1)
sequences = einops.rearrange(
sequences,
"n d l -> (n d) l",
)
else:
if internal_batch_return_sequences is not None:
logger.warning(
f"internal_batch_return_sequences ({internal_batch_return_sequences}) >= "
f"num_return_sequences ({decoding_args.num_return_sequences}). Not batching over return sequences."
)
sequences = model.generate(inputs=inputs, attention_mask=attention_mask, **generate_kwargs)
if not model.config.is_encoder_decoder:
sequences = sequences[:, inputs.shape[1] :]
sequences = torch_ops.right_pad(sequences, (sequences.size(0), pad_to_length), value=tokenizer.pad_token_id)
out_of_bound_mask = sequences >= len(tokenizer)
if out_of_bound_mask.any():
logger.fatal(f"Found tokens outside the vocabulary: {sequences[out_of_bound_mask]}")
completions.append(sequences.cpu())
completions = torch.cat(completions, dim=0)
if world_size > 1 and divide_work:
torch.cuda.empty_cache()
logger.info(f"RANK {local_rank} starting all_gather with {communication_num_chunks} communication_num_chunks")
mine = einops.rearrange(completions, "(n d) l -> n d l", d=generate_kwargs["num_return_sequences"])
chunks = torch.chunk(mine, chunks=communication_num_chunks, dim=1)
all_chunk_list = [
distributed_utils.all_gather_and_cat(chunk.contiguous().to(device), dim=0).cpu() for chunk in chunks
]
completions = torch.cat(all_chunk_list, dim=1)
completions = einops.rearrange(completions, "n d l -> (n d) l")
logger.info(
f"RANK {local_rank} Start tokenizer batch decoding {completions.size(0)} sequences", main_process_only=False
)
# chunk completions into chunks of 1000 and tokenize
text_sequences = []
for start_idx in tqdm.trange(0, completions.size(0), tokenization_batch_size):
text_sequences.extend(
tokenizer.batch_decode(
completions[start_idx : start_idx + tokenization_batch_size],
skip_special_tokens=True,
)
)
if cleanup_funcs is not None:
for cleanup_func in cleanup_funcs:
text_sequences = [cleanup_func(s) for s in text_sequences]
logger.info(f"RANK {local_rank} Finished tokenizer batch decoding and cleaning", main_process_only=False)
# convert the list into a nested list of consecutive `num_return_sequences` items, if > 1.
if decoding_args.num_return_sequences > 1 or force_multisample_format:
text_sequences = [
text_sequences[i : i + decoding_args.num_return_sequences]
for i in range(0, len(text_sequences), decoding_args.num_return_sequences)
]
text_sequences = text_sequences[:ori_data_size]
return text_sequences
def decode_prompts_with_huggingface(
model_name_or_path: str,
prompts: Sequence[str],
decoding_args: HFDecodingArguments,
cache_dir=constants.DEFAULT_CACHE_DIR,
per_device_batch_size=20,
mixed_precision: Optional[str] = None,
max_instances=sys.maxsize,
pad_to_length=2048, # Force pad to this length for distributed communication to work.
tf32=True,
force_multisample_format: bool = False,
seed: Optional[int] = None,
communication_num_chunks: int = 1,
**decoding_kwargs,
) -> Union[List[List[str]], List[str]]:
"""Decode from a huggingface model given a sequence of string prompts.
Args:
prompts: A sequence of string prompts.
decoding_args: Decoding arguments.
model_name_or_path: The name or path of the huggingface model. If it is a path, the directory location should also store
the tokenizer.
per_device_batch_size: The batch size per device for decoding.
cache_dir: The directory to cache the huggingface model.
mixed_precision: Whether to use mixed precision. If None, no casting will be performed.
max_instances: The maximum number of prompts to decode.
pad_to_length: The token length to pad the prompts. This is necessary for and only used in distributed decoding.
tf32: Whether to use tensorfloat32 for matrix multiplication.
force_multisample_format: Whether to force the outputs to be in the multisample format.
seed: The random seed. If None, this function is generally not deterministic, unless the seed is fixed outside.
communication_num_chunks: Number of chunks to create for final communication.
Increase this to reduce the size of the chunk per communication.
**decoding_kwargs: Misc keyword args for `model.generate`.
Setting values here may override the values given by `decoding_args`.
Returns:
A list of string responses, if `num_return_sequences` is 1 and not `force_multisample_format`;
otherwise, a list of lists of string responses.
"""
model, tokenizer = load_model_and_tokenizer_for_inference(
model_name_or_path=model_name_or_path,
cache_dir=cache_dir,
model_kwargs=dict(torch_dtype=utils.convert_str_dtype_to_torch_dtype(mixed_precision)),
)
return decode_prompts_with_huggingface_given_model(
model=model,
tokenizer=tokenizer,
prompts=prompts,
decoding_args=decoding_args,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
max_instances=max_instances,
pad_to_length=pad_to_length,
tf32=tf32,
force_multisample_format=force_multisample_format,
seed=seed,
communication_num_chunks=communication_num_chunks,
**decoding_kwargs,
)
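# Hedged usage sketch (not part of the original file): sampling a couple of completions per
# prompt from a checkpoint. The path and prompt are placeholders.
def _example_decode_prompts(checkpoint_dir):
    decoding_args = HFDecodingArguments(temperature=0.7, max_new_tokens=64, num_return_sequences=2)
    return decode_prompts_with_huggingface(
        model_name_or_path=checkpoint_dir,
        prompts=["Tell me something about alpacas."],
        decoding_args=decoding_args,
        per_device_batch_size=2,
    )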
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
@torch.inference_mode()
def score_sequences_with_huggingface_given_model(
model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
sequences: Sequence[str],
per_device_batch_size=20,
max_instances=sys.maxsize,
mixed_precision: Optional[str] = None,
tf32=False,
divide_work=True,
):
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = tf32 # noqa
local_rank, world_size = distributed_utils.setup()
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else torch.device("cpu")
model.forward = common.cast_with_native_amp(model.forward, mixed_precision=mixed_precision)
logger.warning(f"mixed_precision = {mixed_precision}")
sequences = sequences[:max_instances]
ori_data_size = len(sequences)
# To make communication work, we round up the dataset to the nearest multiple of the actual batch size.
if world_size > 1 and divide_work:
batch_size = per_device_batch_size * world_size
else:
batch_size = per_device_batch_size
new_data_size = batch_size * int(math.ceil(ori_data_size / batch_size)) # Nearest multiple.
new_sequences = list(sequences) + [sequences[-1]] * (new_data_size - ori_data_size) # Pad with the last prompt.
return_rewards = []
for batch_idx, start_idx in tqdm.tqdm(
enumerate(range(0, new_data_size, batch_size)),
desc="evaluating rewards for batches",
total=new_data_size // batch_size,
disable=not distributed_utils.is_main_process(),
):
batch = new_sequences[start_idx : start_idx + batch_size]
if world_size > 1 and divide_work:
local_batch = batch[local_rank * per_device_batch_size : (local_rank + 1) * per_device_batch_size]
else:
local_batch = batch
source = tokenizer(
local_batch,
return_tensors="pt",
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
)
source = common.prepare_inputs(source, device=device)
rewards = model(input_ids=source.input_ids, attention_mask=source.attention_mask).rewards
if world_size > 1 and divide_work:
rewards = distributed_utils.all_gather_and_cat(rewards, dim=0)
return_rewards.extend(rewards.tolist())
return return_rewards[:ori_data_size]
def score_sequences_with_huggingface(
sequences: Sequence[str],
model_name_or_path: str,
per_device_batch_size=20,
cache_dir=constants.DEFAULT_CACHE_DIR,
max_instances=sys.maxsize,
mixed_precision: Optional[str] = None,
tf32=False,
flash_attn=False,
) -> List[float]:
"""Score samples with a reward model.
Args:
sequences: A sequence of strings.
model_name_or_path: Name of the reward model.
per_device_batch_size: The batch size per device for evaluating rewards.
cache_dir: The directory to cache the huggingface model.
max_instances: The maximum number of prompts to rerank.
mixed_precision: Whether to use mixed precision. If None, no casting will be performed.
tf32: Whether to use tensorfloat32 for matrix multiplication.
flash_attn: Turns on flash_attn for the reward model if True.
Returns:
A list of floats representing rewards.
"""
model, tokenizer = load_model_and_tokenizer_for_inference(
model_name_or_path=model_name_or_path,
model_cls=reward_model.RewardModel,
cache_dir=cache_dir,
model_kwargs=dict(
torch_dtype=utils.convert_str_dtype_to_torch_dtype(mixed_precision),
flash_attn=flash_attn,
),
)
return score_sequences_with_huggingface_given_model(
model=model,
tokenizer=tokenizer,
sequences=sequences,
per_device_batch_size=per_device_batch_size,
mixed_precision=mixed_precision,
max_instances=max_instances,
tf32=tf32,
)
@torch.inference_mode()
def rerank_sequences_with_huggingface(
sequences: Sequence[Sequence[str]],
model_name_or_path: str,
rerank_top_k=1,
per_device_batch_size=20,
cache_dir=constants.DEFAULT_CACHE_DIR,
mixed_precision: Optional[str] = None,
max_instances=sys.maxsize,
tf32=False,
flash_attn=False,
) -> Tuple[List[List[str]], List[List[int]]]:
"""Rerank samples with a reward model.
Args:
sequences: A nested sequence of strings. Each inner sequence contains samples with the same prompt.
model_name_or_path: Name of the reward model.
rerank_top_k: The number of top samples to return.
per_device_batch_size: The batch size per device for evaluating rewards.
cache_dir: The directory to cache the huggingface model.
max_instances: The maximum number of prompts to rerank.
mixed_precision: Whether to use mixed precision. If None, no casting will be performed.
tf32: Whether to use tensorfloat32 for matrix multiplication.
flash_attn: Turns on flash_attn for the reward model if True.
Returns:
A tuple with two entries.
The first is a nested sequence of strings. Each inner sequence contains the top-k samples with the same prompt.
The second is a nested sequence of integers. Each inner sequence contains the indices of the top-k samples.
"""
sequences = sequences[:max_instances]
flat_sequences = [sequence_i_j for sequence_i in sequences for sequence_i_j in sequence_i]
rewards = score_sequences_with_huggingface(
sequences=flat_sequences,
model_name_or_path=model_name_or_path,
per_device_batch_size=per_device_batch_size,
cache_dir=cache_dir,
mixed_precision=mixed_precision,
tf32=tf32,
flash_attn=flash_attn,
)
rewards = einops.rearrange(torch.tensor(rewards), "(b m) -> b m", m=len(sequences[0]))
# Nested list of "size" (data_size, num_options).
top_indices = rewards.topk(rerank_top_k, dim=1).indices.tolist()
top_sequences = [[sequence[i] for i in top_index] for sequence, top_index in utils.zip_(sequences, top_indices)]
return top_sequences, top_indices
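# Hedged usage sketch (not part of the original file): best-of-n selection with a trained
# reward model. `reward_ckpt` is a placeholder; each inner list holds the candidate
# sequences (prompt plus completion) generated for one instruction.
def _example_best_of_n(reward_ckpt, candidates_per_prompt):
    top_sequences, top_indices = rerank_sequences_with_huggingface(
        sequences=candidates_per_prompt,
        model_name_or_path=reward_ckpt,
        rerank_top_k=1,
    )
    return top_sequences, top_indices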
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reward conditioning a la QUARK.
For the all-quantiles formulation, during decoding each instance takes the following form (except in the first decoding stage):
<bos_token><reward_cond_token><query><response><eos_token>
E.g.,
<s><reward_0>Tell me something about alpacas.Alpacas are cute.</s>
"""
FIRST_STEP_IDX = 1
logger = logging.get_logger(__name__)
def ignore_tokens(input_ids: Tensor, attention_mask: Tensor, tokens_to_ignore: Sequence[int]):
"""Clear out positions where input_ids has tokens_to_ignore in attention_mask."""
attention_mask = attention_mask.clone()
for token_to_ignore in tokens_to_ignore:
attention_mask[input_ids == token_to_ignore] = 0
return input_ids, attention_mask
class DataPool(object):
def __init__(self, tokenizer: transformers.PreTrainedTokenizer):
self.tokenizer = tokenizer
self.additional_special_tokens = tokenizer.additional_special_tokens
self.queries = []
self.responses = []
self.rewards = []
def add(self, queries, responses, rewards):
for main_list, this_list in utils.zip_(
(self.queries, self.responses, self.rewards), (queries, responses, rewards)
):
main_list.extend(this_list)
def clear(self):
(self.queries, self.responses, self.rewards) = [], [], []
def sort_and_get(self, train_on_best_quantile=True):
queries, responses, rewards = utils.parallel_sort(
self.queries,
self.responses,
self.rewards,
key=lambda x: x[-1],
reverse=True,
)
size = len(queries)
chunk_sizes = [size // len(self.additional_special_tokens)] * len(self.additional_special_tokens)
chunk_sizes[-1] = chunk_sizes[-1] + size % len(self.additional_special_tokens)
assert sum(chunk_sizes) == size, "Internal error: Sum of chunk sizes doesn't match up with total size."
if train_on_best_quantile: # Don't inject any tokens here.
queries, responses, rewards = tuple(l[: chunk_sizes[0]] for l in (queries, responses, rewards))
else:
injected_tokens = []
for chunk_index, chunk_size in enumerate(chunk_sizes):
injected_tokens.extend([self.additional_special_tokens[chunk_index]] * chunk_size)
queries = [f"{injected_token}{query}" for injected_token, query in utils.zip_(injected_tokens, queries)]
return queries, responses, rewards
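# Hedged usage sketch (not part of the original file): the intended DataPool life cycle
# during Quark training. Rollouts and their rewards are accumulated, then either the best
# quantile is returned as-is or all quantiles are returned with their reward-conditioning
# token prepended to each query. The tokenizer is assumed to already carry the reward
# tokens as additional special tokens.
def _example_data_pool(tokenizer, queries, responses, rewards):
    pool = DataPool(tokenizer)
    pool.add(queries=queries, responses=responses, rewards=rewards)
    return pool.sort_and_get(train_on_best_quantile=True)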
class QuarkTrainer(rl_trainer.RLTrainer):
def __init__(
self,
args,
train_dataset: data_utils.QueryDataset,
eval_dataset: data_utils.QueryDataset,
data_collator: Callable,
policy: nn.Module,
ref_policy: nn.Module,
reward_model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
accelerator: accelerate_patch.MyAccelerator,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[LRScheduler] = None,
):
super().__init__(
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=data_collator,
policy=policy,
ref_policy=ref_policy,
reward_model=reward_model,
tokenizer=tokenizer,
accelerator=accelerator,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
)
self.data_pool = DataPool(self.tokenizer)
self.entropy_ctl = kl_controller.FixedKLController(kl_coef=args.entropy_coef)
self.sft_dataloader = None # Must be instantiated in `rollout`.
def train(self):
total_epochs = self.args.total_epochs
total_episodes = len(self.train_dataset) * total_epochs # noqa
total_steps = total_episodes // self.args.rollout_batch_size # noqa
logger.warning(
f"***Training starts***\n"
f"Total epochs: {total_epochs} => Total episodes: {total_episodes} => Total steps: {total_steps}",
)
self.create_optimizer_and_scheduler(total_steps)
infinite_train_dataloader = self.get_train_dataloader()
for step_idx in tqdm.tqdm(
range(FIRST_STEP_IDX, total_steps + FIRST_STEP_IDX),
disable=not self.accelerator.is_main_process,
desc="steps",
total=total_steps,
):
if step_idx % self.args.save_steps == 0 or step_idx in self.args.save_steps_extra_list:
self.save_model(utils.join(self.args.output_dir, f"checkpoint-{step_idx}"))
if self.args.eval_steps is not None and step_idx % self.args.eval_steps == 0:
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
unwrapped_policy = unwrapped_policy.base_model
self.evaluate(step_idx, unwrapped_policy=unwrapped_policy)
self.log_history.append(self.step(infinite_train_dataloader, step_idx))
return self.log_history
def step(self, train_dataloader, step_idx, **kwargs):
rollouts_dataloader = self.rollout(train_dataloader, step_idx)
stats_list = []
for _ in tqdm.tqdm(
range(self.args.num_gradient_steps_per_step), disable=not self.accelerator.is_main_process, desc="gradstep"
):
for substep_idx in range(1, self.accelerator.gradient_accumulation_steps + 1):
                # WARNING: self.accelerator.accumulate can lead to misleading results, since sync_gradients
                # depends on whether the registered dataloader has ended (or on step % accumulation_steps).
                # If your dataloader ends before the last substep, gradients are not synced, yet the optimizer
                # tries to update, which gives you a shape mismatch error.
should_sync = substep_idx == self.accelerator.gradient_accumulation_steps
context = contextlib.nullcontext if should_sync else self.accelerator.no_sync
# no_sync here results in higher memory usage because FSDP will accumulate the full model gradients
# (instead of gradient shards) until the eventual sync.
with context(self.policy):
batch = next(rollouts_dataloader)
loss, stats_for_this_step = self.compute_loss(batch, **kwargs)
self.accelerator.backward(loss)
if should_sync:
if self.args.max_grad_norm is not None:
self.accelerator.clip_grad_norm_(self.policy.parameters(), self.args.max_grad_norm)
stats_for_this_step["loss/grad_norm"] = self._compute_grad_norm()
stats_list.append(stats_for_this_step)
self.accelerator.unwrap_optimizer(self.optimizer).step()
self.policy.zero_grad(set_to_none=True)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
stats = common.merge_dict(stats_list, torch.stack) # list of dict -> dict: str -> 1-D tensor
stats = self.record_step_stats(stats, step_idx=step_idx)
return stats
def compute_loss(
self, batch: Dict[str, Tensor], logprobs_coef=1.0, kl_coef=None, entropy_coef=None
) -> Tuple[Tensor, Dict]:
self.policy.train()
queries, query_attn_masks, responses = common.unpack_dict(
common.prepare_inputs(batch, device=self.accelerator.device),
keys=("queries", "query_attn_masks", "responses"),
return_type=tuple,
)
queries_no_quark, query_attn_masks_no_quark = ignore_tokens(
input_ids=queries,
attention_mask=query_attn_masks,
tokens_to_ignore=self.tokenizer.additional_special_tokens_ids,
)
policy_outputs = self.policy(queries, query_attn_masks, responses, temperature=self.args.temperature)
with torch.inference_mode():
ref_policy_outputs = self.ref_policy(
queries_no_quark, query_attn_masks_no_quark, responses, temperature=self.args.temperature
)
logits, logprobs = common.unpack_dict(policy_outputs, keys=("logits", "logprobs"))
(ref_logits,) = common.unpack_dict(ref_policy_outputs, keys=("logits",))
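        # Reward-conditioning tokens sit at the end of the vocabulary; drop their logits so KL and entropy
        # are computed over the original vocabulary only.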
original_vocab_size = len(self.tokenizer) - self.args.num_reward_tokens
logits, ref_logits = tuple(t[..., :original_vocab_size] for t in (logits, ref_logits))
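        # F.kl_div takes log-probs as input and probs as target, so summing over the vocab gives KL(policy || ref) per token.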
kl_per_token = F.kl_div(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1), reduction="none").sum(
dim=-1
)
entropies = -(logits.softmax(dim=-1) * logits.log_softmax(dim=-1)).sum(dim=-1)
# https://github.com/GXimingLu/Quark/blob/a4baf754de15f4d9675dd394571a7dd35fc0abd9/main.py#L252
assert responses.size() == logprobs.size() == kl_per_token.size() == entropies.size()
masks = responses == self.tokenizer.pad_token_id
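        # Zero out KL and entropy at padded response positions so they don't contribute to the loss.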
kl_per_token.masked_fill_(masks, 0.0)
entropies.masked_fill_(masks, 0.0)
kl_coef = self.kl_ctl.value if kl_coef is None else kl_coef
entropy_coef = self.entropy_ctl.value if entropy_coef is None else entropy_coef
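        # Quark loss per token: NLL on the quantile-conditioned data, plus a KL penalty toward the reference policy,
        # minus an entropy bonus.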
loss = -logprobs * logprobs_coef + kl_per_token * kl_coef - entropies * entropy_coef
loss = loss.mean()
kl_avg_seq = kl_per_token.sum() / (~masks).sum() # noqa
kl_sum_seq = kl_per_token.sum() / kl_per_token.size(0)
stats = dict(
train=dict(
logprobs=logprobs.mean(),
entropies=entropies.mean(),
kl_avg_seq=kl_avg_seq,
kl_sum_seq=kl_sum_seq,
loss=loss,
masks=masks.float().sum(dim=1).mean(dim=0), # noqa
),
)
return loss, common.flatten_dict(stats, sep="/", postprocess_fn=lambda x: x.detach())
def get_train_dataloader(self):
logger.warning(f"Train dataset size: {len(self.train_dataset)}")
train_dataloader = DataLoader(
dataset=self.train_dataset,
collate_fn=self.data_collator,
batch_size=self.args.rollout_per_device_batch_size,
            shuffle=True,  # Shuffling isn't strictly needed here; kept for consistency.
drop_last=True,
)
train_dataloader = self.accelerator.prepare(train_dataloader) # noqa
self._log_batch_size(train_dataloader, "train_dataloader")
return utils.InfiniteLoader(train_dataloader)
@torch.inference_mode()
def rollout(self, train_dataloader: utils.InfiniteLoader, step_idx: int) -> utils.InfiniteLoader:
"""Get responses conditioned on top reward token and add to data pool."""
self.policy.eval()
self._make_fsdp_happy()
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
if self.args.clear_data_pool_on_each_rollout:
self.data_pool.clear()
text_queries_all, text_responses_all, rewards_all = [], [], []
for batch_idx in tqdm.tqdm(
range(self.args.rollout_accumulation_steps), disable=not self.accelerator.is_main_process, desc="rollout"
):
batch = next(train_dataloader)
queries, query_attn_masks = common.unpack_dict(
common.prepare_inputs(batch, device=self.accelerator.device), keys=("queries", "query_attn_masks")
)
if step_idx == FIRST_STEP_IDX: # Must ignore the reward token on first generation.
queries, query_attn_masks = ignore_tokens(
input_ids=queries,
attention_mask=query_attn_masks,
tokens_to_ignore=self.tokenizer.additional_special_tokens_ids,
)
respond_outputs = unwrapped_policy.respond(queries, query_attn_masks, temperature=self.args.temperature)
(responses,) = common.unpack_dict(respond_outputs, ("responses",))
# Strings below should not contain reward tokens.
text_queries, text_responses = tuple(
self.tokenizer.batch_decode(tensor, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for tensor in (queries, responses)
)
del queries, responses # Prevent mistakes.
text_sequences = [q + r for q, r in utils.zip_(text_queries, text_responses)]
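            # Re-tokenize the concatenated query + response so the reward model can score full sequences.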
sequences = self.tokenizer(text_sequences, return_tensors="pt", padding=True, truncation=True)
rewards = self.reward_model(**sequences).rewards
# Nothing in here should contain the reward token!
self.data_pool.add(queries=text_queries, responses=text_responses, rewards=rewards.tolist())
text_queries_all.extend(text_queries)
text_responses_all.extend(text_responses)
rewards_all.extend(rewards.tolist())
if self.accelerator.is_main_process:
rollouts_to_disk = {"queries": text_queries_all, "responses": text_responses_all, "rewards": rewards_all}
rollouts_to_disk = pd.DataFrame(rollouts_to_disk).to_dict(orient="records")
utils.jdump(rollouts_to_disk, utils.join(self.args.output_dir, "rollouts", f"step_{step_idx}.json"))
self.accelerator.log({"train/reward": utils.mean(rewards_all)}, step=step_idx)
text_queries, text_responses, _ = self.data_pool.sort_and_get(self.args.train_on_best_quantile)
rollouts_dataset = data_preprocessor.QueryResponseDataset(
tokenizer=self.tokenizer,
queries=text_queries,
responses=text_responses,
query_len=self.args.query_len,
response_len=self.args.response_len,
)
rollouts_dataloader = DataLoader(
dataset=rollouts_dataset,
collate_fn=data_utils.DataCollatorForStackableDataset(),
batch_size=self.args.step_per_device_batch_size,
shuffle=True,
drop_last=True,
)
rollouts_dataloader = utils.InfiniteLoader(rollouts_dataloader)
return rollouts_dataloader
def record_step_stats(self, stats, step_idx, **kwargs):
for k, v in stats.items():
stats[k] = v.mean(dim=0)
stats = {key: value.item() if torch.is_tensor(value) else value for key, value in stats.items()}
stats["train/kl_coef"] = self.args.kl_coef
stats["train/entropy_coef"] = self.args.entropy_coef
stats["train/lr"] = self.optimizer.param_groups[0]["lr"]
if self.accelerator.is_main_process:
self.accelerator.log(stats, step=step_idx)
return stats
@torch.inference_mode()
def save_model(self, output_dir: Optional[str] = None, give_rw_access=True):
output_dir = self.args.output_dir if output_dir is None else output_dir
utils.makedirs(output_dir)
model, tokenizer = self.policy, self.tokenizer
with FSDP.state_dict_type(
model, StateDictType.FULL_STATE_DICT, FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
):
logger.warning("Gathering full state_dict...")
state_dict = model.state_dict()
logger.warning("Finished gathering full state_dict...")
if self.accelerator.is_main_process:
# Retain and remap policy keys.
new_state_dict = dict()
prefix = "base_model."
for key, value in state_dict.items():
if key.startswith(prefix):
new_state_dict[key[len(prefix) :]] = value
state_dict = new_state_dict
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
unwrapped = unwrap_model(model).base_model
assert isinstance(
unwrapped, (transformers.OPTForCausalLM, transformers.LlamaForCausalLM)
), f"Expected to save a generative policy, but found model to be of type: {type(unwrapped)}."
if hasattr(unwrapped, "_keys_to_ignore_on_save"):
logger.warning(f"keys to ignore on save: {unwrapped._keys_to_ignore_on_save}")
logger.warning(f"Saving model checkpoint to {output_dir}")
logger.warning(f"Saving {len(cpu_state_dict)} keys:\n{utils.jdumps(cpu_state_dict.keys())}")
unwrapped.save_pretrained(output_dir, state_dict=cpu_state_dict)
tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, constants.TRAINING_ARGS_NAME))
def _make_left_padded_tokenizer(
model_name_or_path: AnyPath,
cache_dir: AnyPathOrNone = constants.DEFAULT_CACHE_DIR,
**kwargs,
) -> transformers.PreTrainedTokenizer:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir=cache_dir,
padding_side="left",
**kwargs,
)
if tokenizer.pad_token is None:
tokenizer.add_special_tokens(dict(pad_token=constants.DEFAULT_PAD_TOKEN))
return tokenizer
def make_tokenizer(args):
# policy_tokenizer left pads, since the policy requires batch decoding.
policy_tokenizer = _make_left_padded_tokenizer(
args.policy_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
    # reward_tokenizer left pads, since we need the embedding of the rightmost non-pad token.
reward_tokenizer = _make_left_padded_tokenizer(
args.reward_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
if policy_tokenizer.get_vocab() != reward_tokenizer.get_vocab():
raise ValueError("AlpacaFarm does not support different tokenizer for policy and reward models.")
logger.warning(f"Adding {args.num_reward_tokens} reward conditioning tokens for Quark.")
policy_tokenizer.add_special_tokens(
{"additional_special_tokens": [f"<reward_{i}>" for i in range(args.num_reward_tokens)]} # noqa
)
return policy_tokenizer
def make_models(
tokenizer: transformers.PreTrainedTokenizer,
args,
accelerator: accelerate.Accelerator,
):
def make_generative_policy():
base_model = common.make_generative_lm(
model_name_or_path=args.policy_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
utils.stable_resize_token_embeddings(base_model, len(tokenizer), jitter_new_embeddings=True)
return base_model
def make_reward_model():
return reward_model_module.RewardModel.from_pretrained(
args.reward_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
policy = common.prepare_model_for_custom_fn(model=policy, fn_name="respond", accelerator=accelerator)
policy = accelerator.prepare(policy) # noqa
ref_policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
ref_policy.requires_grad_(False)
ref_policy = accelerator.prepare(ref_policy) # noqa
reward_model = make_reward_model()
reward_model.requires_grad_(False)
reward_model = accelerator.prepare(reward_model)
# TODO: This is a hack to get FSDP running. Remove in the future when this is fixed.
if accelerator.distributed_type == accelerate.DistributedType.FSDP:
inputs = tokenizer("fsdp are you happy now??? :)" * 50, return_tensors="pt")
inputs = {key: value.to(accelerator.device) for key, value in inputs.items()}
policy(inputs["input_ids"], inputs["attention_mask"], inputs["input_ids"])
return dict(policy=policy, ref_policy=ref_policy, reward_model=reward_model)
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
@dataclass
class DataArguments:
dataset_path: str = field(default="tatsu-lab/alpaca_farm")
dataset_name: str = field(default="alpaca_instructions")
train_splits: List[str] = field(default_factory=lambda: ["unlabeled"])
eval_splits: List[str] = field(default_factory=lambda: ["val"])
prompt_dict_path: str = field(
default=None,
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
wandb_project: str = field(default=constants.WANDB_PROJECT)
cache_dir: Optional[str] = field(default=constants.DEFAULT_CACHE_DIR)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
truncate_tokens: Optional[List[str]] = field(
default_factory=lambda: None,
metadata={
"help": "Tokens in strings to truncate at first occurrence. "
"This was used in original OAI summarization paper to avoid models returning incomplete sentences. "
},
)
truncate_after: Optional[int] = field(
default=None, metadata={"help": "Truncate after this number of tokens. Prevents early truncation."}
)
penalty_reward_value: float = field(
default=-1.0,
metadata={
"help": "Reward assigned to sequences that are truncated, "
"e.g., due to outputting incomplete sentences for given context window."
},
)
total_epochs: int = field(default=10)
rollout_batch_size: int = field(default=512)
step_batch_size: int = field(default=256)
rollout_per_device_batch_size: int = field(default=32)
step_per_device_batch_size: int = field(default=2)
adam_epsilon: float = field(
default=1e-5,
metadata={
"help": "Epsilon for AdamW optimizer. "
"This is the default for OAI PPO code and UW Quark code. "
"This is not the Hugging Face default."
},
)
temperature: float = field(default=1.0)
kl_coef: float = field(default=0.2)
target_kl: float = field(default=6.0)
k_beta: float = field(default=0.1)
adaptive_kl: bool = field(default=False)
eval_batches: int = field(default=sys.maxsize, metadata={"help": "Maximum number of batches to evaluate on."})
save_steps_extra: Optional[str] = field(
default=None,
metadata={
"help": "A list of predetermined checkpoints to save, represented in the format 'no1__no2__no3'. "
"Parse this with str.split('__')."
},
)
query_len: int = field(default=192)
response_len: int = field(default=300)
policy_model_name_or_path: str = field(default=None)
reward_model_name_or_path: str = field(default=None)
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
num_reward_tokens: int = field(default=4, metadata={"help": "Number of extra reward conditioning tokens in Quark."})
entropy_coef: float = field(
default=0.0,
metadata={"help": "Entropy regularization coefficient for Quark."},
)
clear_data_pool_on_each_rollout: bool = field(
default=True,
metadata={"help": "If True, clear the data pool before each rollout period for Quark."},
)
train_on_best_quantile: bool = field(
default=True,
metadata={"help": "If True, train only on the examples with best rewards for Quark."},
)
num_gradient_steps_per_step: int = field(
default=1,
metadata={"help": "Number of gradient steps to take per step for Quark."},
)
def __post_init__(self):
        # Superclass' __post_init__ is very complicated; skip calling super() for now to avoid breaking something.
# super().__post_init__()
if self.tf32: # super().__post_init__() actually does this.
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = True # noqa
world_size = distributed_utils.get_world_size()
# Checks on rollout_batch_size only matter for PPO.
assert self.rollout_batch_size >= self.rollout_per_device_batch_size * world_size, (
"rollout_batch_size is smaller than rollout_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.rollout_batch_size % (self.rollout_per_device_batch_size * world_size) == 0
), "rollout_batch_size is not a multiple of rollout_per_device_batch_size * world_size. "
assert self.step_batch_size >= self.step_per_device_batch_size * world_size, (
"step_batch_size is smaller than step_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.step_batch_size % (self.step_per_device_batch_size * world_size) == 0
), "step_batch_size is not a multiple of step_per_device_batch_size * world_size. "
logger.warning(
f"Rollout stats:\n"
f"\trollout_batch_size: {self.rollout_batch_size}\n"
f"\trollout_per_device_batch_size: {self.rollout_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.rollout_batch_size // self.rollout_per_device_batch_size) % world_size == 0
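        # Hence rollout_batch_size == rollout_per_device_batch_size * world_size * rollout_accumulation_steps.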
self.rollout_accumulation_steps = self.rollout_batch_size // self.rollout_per_device_batch_size // world_size
logger.warning(
f"Step stats:\n"
f"\tstep_batch_size: {self.step_batch_size}\n"
f"\tstep_per_device_batch_size: {self.step_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.step_batch_size // self.step_per_device_batch_size) % world_size == 0
self.gradient_accumulation_steps = self.step_batch_size // self.step_per_device_batch_size // world_size
logger.warning(
f"Accumulation steps:\n"
f"\trollout_accumulation_steps: {self.rollout_accumulation_steps}\n"
f"\tgradient_accumulation_steps: {self.gradient_accumulation_steps}\n"
)
if self.save_steps_extra is not None:
self.save_steps_extra_list = [int(string) for string in self.save_steps_extra.split("__")]
else:
self.save_steps_extra_list = []
assert self.num_reward_tokens > 1, "Quark requires at least 2 reward tokens."
def set_truncate_token_ids(self, tokenizer: transformers.PreTrainedTokenizer):
"""Convert truncation token to token ids.
This is called in RLTrainer.
"""
truncate_tokens = self.truncate_tokens
if truncate_tokens is None:
truncate_token_ids = None
else:
truncate_token_ids = tokenizer.convert_tokens_to_ids(truncate_tokens)
self.truncate_token_ids = truncate_token_ids
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class KLController(abc.ABC):
value: Union[int, float]
def step(self, *args, **kwargs):
pass
class FixedKLController(KLController):
def __init__(self, kl_coef):
super(FixedKLController, self).__init__()
self.value = kl_coef
class AdaptiveKLController(KLController):
def __init__(self, init_kl_coef, target_kl, k_beta, accelerator=None):
super(AdaptiveKLController, self).__init__()
self.value = init_kl_coef
self.target_kl = target_kl
self.k_beta = k_beta
self.accelerator = accelerator
def step(self, current_kl: float):
if self.accelerator is not None:
current_kl = torch.tensor(current_kl, device=self.accelerator.device)
dist.all_reduce(current_kl, op=dist.ReduceOp.SUM)
current_kl = (current_kl / self.accelerator.num_processes).item()
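        # Proportional controller (as in Ziegler et al., 2019): scale the KL coefficient by 1 + k_beta * error,
        # with the error clipped to +/-20%.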
proportional_error = np.clip(current_kl / self.target_kl - 1, -0.2, 0.2)
mult = 1.0 + self.k_beta * proportional_error
self.value *= mult
def make_kl_controller(args, accelerator=None):
if args.adaptive_kl:
return AdaptiveKLController(
init_kl_coef=args.kl_coef,
target_kl=args.target_kl,
k_beta=args.k_beta,
accelerator=accelerator,
)
else:
return FixedKLController(kl_coef=args.kl_coef)
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
class PPOTrainer(rl_trainer.RLTrainer):
def __init__(
self,
args,
train_dataset: data_preprocessor.QueryDataset,
eval_dataset: data_preprocessor.QueryDataset,
data_collator: Callable,
policy: rl_models.ActorCritic,
ref_policy: rl_models.Policy,
reward_model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
accelerator: accelerate_patch.MyAccelerator,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[LRScheduler] = None,
):
super(PPOTrainer, self).__init__(
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=data_collator,
policy=policy,
ref_policy=ref_policy,
reward_model=reward_model,
tokenizer=tokenizer,
accelerator=accelerator,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
)
def _shape_reward(
self, rewards: Tensor, responses: Tensor, logprobs: Tensor, ref_logprobs: Tensor
) -> Dict[str, Tensor]:
        # For some reason, the line below doesn't work.
# kl = (logits.softmax(dim=-1) * (logits.log_softmax(dim=-1) - ref_logits.log_softmax(dim=-1))).sum(dim=-1)
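        # Approximate the per-token KL by the log-prob ratio of the sampled token, clamped at zero so the penalty
        # never turns into a bonus.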
kl = torch.clamp(logprobs - ref_logprobs, min=0.0)
non_score_rewards = -self.kl_ctl.value * kl
shaped_rewards = non_score_rewards.clone()
        # This introduces a small off-by-one index bug if pad_token_id == eos_token_id.
terminal_positions = (responses != self.tokenizer.pad_token_id).sum(dim=1) - 1
shaped_rewards[list(range(rewards.size(0))), terminal_positions] += rewards
return dict(shaped_rewards=shaped_rewards, non_score_rewards=non_score_rewards, kl=kl)
def _estimate_advantage(self, rewards: Tensor, values: Tensor) -> Dict[str, Tensor]:
"""Generalized advantage estimation.
Reference:
https://arxiv.org/abs/1506.02438
"""
if self.args.whiten_rewards:
rewards = torch_ops.whiten(rewards, shift_mean=False)
lastgaelam = 0
advantages_reversed = []
gen_length = self.args.response_len
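        # GAE backward recursion: delta_t = r_t + gamma * V_{t+1} - V_t; A_t = delta_t + gamma * lam * A_{t+1}.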
for t in reversed(range(gen_length)):
nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
delta = rewards[:, t] + self.args.gamma * nextvalues - values[:, t]
lastgaelam = delta + self.args.gamma * self.args.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], dim=1)
returns = advantages + values
advantages = torch_ops.whiten(advantages, shift_mean=True)
return dict(returns=returns, advantages=advantages)
@torch.inference_mode()
def rollout(self, queries_data) -> Dict[str, Tensor]:
"""Rollout trajectories with policy.
Args:
queries_data: Sequence of batches or DataLoader.
Each batch is a dict with keys 'queries' and 'query_attn_masks'.
Returns:
Dictionary with keys
'queries', 'query_attn_masks', 'responses',
'logprobs', 'ref_logprobs', 'values',
'rewards', 'non_score_rewards', 'shaped_rewards'.
"""
# Give up dropout throughout.
self.policy.eval()
self._make_fsdp_happy()
# `keep_fp32_wrapper` retains the autocast wrapper of model.forward created by accelerate:
# recall one sets mixed precision options with accelerator.
# The precise value of this arg doesn't matter here, since we use the unwrapped model only for respond.
# Generally, try to use the wrapped model as much as you can, since it's got the autocast/cast-back wrappers.
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
self.ref_policy.eval()
self.reward_model.eval()
rollouts = []
for batch_idx, batch in tqdm.tqdm(
enumerate(queries_data),
disable=not self.accelerator.is_main_process,
desc="rollout",
):
# Sample rollouts.
queries, query_attn_masks = common.unpack_dict(
common.prepare_inputs(batch, device=self.accelerator.device),
keys=("queries", "query_attn_masks"),
)
respond_outputs = unwrapped_policy.respond(queries, query_attn_masks, temperature=self.args.temperature)
(responses,) = common.unpack_dict(respond_outputs, ("responses",))
# Evaluate logprobs of the samples.
rollouts_batch = {"queries": queries, "query_attn_masks": query_attn_masks, "responses": responses}
policy_outputs = self.policy(**rollouts_batch, temperature=self.args.temperature)
ref_policy_outputs = self.ref_policy(**rollouts_batch, temperature=self.args.temperature)
policy_outputs = common.unpack_dict(
policy_outputs, keys=("logprobs", "values", "entropies"), return_type=dict
)
ref_policy_outputs = common.unpack_dict(
ref_policy_outputs, keys=("logprobs", "entropies"), return_type=dict
)
rollouts_batch.update(policy_outputs)
rollouts_batch.update({f"ref_{key}": value for key, value in ref_policy_outputs.items()})
# Evaluate reward of the samples.
text_queries, text_responses = tuple(
self.tokenizer.batch_decode(tensor, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for tensor in (queries, responses)
)
del queries, responses # Prevent mistakes.
            # We retokenize, since the policy and the reward model might not have the same tokenizer.
# TODO(lxuechen): Avoid retokenization when policy and reward tokenizer are the same.
text_sequences = [q + r for q, r in utils.zip_(text_queries, text_responses)]
            # TODO(lxuechen): This response retokenization has issues with OPT, since the tokenizer always prepends
            # <bos_token>. But the issue is local to post_reward, which doesn't matter if we don't penalize.
sequences, responses = tuple(
self.tokenizer(text, return_tensors="pt", padding=True, truncation=True)
for text in (text_sequences, text_responses)
)
sequences, responses = common.prepare_inputs((sequences, responses), device=self.accelerator.device)
reward_outputs = self.reward_model(**sequences)
reward_outputs = self.post_reward(reward_outputs, responses.input_ids)
rollouts_batch.update(reward_outputs)
# Shape reward with KL penalty.
shape_reward_outputs = self._shape_reward(
rewards=rollouts_batch["rewards"],
responses=rollouts_batch["responses"],
logprobs=rollouts_batch["logprobs"],
ref_logprobs=rollouts_batch["ref_logprobs"],
)
rollouts_batch.update(shape_reward_outputs)
rollouts_batch_cpu = {key: value.cpu() for key, value in rollouts_batch.items()}
rollouts.append(rollouts_batch_cpu)
# Items in dict need to be of same shape.
rollouts = common.merge_dict(rollouts, merge_fn=torch.cat)
# Estimating advantages outside the loop gives more samples for reward normalization.
advantages = self._estimate_advantage(
rewards=rollouts["shaped_rewards"].to(self.accelerator.device),
values=rollouts["values"].to(self.accelerator.device),
)
advantages = {key: value.cpu() for key, value in advantages.items()}
return {**rollouts, **advantages}
def post_reward(self, reward_outputs: Dict[str, Tensor], responses: Tensor) -> Dict[str, Tensor]:
"""Assign bad reward values to sequences which didn't stop properly."""
if self.args.truncate_token_ids is None:
return reward_outputs
def get_validity_mask(sequences: Tensor, end_token_id: int) -> Tensor:
"""Mark a batch element as False if the sequence doesn't end with `end_token_id` after `truncate_after`."""
assert sequences.dim() == 2
validity_mask = []
for sequence in sequences:
(nonzeros,) = (sequence == end_token_id).nonzero(as_tuple=True)
if len(nonzeros) == 0:
validity_mask.append(False)
else:
validity_mask.append(
self.args.truncate_after is None
or
# Last occurrence of `end_token_id` is after `truncate_after`.
nonzeros[-1] > self.args.truncate_after
)
return torch.tensor(validity_mask, device=sequences.device)
validity_masks = [get_validity_mask(responses, end_token_id) for end_token_id in self.args.truncate_token_ids]
validity_mask = torch.stack(validity_masks).any(dim=0) # Sequence is valid if it ends with any end token.
rewards = reward_outputs["rewards"]
rewards[~validity_mask] = self.args.penalty_reward_value
return reward_outputs
def compute_loss(self, rollouts: Dict[str, Tensor]) -> Tuple[Tensor, Dict]:
values, old_logprob, returns, advantages, queries, query_attn_masks, responses = common.prepare_inputs(
common.unpack_dict(
rollouts,
keys=("values", "logprobs", "returns", "advantages", "queries", "query_attn_masks", "responses"),
),
device=self.accelerator.device,
)
outputs = self.policy(queries, query_attn_masks, responses, temperature=self.args.temperature)
vpred = outputs["values"]
vpredclipped = torch.clamp(
vpred,
min=values - self.args.cliprange_value,
max=values + self.args.cliprange_value,
)
vf_losses1 = (vpred - returns) ** 2.0
vf_losses2 = (vpredclipped - returns) ** 2.0
vf_loss = 0.5 * torch.maximum(vf_losses1, vf_losses2).mean()
vf_clipfrac = (vf_losses2 > vf_losses1).to(torch.get_default_dtype()).mean()
logprob = outputs["logprobs"]
ratio = torch.exp(logprob - old_logprob)
        # When the current policy is close to the old policy, the KL component of this advantage is approximately correct.
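        # Clipped surrogate objective: take the pessimistic maximum of the unclipped and ratio-clipped policy-gradient losses.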
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, min=1.0 - self.args.cliprange, max=1.0 + self.args.cliprange)
pg_loss = torch.maximum(pg_losses, pg_losses2).mean()
pg_clipfrac = (pg_losses2 > pg_losses).to(torch.get_default_dtype()).mean() # noqa
loss = pg_loss + self.args.vf_coef * vf_loss
entropy = outputs["entropies"].mean()
approxkl = 0.5 * ((logprob - old_logprob) ** 2.0).mean()
return_mean, return_var = returns.mean(), returns.var(unbiased=False)
value_mean, value_var = values.mean(), values.var(unbiased=False)
stats = dict(
loss=dict(policy=pg_loss, value=vf_loss, total=loss),
policy=dict(entropy=entropy, approxkl=approxkl, clipfrac=pg_clipfrac),
returns=dict(mean=return_mean, var=return_var),
val=dict(
vpred=vpred.mean(),
error=((vpred - returns) ** 2).mean(),
clipfrac=vf_clipfrac,
mean=value_mean,
var=value_var,
),
)
return loss, common.flatten_dict(stats, sep="/", postprocess_fn=lambda x: x.detach())
def record_step_stats(self, train_stats, rollouts, step_idx, **kwargs):
kl = rollouts["kl"]
kl_sum_seq, kl_avg_seq = kl.sum(dim=1).mean(dim=0), kl.mean()
shaped_rewards = rollouts["shaped_rewards"].sum(dim=1).mean(dim=0)
non_score_rewards = rollouts["non_score_rewards"].sum(dim=1).mean(dim=0)
rewards = rollouts["rewards"].mean(dim=0)
stats = {
f"objective/kl_coef": kwargs["kl_coef"],
f"objective/kl_sum_seq": kl_sum_seq,
f"objective/kl_avg_seq": kl_avg_seq,
f"objective/shaped_rewards": shaped_rewards,
f"objective/non_score_rewards": non_score_rewards,
f"objective/rewards": rewards, # Original model reward.
f"objective/lr": self.optimizer.param_groups[0]["lr"],
f"objective/entropies": rollouts["entropies"].mean(),
f"objective/ref_entropies": rollouts["ref_entropies"].mean(),
}
for k, v in train_stats.items():
stats[f"ppo/{k}"] = v.mean(dim=0)
stats = {key: value.item() if torch.is_tensor(value) else value for key, value in stats.items()}
if self.accelerator.is_main_process:
self.accelerator.log(stats, step=step_idx)
if self.args.output_dir is not None:
# Store rollout data to disk to debug.
rollouts_to_disk = {
key: self.tokenizer.batch_decode(
tensor, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
for key, tensor in common.unpack_dict(
rollouts, keys=("queries", "responses"), return_type=dict
).items()
}
rollouts_to_disk = pd.DataFrame(rollouts_to_disk).to_dict(orient="records")
utils.jdump(rollouts_to_disk, utils.join(self.args.output_dir, "rollouts", f"step_{step_idx}.json"))
return stats
@torch.inference_mode()
def save_model(self, output_dir: Optional[str] = None, give_rw_access=True, check_corrupted=True):
        # We don't use the accelerator here because we want to be frugal and only store the policy.
# Moreover, we want easy loadability -- calling .from_pretrained on the folder. Full dump wouldn't allow this.
# Logic:
# 1. Retrieve the complete state dict of the wrapped model.
# (retrieving state dict of submodule can lead to loss of keys)
# 2. Remove keys that are part of the value network.
# 3. Rename keys that are part of the policy network, so that they match the naming standard.
output_dir = self.args.output_dir if output_dir is None else output_dir
utils.makedirs(output_dir)
model, tokenizer = self.policy, self.tokenizer
with FSDP.state_dict_type(
model, StateDictType.FULL_STATE_DICT, FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
):
logger.warning("Gathering full state_dict...")
state_dict = model.state_dict()
logger.warning("Finished gathering full state_dict...")
if self.accelerator.is_main_process:
# Retain and remap policy keys.
new_state_dict = dict()
prefix = "policy.base_model."
for key, value in state_dict.items():
if key.startswith(prefix):
new_state_dict[key[len(prefix) :]] = value
state_dict = new_state_dict
if check_corrupted: # Let the checks run on GPU.
is_corrupted = any(value.isnan().any().item() for value in state_dict.values())
logger.warning(f"Is there nans in the state_dict to be dumped? {is_corrupted}")
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
unwrapped = unwrap_model(model).policy.base_model
assert isinstance(
unwrapped, (transformers.OPTForCausalLM, transformers.LlamaForCausalLM)
), f"Expected to save a generative policy, but found model to be of type: {type(unwrapped)}."
if hasattr(unwrapped, "_keys_to_ignore_on_save"):
logger.warning(f"keys to ignore on save: {unwrapped._keys_to_ignore_on_save}")
logger.warning(f"Saving model checkpoint to {output_dir}")
logger.warning(f"Saving {len(cpu_state_dict)} keys:\n{utils.jdumps(cpu_state_dict.keys())}")
unwrapped.save_pretrained(output_dir, state_dict=cpu_state_dict)
tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, constants.TRAINING_ARGS_NAME))
if give_rw_access:
try:
os.system(f"chmod -R a+xwr {output_dir}")
except Exception as e:
logger.fatal(f"Failed to give read-write access to {output_dir}: {e}")
def _make_left_padded_tokenizer(
model_name_or_path: AnyPath,
cache_dir: AnyPathOrNone = constants.DEFAULT_CACHE_DIR,
**kwargs,
) -> transformers.PreTrainedTokenizer:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir=cache_dir,
padding_side="left",
**kwargs,
)
if tokenizer.pad_token is None:
tokenizer.add_special_tokens(dict(pad_token=constants.DEFAULT_PAD_TOKEN))
return tokenizer
def make_tokenizer(args):
# policy_tokenizer left pads, since the policy requires batch decoding.
policy_tokenizer = _make_left_padded_tokenizer(
args.policy_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
    # reward_tokenizer left pads, since we need the embedding of the rightmost non-pad token.
reward_tokenizer = _make_left_padded_tokenizer(
args.reward_model_name_or_path, cache_dir=args.cache_dir, use_fast=args.use_fast_tokenizer
)
if policy_tokenizer.get_vocab() != reward_tokenizer.get_vocab():
raise ValueError("AlpacaFarm does not support different tokenizer for policy and reward models.")
return policy_tokenizer
def make_models(
tokenizer: transformers.PreTrainedTokenizer,
args,
accelerator: accelerate.Accelerator,
) -> dict:
def make_generative_policy():
base_model = common.make_generative_lm(
model_name_or_path=args.policy_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
utils.stable_resize_token_embeddings(base_model, len(tokenizer))
return base_model
def make_reward_model():
return reward_model_module.RewardModel.from_pretrained(
args.reward_model_name_or_path,
flash_attn=args.flash_attn,
mixed_precision=accelerator.mixed_precision,
cache_dir=args.cache_dir,
low_cpu_mem_usage=True,
device_map={"": accelerator.device},
)
# Model construction below seems convoluted, but it's made to trade time for RAM efficiency.
# For large models, object creation could be extremely RAM intensive.
    # Especially so for multiple processes on a single node, each starting off with a copy of the model.
# General strategy is to 1) create a model, 2) move it to target device / shard it, 3) then start next model,
# as opposed to creating all needed models on CPU first, and separately moving / sharding each.
policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
if args.init_value_with_reward:
# Initialize value from reward model a la OAI.
logger.warning("Initializing value model with reward model.")
value_model = rl_models.make_value_with_base_model(args, make_reward_model().backbone_model, tokenizer)
else:
logger.warning("Initializing value model with policy model.")
# Initialize value from policy. Works for sanity, but generally performs worse in instruction-following.
value_model = rl_models.make_value_with_base_model(args, make_generative_policy(), tokenizer)
actor_critic = rl_models.ActorCritic(policy=policy, value_model=value_model)
# We cast how respond should run. It's important the dtypes be consistent with training, since a bf16
# fine-tuned model might not work with fp16 inference.
# Cast step below must precede accelerator.prepare(), since wrapped model might not have `respond` method.
actor_critic = common.prepare_model_for_custom_fn(model=actor_critic, fn_name="respond", accelerator=accelerator)
actor_critic = accelerator.prepare(actor_critic) # noqa
ref_policy = rl_models.make_policy_with_base_model(args, make_generative_policy(), tokenizer)
ref_policy.requires_grad_(False)
ref_policy = accelerator.prepare(ref_policy) # noqa
reward_model = make_reward_model()
reward_model.requires_grad_(False)
reward_model = accelerator.prepare(reward_model)
# TODO: This is a hack to get FSDP running. Remove in the future when this is fixed.
if accelerator.distributed_type == accelerate.DistributedType.FSDP:
inputs = tokenizer("fsdp are you happy now??? :)" * 50, return_tensors="pt")
inputs = {key: value.to(accelerator.device) for key, value in inputs.items()}
actor_critic(inputs["input_ids"], inputs["attention_mask"], inputs["input_ids"])
return dict(policy=actor_critic, ref_policy=ref_policy, reward_model=reward_model)
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FIRST_STEP_IDX = 1
logger = logging.get_logger(__name__)
class RLTrainer(object):
def __init__(
self,
args,
train_dataset: data_preprocessor.QueryDataset,
eval_dataset: data_preprocessor.QueryDataset,
data_collator: Callable,
policy: nn.Module,
ref_policy: nn.Module,
reward_model: nn.Module,
tokenizer: transformers.PreTrainedTokenizer,
accelerator: accelerate_patch.MyAccelerator,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[LRScheduler] = None,
):
super(RLTrainer, self).__init__()
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.data_collator = data_collator
self.policy = policy
self.ref_policy = ref_policy
self.reward_model = reward_model
self.tokenizer = tokenizer
self.optimizer = optimizer
self.accelerator = accelerator
self.lr_scheduler = lr_scheduler
self.kl_ctl = kl_controller.make_kl_controller(args, self.accelerator)
self.log_history = []
self.args.set_truncate_token_ids(self.tokenizer)
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
@abc.abstractmethod
@torch.inference_mode()
def rollout(self, queries_data) -> Dict[str, Tensor]:
raise NotImplementedError
@abc.abstractmethod
def compute_loss(self, rollouts: Dict[str, Tensor]) -> Tuple[Tensor, Dict]:
raise NotImplementedError
@abc.abstractmethod
@torch.inference_mode()
def record_step_stats(self, train_stats, rollouts, step_idx, **kwargs):
raise NotImplementedError
@property
def optimizable_params(self):
return [p for p in self.policy.parameters() if p.requires_grad and p.grad is not None]
@torch.inference_mode()
def _compute_grad_norm(self):
grad_norm = torch.stack([p.grad.norm(2) for p in self.optimizable_params]).norm(2)
if (
self.accelerator.distributed_type == DistributedType.FSDP
and self.policy.sharding_strategy != ShardingStrategy.NO_SHARD
):
# When parameters are sharded, we need to gather each grad norm and then aggregate.
grad_norms = [torch.zeros_like(grad_norm) for _ in range(self.accelerator.num_processes)]
dist.all_gather(grad_norms, grad_norm)
grad_norm = torch.stack(grad_norms).norm(2)
return grad_norm
@torch.inference_mode()
def _compute_param_norm(self):
param_norm = torch.stack([p.norm(2) for p in self.optimizable_params]).norm(2)
if (
self.accelerator.distributed_type == DistributedType.FSDP
and self.policy.sharding_strategy != ShardingStrategy.NO_SHARD
):
            # When parameters are sharded, we need to gather each param norm and then aggregate.
param_norms = [torch.zeros_like(param_norm) for _ in range(self.accelerator.num_processes)]
dist.all_gather(param_norms, param_norm)
param_norm = torch.stack(param_norms).norm(2)
return param_norm
def _make_fsdp_happy(self):
"""Simply do a forward pass with the wrapped model at first.
        FSDP has some weird bugs; we need this flush before running a non-forward method!
        This function should assume the grad context of the caller and
        must not be wrapped with `torch.no_grad` or `torch.enable_grad`!
"""
if self.accelerator.distributed_type == DistributedType.FSDP:
inputs = self.tokenizer("fsdp are you happy now? :)" * 50, return_tensors="pt")
inputs = common.prepare_inputs(inputs, device=self.accelerator.device)
self.policy(inputs["input_ids"], inputs["attention_mask"], inputs["input_ids"])
def step_with_rollouts(self, rollouts):
"""Based on fixed rollouts, run PPO for multiple epochs."""
assert isinstance(self.optimizer, AcceleratedOptimizer), (
"`optimizer` must be pushed through `accelerator.prepare`. "
"Otherwise the `accelerator.accumulate` context manager won't correctly disable `zero_grad` or `step`."
)
rollouts_dataloader = self.get_rollouts_dataloader(rollouts=rollouts)
stats_list = []
for epoch_idx in range(self.args.noptepochs):
for batch_idx, rollouts_batch in tqdm.tqdm(
enumerate(rollouts_dataloader, 1), disable=not self.accelerator.is_main_process, desc="gradstep"
):
with self.accelerator.accumulate(self.policy):
ppo_loss, stats_for_this_step = self.compute_loss(rollouts_batch)
self.accelerator.backward(ppo_loss)
if self.accelerator.sync_gradients:
# Gradient norm almost blows up at some point, but stabilizes eventually, even w/o clipping.
if self.args.max_grad_norm is not None:
self.accelerator.clip_grad_norm_(self.policy.parameters(), self.args.max_grad_norm)
stats_for_this_step["loss/grad_norm"] = self._compute_grad_norm()
stats_list.append(stats_for_this_step)
self.optimizer.step()
self.optimizer.zero_grad(set_to_none=True)
return common.merge_dict(stats_list, torch.stack) # list of dict -> dict: str -> 1-D tensor
def step(self, train_dataloader, step_idx: int):
queries_batches = [next(train_dataloader) for _ in range(self.args.rollout_accumulation_steps)]
rollouts = self.rollout(queries_batches)
train_stats = self.step_with_rollouts(rollouts)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
stats = self.record_step_stats(
rollouts=rollouts, train_stats=train_stats, step_idx=step_idx, kl_coef=self.kl_ctl.value
)
self.kl_ctl.step(stats["objective/kl_sum_seq"])
return stats
def create_optimizer_and_scheduler(self, num_training_steps: int):
optimizer = trainer_utils.create_optimizer(args=self.args, model=self.policy, optimizer=self.optimizer)
lr_scheduler = trainer_utils.create_scheduler(
args=self.args, optimizer=optimizer, lr_scheduler=self.lr_scheduler, num_training_steps=num_training_steps
)
self.optimizer, self.lr_scheduler = self.accelerator.prepare(optimizer, lr_scheduler)
self.accelerator.register_for_checkpointing(self.lr_scheduler) # LR scheduler needs another call to save.
return self.optimizer, self.lr_scheduler
def train(self):
"""Entry point for training."""
total_epochs = self.args.total_epochs
total_episodes = len(self.train_dataset) * total_epochs # noqa
total_steps = total_episodes // self.args.rollout_batch_size # noqa
logger.warning(
f"***Training starts***\n"
f"Total epochs: {total_epochs} => Total episodes: {total_episodes} => Total steps: {total_steps}"
)
self.create_optimizer_and_scheduler(total_steps)
infinite_train_dataloader = self.get_train_dataloader()
for step_idx in tqdm.tqdm(
range(FIRST_STEP_IDX, total_steps + FIRST_STEP_IDX),
disable=not self.accelerator.is_main_process,
desc="steps",
total=total_steps,
):
if step_idx % self.args.save_steps == 0 or step_idx in self.args.save_steps_extra_list:
self.save_model(utils.join(self.args.output_dir, f"checkpoint-{step_idx}"))
if self.args.eval_steps is not None and step_idx % self.args.eval_steps == 0:
self.evaluate(step_idx)
self.log_history.append(self.step(infinite_train_dataloader, step_idx))
return self.log_history
@torch.inference_mode()
def evaluate(self, step_idx: int, unwrapped_policy=None):
"""Evaluate by generating sequences with test prefixes.
FSDP compat: all devices should do the forward pass, since sharded params need to be summoned.
        Only write results in the main process.
"""
# TODO: unhardcode inference args.
logger.warning(f"Start evaluation at step: {step_idx}", main_process_only=True)
prompts, list_dict_data = self.eval_dataset.prompts, self.eval_dataset.list_dict_data
if any(item is None for item in (prompts, list_dict_data)):
logger.warning("No evaluation data, skipping evaluation.", main_process_only=True)
return
# Constants.
model_name = Path(self.args.output_dir).stem # Don't use the helper in common, as no checkpoint is saved yet.
model_name_at_step = f"{model_name}_ckpt_{step_idx}"
temperature = 0.7
del model_name
# Start evaluation.
self.policy.eval()
self._make_fsdp_happy()
if unwrapped_policy is None:
unwrapped_policy = self.accelerator.unwrap_model(self.policy, keep_fp32_wrapper=True)
unwrapped_policy = unwrapped_policy.policy.base_model
outputs = decode.decode_prompts_with_huggingface_given_model(
model=unwrapped_policy,
tokenizer=self.tokenizer,
prompts=prompts,
decoding_args=decode.HFDecodingArguments(max_new_tokens=self.args.response_len, temperature=temperature),
per_device_batch_size=self.args.per_device_eval_batch_size,
divide_work=False,
)
sequences = [i + o for i, o in utils.zip_(prompts, outputs)]
rewards = score.score_sequences_with_huggingface_given_model(
model=self.reward_model,
tokenizer=self.tokenizer,
sequences=sequences,
per_device_batch_size=self.args.rollout_per_device_batch_size,
divide_work=False,
)
if self.accelerator.is_main_process:
results = [
{"reward": reward, model_name_at_step: output, **example}
for reward, output, example in utils.zip_(rewards, outputs, list_dict_data)
]
if self.args.output_dir is not None:
utils.jdump(results, utils.join(self.args.output_dir, f"eval_results_{step_idx}.json"))
logger.warning(f"End evaluation at step: {step_idx}. Processed {len(results)} examples")
@abc.abstractmethod
@torch.inference_mode()
def save_model(self, output_dir: Optional[str] = None):
raise NotImplementedError
def _log_batch_size(self, loader: DataLoader, loader_name):
batch = next(iter(loader))
if isinstance(batch, torch.Tensor):
batch_size = batch.shape[0]
elif isinstance(batch, (list, tuple)):
            batch_size = len(batch[0])  # First element is a batched tensor; its length equals the batch size.
else:
tensor = list(batch.values())[0]
batch_size = tensor.size(0)
logger.warning(f"Batch size of {loader_name} dataloader: {batch_size}", main_process_only=True)
def get_train_dataloader(self):
logger.warning(f"Train dataset size: {len(self.train_dataset)}", main_process_only=True) # noqa
train_dataloader = DataLoader(
dataset=self.train_dataset,
collate_fn=self.data_collator,
batch_size=self.args.rollout_per_device_batch_size,
shuffle=True,
drop_last=True,
)
train_dataloader = self.accelerator.prepare(train_dataloader) # noqa
self._log_batch_size(train_dataloader, "train_dataloader")
return utils.InfiniteLoader(train_dataloader)
def get_rollouts_dataloader(self, rollouts: Dict[str, Tensor], shuffle=True, drop_last=True, keys=None):
if keys is None:
keys = tuple(rollouts.keys())
def collate_rollouts(instances: Sequence[tuple]):
return {key: torch.stack([instance[idx] for instance in instances]) for idx, key in enumerate(keys)}
rollouts_dataset = TensorDataset(*[rollouts[key] for key in keys])
rollouts_dataloader = DataLoader(
dataset=rollouts_dataset,
batch_size=self.args.step_per_device_batch_size,
collate_fn=collate_rollouts,
shuffle=shuffle,
drop_last=drop_last,
)
# Do not prepare, since we don't need to shard the rollouts sampled on each batch.
return rollouts_dataloader
|
# Copyright 2023 The Alpaca Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__)
@dataclass
class DataArguments:
dataset_path: str = field(default="tatsu-lab/alpaca_farm")
dataset_name: str = field(default="alpaca_instructions")
train_splits: List[str] = field(default_factory=lambda: ["unlabeled"])
eval_splits: List[str] = field(default_factory=lambda: ["val"])
prompt_dict_path: str = field(
default=None,
metadata={"help": "Path to the dictionary for the prompt to format examples."},
)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
wandb_project: str = field(default=constants.WANDB_PROJECT)
cache_dir: Optional[str] = field(default=constants.DEFAULT_CACHE_DIR)
flash_attn: bool = field(default=False)
optim: str = field(default="adamw_torch")
truncate_tokens: Optional[List[str]] = field(
default_factory=lambda: None,
metadata={
"help": "Tokens in strings to truncate at first occurrence. "
"This was used in original OAI summarization paper to avoid models returning incomplete sentences. "
},
)
truncate_after: Optional[int] = field(
default=None, metadata={"help": "Truncate after this number of tokens. Prevents early truncation."}
)
penalty_reward_value: float = field(
default=-1.0,
metadata={
"help": "Reward assigned to sequences that are truncated, "
"e.g., due to outputting incomplete sentences for given context window."
},
)
total_epochs: int = field(default=10)
rollout_batch_size: int = field(default=512)
step_batch_size: int = field(default=256)
rollout_per_device_batch_size: int = field(default=32)
step_per_device_batch_size: int = field(default=2)
noptepochs: int = field(default=2)
vf_coef: float = field(default=0.1)
cliprange: float = field(default=0.2)
cliprange_value: float = field(default=0.2)
gamma: float = field(default=1.0)
lam: float = field(default=1.0)
whiten_rewards: bool = field(default=True)
adam_epsilon: float = field(
default=1e-5,
metadata={
"help": "Epsilon for AdamW optimizer. "
"This is the default for OAI PPO code and UW Quark code. "
"This is not the Hugging Face default."
},
)
temperature: float = field(default=1.0)
kl_coef: float = field(default=0.2)
target_kl: float = field(default=6.0)
k_beta: float = field(default=0.1)
adaptive_kl: bool = field(default=False)
eval_batches: int = field(default=sys.maxsize, metadata={"help": "Maximum number of batches to evaluate on."})
init_value_with_reward: bool = field(
default=True, metadata={"help": "Initialize the value model with the reward model."}
)
save_steps_extra: Optional[str] = field(
default=None,
metadata={
"help": "A list of predetermined checkpoints to save, represented in the format 'no1__no2__no3'. "
"Parse this with str.split('__')."
},
)
query_len: int = field(default=192)
response_len: int = field(default=300)
policy_model_name_or_path: str = field(default=None)
reward_model_name_or_path: str = field(default=None)
use_fast_tokenizer: bool = field(
default=False,
metadata={
"help": "Use fast tokenizer if True. "
"Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
"Use fast tokenizer only if you can live with that."
},
)
def __post_init__(self):
        # Superclass' __post_init__ is very complicated; skip calling super() for now to avoid breaking something.
# super().__post_init__()
if self.tf32: # super().__post_init__() actually does this.
torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = True # noqa
world_size = distributed_utils.get_world_size()
# Checks on rollout_batch_size only matter for PPO.
assert self.rollout_batch_size >= self.rollout_per_device_batch_size * world_size, (
"rollout_batch_size is smaller than rollout_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.rollout_batch_size % (self.rollout_per_device_batch_size * world_size) == 0
), "rollout_batch_size is not a multiple of rollout_per_device_batch_size * world_size. "
assert self.step_batch_size >= self.step_per_device_batch_size * world_size, (
"step_batch_size is smaller than step_per_device_batch_size * world_size. "
"Increase the former or decrease the latter to fix this."
)
assert (
self.step_batch_size % (self.step_per_device_batch_size * world_size) == 0
), "step_batch_size is not a multiple of step_per_device_batch_size * world_size. "
logger.warning(
f"Rollout stats:\n"
f"\trollout_batch_size: {self.rollout_batch_size}\n"
f"\trollout_per_device_batch_size: {self.rollout_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.rollout_batch_size // self.rollout_per_device_batch_size) % world_size == 0
self.rollout_accumulation_steps = self.rollout_batch_size // self.rollout_per_device_batch_size // world_size
logger.warning(
f"Step stats:\n"
f"\tstep_batch_size: {self.step_batch_size}\n"
f"\tstep_per_device_batch_size: {self.step_per_device_batch_size}\n"
f"\tworld_size: {world_size}\n",
)
assert (self.step_batch_size // self.step_per_device_batch_size) % world_size == 0
self.gradient_accumulation_steps = self.step_batch_size // self.step_per_device_batch_size // world_size
logger.warning(
f"Accumulation steps:\n"
f"\trollout_accumulation_steps: {self.rollout_accumulation_steps}\n"
f"\tgradient_accumulation_steps: {self.gradient_accumulation_steps}\n"
)
if self.save_steps_extra is not None:
self.save_steps_extra_list = [int(string) for string in self.save_steps_extra.split("__")]
else:
self.save_steps_extra_list = []
def set_truncate_token_ids(self, tokenizer: transformers.PreTrainedTokenizer):
"""Convert truncation token to token ids.
This is called in RLTrainer.
"""
truncate_tokens = self.truncate_tokens
if truncate_tokens is None:
truncate_token_ids = None
else:
truncate_token_ids = tokenizer.convert_tokens_to_ids(truncate_tokens)
self.truncate_token_ids = truncate_token_ids
|