# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#!/usr/bin/env python
# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./flatten_bench.py -t
# ./flatten_bench.py -c
# kernprof -l flatten_bench.py -l; python -m line_profiler flatten_bench.py.lprof
import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from apex_C import flatten as flatten_apex
util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten
torch.manual_seed(0)
# emulate the weights of a small, typical model
x = [
torch.rand((512, 512)).to(get_accelerator().device_name()),
torch.rand((512, 1024)).to(get_accelerator().device_name()),
torch.rand((512, 30000)).to(get_accelerator().device_name())
]
t = x * 30
# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(t)
flat_cpp = flatten(t)
flat_apex = flatten_apex(t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "cpp flatten does not match the python reference"
assert torch.eq(flat_py, flat_apex).all(), "apex flatten does not match the python reference"
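# `unflatten` is loaded above but never exercised; as a minimal round-trip
# sanity sketch, compare it against torch's reference unflatten (assumption:
# both follow the (flat_tensor, tensor_list) calling convention):
from torch._utils import _unflatten_dense_tensors
for restored, orig in zip(_unflatten_dense_tensors(flat_py, t), t):
    assert torch.eq(restored, orig).all(), "unflatten failed to invert flatten"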
TIMES = 1000
# the programs being tested
def py():
for i in range(TIMES):
flat = _flatten_dense_tensors(t)
def cpp():
for i in range(TIMES):
flat = flatten(t)
def apex():
for i in range(TIMES):
flat = flatten_apex(t)
#### cProfile ####
import cProfile
def cprofileme():
print("--------------- cProfile -----------------")
print("py")
cProfile.run("py()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("cpp")
cProfile.run("cpp()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("apex")
cProfile.run("apex()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
#### timeit ####
import timeit
def timeme():
print("--------------- timeit -----------------")
print(f'py ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
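# NOTE (assumption): on asynchronous accelerators the timed calls above may
# return before the queued kernels finish, so the numbers can understate the
# real cost; calling get_accelerator().synchronize() around each timed region
# would give stricter wall-clock figures.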
#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l flatten_bench.py -l; python -m line_profiler flatten_bench.py.lprof
def line_profileme():
print("--------------- line_profiler -----------------")
print("py")
profile(py)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("cpp")
profile(cpp)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("apex")
profile(apex)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-l", action='store_true')
parser.add_argument("-c", action='store_true')
parser.add_argument("-t", action='store_true')
args = parser.parse_args()
if args.l:
line_profileme()
elif args.c:
cprofileme()
elif args.t:
timeme()
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import sys
import unittest
sys.path.append('../DeepSpeedExamples/Megatron_GPT2')
sys.path.append('../DeepSpeedExamples/BingBertSquad')
# Import the test cases here.
import Megatron_GPT2
import BingBertSquad
def pytest_hack(runner_result):
'''This is an ugly hack to get the unittest suites to play nicely with
    pytest. Otherwise pytest does not report the failing tests.
Long-term, these model tests should be adapted to pytest.
'''
if not runner_result.wasSuccessful():
print('SUITE UNSUCCESSFUL:', file=sys.stderr)
for fails in runner_result.failures:
print(fails, file=sys.stderr)
assert runner_result.wasSuccessful() # fail the test
def test_megatron():
runner = unittest.TextTestRunner(failfast=True)
pytest_hack(runner.run(Megatron_GPT2.suite()))
def test_megatron_checkpoint():
runner = unittest.TextTestRunner(failfast=True)
pytest_hack(runner.run(Megatron_GPT2.checkpoint_suite()))
def test_squad():
runner = unittest.TextTestRunner(failfast=True)
pytest_hack(runner.run(BingBertSquad.suite()))
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import unittest
import subprocess
import os
import time
class BaseTestCase(unittest.TestCase):
def __init__(self, methodName="DeepSpeed performance test"):
super(BaseTestCase, self).__init__(methodName)
self.test_dir = "./test"
self.baseline_dir = "./baseline"
self.timestr = time.strftime("%Y%m%d-%H%M%S")
def gen_output_name(self, test_config, prefix, baseline_config=False):
other_args = test_config["other_args"] if "other_args" in test_config else ""
zero_args = "_zero" if "zero" in test_config and test_config["zero"] else ""
other_args = other_args.strip(' -\\').replace(" ", "").replace("\"", "")
if other_args:
other_args = "_" + other_args
if test_config["deepspeed"] and not baseline_config:
file_name = "_mp{0}_gpu{1}_node{2}_bs{3}_step{4}_layer{5}_hidden{6}_seq{7}_head{8}{9}_ds{10}-{11}.log".format(
test_config["mp"], test_config["gpus"], test_config["nodes"], test_config["bs"], test_config["steps"],
test_config["layers"], test_config["hidden_size"], test_config["seq_length"], test_config["heads"],
other_args, zero_args, self.timestr)
save_dir = self.test_dir
else:
file_name = "_mp{0}_gpu{1}_node{2}_bs{3}_step{4}_layer{5}_hidden{6}_seq{7}_head{8}{9}.log".format(
test_config["mp"], test_config["gpus"], test_config["nodes"], test_config["bs"], test_config["steps"],
test_config["layers"], test_config["hidden_size"], test_config["seq_length"], test_config["heads"],
other_args)
save_dir = self.baseline_dir
return os.path.join(save_dir, prefix + file_name)
def ensure_directory_exists(self, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def clean_test_env(self):
cmd = "dlts_ssh pkill -9 -f /usr/bin/python"
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
time.sleep(20)
def run_gpt2_test(self, test_config, output):
ds_flag = "-d " + test_config["json"] if test_config["deepspeed"] else ""
ckpt_num = test_config["ckpt_num_layers"] if "ckpt_num_layers" in test_config else 1
other_args = "-o " + test_config["other_args"] if "other_args" in test_config else ""
cmd = "./ds_gpt2_test.sh -m {0} -g {1} -n {2} -b {3} -s {4} -l {5} -h {6} -q {7} -e {8} -c {9} {10} {11}".format(
test_config["mp"], test_config["gpus"], test_config["nodes"], test_config["bs"], test_config["steps"],
test_config["layers"], test_config["hidden_size"], test_config["seq_length"], test_config["heads"],
ckpt_num, other_args, ds_flag)
self.ensure_directory_exists(output)
with open(output, "w") as f:
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash', stdout=f, stderr=f)
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
from .run_func_test import GPT2FuncTestCase
from .run_checkpoint_test import GPT2CheckpointTestCase, checkpoint_suite
from .run_func_test import suite
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import re
from test_common import BaseTestCase
class GPT2PerfTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed performance test on GPT2 model"):
super(GPT2PerfTestCase, self).__init__(methodName)
def test_perf_1_5B(self):
test_config = {
"mp": 1,
"gpus": 16,
"nodes": 4,
"bs": 32,
"steps": 100,
"layers": 48,
"hidden_size": 1600,
"seq_length": 1024,
"heads": 16,
"deepspeed": True,
"json": "ds_config_perf_bs32.json",
}
self.run_test(test_config)
def test_perf_4B(self):
test_config = {
"mp": 1,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 100,
"layers": 64,
"hidden_size": 2304,
"seq_length": 1024,
"heads": 16,
"deepspeed": True,
"json": "ds_config_perf_bs8.json",
}
self.run_test(test_config)
def test_perf_8B(self):
test_config = {
"mp": 2,
"gpus": 16,
"nodes": 4,
"bs": 16,
"steps": 100,
"layers": 72,
"hidden_size": 3072,
"seq_length": 1024,
"heads": 24,
"deepspeed": True,
"json": "ds_config_perf_bs16.json",
}
self.run_test(test_config)
def test_perf_20B(self):
test_config = {
"mp": 4,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 50,
"layers": 111,
"hidden_size": 3808,
"seq_length": 1024,
"heads": 32,
"ckpt_num_layers": 1,
"deepspeed": True,
"json": "ds_config_perf_bs8.json",
}
self.run_test(test_config)
def run_test(self, test_config):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_perf"
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
exec_time = self.grep_latency_from_file(test_file)
if exec_time == 0.0:
print("{0}: no latency found in file {1}".format(self.id(), test_file))
else:
print("{0}: execution time per iteration is {1}ms.".format(self.id(), exec_time))
def grep_latency_from_file(self, file_name):
latency = 0.0
count = 0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "elapsed time per iteration"
match_number = re.compile(r'elapsed time per iteration \(ms\): ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
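        # a matching log line looks like (hypothetical sample):
        #   "... | elapsed time per iteration (ms): 1234.5 | ..."
        # every match contributes its ms value to the average computed below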
for line in lines:
if line_filter in line:
ms_per_iter = re.findall(match_number, line)
latency += float(ms_per_iter[0])
count += 1
if count > 0:
latency /= count
return latency
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2PerfTestCase('test_perf_1_5B'))
suite.addTest(GPT2PerfTestCase('test_perf_4B'))
suite.addTest(GPT2PerfTestCase('test_perf_8B'))
suite.addTest(GPT2PerfTestCase('test_perf_20B'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import re
from test_common import BaseTestCase
class GPT2PerfBaselineTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed performance test on GPT2 model"):
super(GPT2PerfBaselineTestCase, self).__init__(methodName)
def test_perf_1_5B(self):
test_config = {
"mp": 2,
"gpus": 16,
"nodes": 4,
"bs": 16,
"steps": 100,
"layers": 48,
"hidden_size": 1600,
"seq_length": 1024,
"heads": 16,
"deepspeed": False,
}
self.run_test(test_config)
def test_perf_4B(self):
test_config = {
"mp": 4,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 100,
"layers": 64,
"hidden_size": 2304,
"seq_length": 1024,
"heads": 16,
"deepspeed": False,
}
self.run_test(test_config)
def test_perf_8B(self):
test_config = {
"mp": 4,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 100,
"layers": 72,
"hidden_size": 3072,
"seq_length": 1024,
"heads": 24,
"deepspeed": False,
}
self.run_test(test_config)
def test_perf_20B(self):
test_config = {
"mp": 16,
"gpus": 16,
"nodes": 4,
"bs": 4,
"steps": 50,
"layers": 111,
"hidden_size": 3808,
"seq_length": 1024,
"heads": 32,
"ckpt_num_layers": 1,
"deepspeed": False,
}
self.run_test(test_config)
def run_test(self, test_config):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_perf"
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
exec_time = self.grep_latency_from_file(test_file)
if exec_time == 0.0:
print("{0}: no latency found in file {1}".format(self.id(), test_file))
else:
print("{0}: execution time per iteration is {1}ms.".format(self.id(), exec_time))
def grep_latency_from_file(self, file_name):
latency = 0.0
count = 0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "elapsed time per iteration"
match_number = re.compile(r'elapsed time per iteration \(ms\): ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
ms_per_iter = re.findall(match_number, line)
latency += float(ms_per_iter[0])
count += 1
if count > 0:
latency /= count
return latency
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2PerfBaselineTestCase('test_perf_1_5B'))
suite.addTest(GPT2PerfBaselineTestCase('test_perf_4B'))
suite.addTest(GPT2PerfBaselineTestCase('test_perf_8B'))
suite.addTest(GPT2PerfBaselineTestCase('test_perf_20B'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import subprocess
import os
import re
from .test_common import BaseTestCase
LAYERS = 2
HIDDEN_SIZE = 128
ATTN_HEADS = 8
def remove_file(test_id, filename):
cmd = f"if [ -f {filename} ] ; then rm -v {filename}; fi"
print(f"{test_id} cmd: {cmd}")
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
def grep_loss_from_file(file_name):
loss = 0.0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "validation loss at the end of training for test data | LM loss:"
match_number = re.compile(r'LM loss: ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
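    # a matching log line looks like (hypothetical sample):
    #   "validation loss at the end of training for test data | LM loss: 3.141E+00"
    # the last match wins, yielding loss = 3.141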
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
class GPT2CheckpointTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on GPT2 model"):
super(GPT2CheckpointTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_mp2_gpu4_node1_with_zero1(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu8_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_with_zero2(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu8_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_with_zero2_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu8_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu1_node1_with_zero1(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 1,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu1_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu4_node1_with_zero1(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu4_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu1_node1_with_zero2(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 1,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu1_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu1_node1_with_zero2_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 1,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu1_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu4_node1_with_zero2(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu4_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu4_node1_with_zero2_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu4_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_load_gpu2_node1_with_zero1(self):
test_config = {
"mp": 2,
"gpus": 4,
"load_gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu4_gpu2_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu2_load_gpu4_node1_with_zero1(self):
test_config = {
"mp": 2,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu2_gpu4_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_load_gpu2_node1_with_zero2(self):
test_config = {
"mp": 2,
"gpus": 4,
"load_gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu4_gpu2_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_load_gpu2_node1_with_zero2_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"load_gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu4_gpu2_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu2_load_gpu4_node1_with_zero2(self):
test_config = {
"mp": 2,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu2_gpu4_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu2_load_gpu4_node1_with_zero2_offload(self):
test_config = {
"mp": 2,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu2_gpu4_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_without_zero(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"zero": False,
"other_args": "",
"tag": "ds_without_zero",
"checkpoint_name": "ckpt_mp4_gpu16_wo_zero",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_no_zero.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def gen_name(self, test_config, prefix):
save_dir = "checkpoint_test_logs"
tag = test_config["tag"]
checkpoint_name = test_config["checkpoint_name"]
file_name = f"_{tag}_{checkpoint_name}.log"
return os.path.join(save_dir, prefix + file_name)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
# Cache save and load gpu counts
save_gpus = test_config["gpus"]
if "load_gpus" in test_config:
load_gpus = test_config["load_gpus"]
del test_config["load_gpus"]
else:
load_gpus = test_config["gpus"]
# save to current directory.
checkpoint_folder = test_config["checkpoint_name"]
checkpoint_interval = test_config["checkpoint_interval"]
checkpoint_name = test_config["checkpoint_name"]
#---------------remove old checkpoint---------------#
try:
cmd = f"rm -rf {checkpoint_name}"
print(f"{self.id()} cmd: {cmd}")
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print("No old checkpoint")
if "cpu_optimizer" in test_config and test_config["cpu_optimizer"]:
cpu_optimizer_flag = " --cpu-optimizer"
else:
cpu_optimizer_flag = ""
#-----------------Saving Checkpoint-----------------#
# building checkpoint arguments
test_config[
"other_args"] = f"\"--save {checkpoint_folder} --save-interval {checkpoint_interval} {cpu_optimizer_flag}\""
prefix = "gpt2_saving_checkpoint"
# create checkpoint run...
base_file = self.gen_name(test_config, prefix)
# remove previous test log
try:
cmd = f"rm {base_file}"
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print(f"{self.id()} No old logs")
print("{0}: Run for saving checkpoint".format(self.id()))
self.run_gpt2_test(test_config, base_file)
#-----------------Loading Checkpoint-----------------#
# building checkpoint arguments
test_config["other_args"] = f"\"--load {checkpoint_folder} {cpu_optimizer_flag} \""
# set checkpoint load iteration
try:
cmd = f"echo {checkpoint_interval} > {checkpoint_name}/latest_checkpointed_iteration.txt"
print(f"{self.id()} running cmd: {cmd}")
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print(f"{self.id()} Failed to update the checkpoint iteration file")
return False
prefix = "gpt2_loading_checkpoint"
# set load gpus
test_config["gpus"] = load_gpus
print("{0}: Second run loading checkpoint and continuing.".format(self.id()))
test_file = self.gen_name(test_config, prefix)
# remove previous test log
try:
cmd = f"rm {test_file}"
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print(f"{self.id()} no previous logs for")
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def checkpoint_suite():
suite = unittest.TestSuite()
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_with_zero2_offload'))
# Shrink DP
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu1_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu1_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu1_node1_with_zero2_offload'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_load_gpu2_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_load_gpu2_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_load_gpu2_node1_with_zero2_offload'))
# Expand DP
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu4_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu4_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu4_node1_with_zero2_offload'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu2_load_gpu4_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu2_load_gpu4_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu2_load_gpu4_node1_with_zero2_offload'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_without_zero'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(checkpoint_suite())
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import os
import re
from .test_common import BaseTestCase
LAYERS = 2
HIDDEN_SIZE = 128
ATTN_HEADS = 8
SEQ_LEN = 64
MASTER_PORT = 29700
def grep_loss_from_file(file_name):
loss = 0.0
print(f'grepping {file_name}')
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "validation loss at the end of training for test data | LM loss:"
match_number = re.compile(r'LM loss: ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
class GPT2FuncTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on GPT2 model"):
super(GPT2FuncTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_mp1_gpu2_node1_fp16(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_no_zero.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero1(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero1(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero1(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero1(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero2(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero2(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2.json",
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero2(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2.json",
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero2_ds_offload(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.02)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero2_ds_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.02)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2_gas(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": True,
"json": "ds_config_func_bs8_zero2_gas3.json",
"baseline": "ds_config_func_bs8_zero0_gas3.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
succ = self.run_partition_activations_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2_ds_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.02)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.02)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero2_ds_offload(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.02)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.02)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero2_torch_offload(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero2_torch_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2_torch_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero2_torch_offload(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
        succ = self.run_partition_activations_test(partition_activation_config, 0.01)
        self.assertTrue(succ)
def test_optimizer_scheduler(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 20,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_scheduler.json",
}
succ = self.run_test(test_config, 0.01)
        # just make sure the run completes without crashing.
self.assertTrue(True)
def run_partition_activations_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
baseline_prefix = "gpt2_func_"
prefix = "gpt2_partition_activation_"
deepspeed_config = test_config["json"]
baseline_deepspeed_config = False
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, True)
# baseline run...
        # turn off deepspeed if a baseline deepspeed config
# is not provided
if not "baseline" in test_config:
test_config["deepspeed"] = False
else:
test_config["json"] = test_config["baseline"]
baseline_prefix += test_config["json"][0:-5]
baseline_deepspeed_config = True
test_config["other_args"] = f"\"{cpu_optimizer_flag}\""
base_file = self.gen_output_name(test_config, baseline_prefix, baseline_config=baseline_deepspeed_config)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_gpt2_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, False)
test_config["other_args"] = f"\"--deepspeed-activation-checkpointing {cpu_optimizer_flag}\""
test_config["json"] = deepspeed_config
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_func"
baseline_prefix = prefix
deepspeed_config = test_config["json"]
baseline_deepspeed_config = False
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, True)
# baseline run...
# turn off deepspeed if a baseline deepspeed config
# is not provided
if not "baseline" in test_config:
test_config["deepspeed"] = False
else:
test_config["json"] = test_config["baseline"]
baseline_prefix = prefix + test_config["json"][0:-5]
baseline_deepspeed_config = True
test_config["other_args"] = f"\"{cpu_optimizer_flag}\""
# baseline run...
base_file = self.gen_output_name(test_config, baseline_prefix, baseline_config=baseline_deepspeed_config)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_gpt2_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, False)
test_config["other_args"] = f"\"{cpu_optimizer_flag}\""
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def gen_cpu_optimizer_flag(self, test_config, is_baseline):
if 'cpu_optimizer' in test_config and test_config['cpu_optimizer']:
cpu_optimizer_flag = "--cpu-optimizer"
if is_baseline:
cpu_optimizer_flag += " --cpu_torch_adam"
return cpu_optimizer_flag
if 'test_torch_offload' in test_config and test_config['test_torch_offload']:
cpu_optimizer_flag += " --cpu_torch_adam"
return cpu_optimizer_flag
else:
cpu_optimizer_flag = ""
return cpu_optimizer_flag
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_fp16'))
# Baseline = Megatron + Torch.Optim.Adam
# Test = Megatron + Torch.Optim.Adam + ZeRO-Offload
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero2_torch_offload'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero2_torch_offload'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2_torch_offload'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero2_torch_offload'))
# Baseline = Megatron + Torch.Optim.Adam
# Test = Megatron + DeepSpeedAdam + ZeRO-Offload
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2_gas'))
suite.addTest(GPT2FuncTestCase('test_optimizer_scheduler'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import unittest
import subprocess
import os
import time
class BaseTestCase(unittest.TestCase):
def __init__(self, methodName="DeepSpeed performance test"):
super(BaseTestCase, self).__init__(methodName)
self.test_dir = "./test"
self.baseline_dir = "./baseline"
self.timestr = time.strftime("%Y%m%d-%H%M%S")
def gen_output_name(self, test_config, prefix):
other_args = test_config["other_args"] if "other_args" in test_config else ""
zero_args = "_zero" if "zero" in test_config and test_config["zero"] else ""
other_args = other_args.strip(' -\\').replace(" ", "").replace("\"", "")
if other_args:
other_args = "_" + other_args
if test_config["deepspeed"]:
file_name = "_gpu{0}_{1}_ds{2}-{3}.log".format(test_config["gpus"], other_args, zero_args, self.timestr)
save_dir = self.test_dir
else:
file_name = "_gpu{0}_{1}.log".format(test_config["gpus"], other_args)
save_dir = self.baseline_dir
return os.path.join(save_dir, prefix + file_name)
def ensure_directory_exists(self, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def clean_test_env(self):
cmd = "dlts_ssh pkill -9 -f /usr/bin/python"
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
time.sleep(20)
def run_BingBertSquad_test(self, test_config, output):
ds_flag = " -d --deepspeed_config " + test_config["json"] if test_config["deepspeed"] else " "
other_args = " " + test_config["other_args"] if "other_args" in test_config else " "
cmd = "./run_BingBertSquad_sanity.sh -e 1 -g {0} {1} {2}".format(test_config["gpus"], other_args, ds_flag)
self.ensure_directory_exists(output)
with open(output, "w") as f:
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash', stdout=f, stderr=f)
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .BingBertSquad_run_func_test import BingBertSquadFuncTestCase
from .BingBertSquad_run_func_test import suite
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import os
import re
from .BingBertSquad_test_common import BaseTestCase
def grep_loss_from_file(file_name):
loss = 0.0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "bert_squad_progress: step="
match_number = re.compile(r'loss=([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
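    # a matching progress line looks like (hypothetical sample):
    #   "bert_squad_progress: step=4 ... loss=5.1234"
    # the last match wins, so the final reported loss is returned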
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
class BingBertSquadFuncTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on BingBertSquad model"):
super(BingBertSquadFuncTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_gpu4_fp16(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu4_fp16_zero2(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_zero2_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu1_fp16(self):
test_config = {
"gpus": 1,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu4_fp32(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp32_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu1_fp32(self):
test_config = {
"gpus": 1,
"deepspeed": False,
"json": "deepspeed_bsz24_fp32_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "BingBertSquad_func"
test_config['other_args'] += f" --max_steps {test_config['max_steps']}"
test_config['other_args'] += f" --max_steps_per_epoch {test_config['max_epoch_steps']}"
# baseline run...
test_config["deepspeed"] = False
base_file = self.gen_output_name(test_config, prefix)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_BingBertSquad_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_BingBertSquad_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def suite():
suite = unittest.TestSuite()
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp16'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp16_zero2'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu1_fp16'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp32'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu1_fp32'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import subprocess as sp
import os
from math import isclose
import sys
import pytest
import json
sys.path.append("../../../DeepSpeedExamples/BingBertSquad")
import evaluate as eval
squad_dir = "/data/BingBertSquad"
base_dir = "../../../DeepSpeedExamples/BingBertSquad"
script_file_name = "run_squad_deepspeed.sh"
model_file_name = "training_state_checkpoint_162.tar"
eval_file_name = "dev-v1.1.json"
pred_file_name = "predictions.json"
num_gpus = "4"
timeout_sec = 5 * 60 * 60 # 5 hours
eval_version = "1.1"
def create_config_file(tmpdir, zeroenabled=False):
config_dict = {
"train_batch_size": 24,
"train_micro_batch_size_per_gpu": 6,
"steps_per_print": 10,
"optimizer": {
"type": "Adam",
"params": {
"lr": 3e-5,
"weight_decay": 0.0,
"bias_correction": False
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
}
}
config_dict["zero_optimization"] = zeroenabled
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
def test_e2e_squad_deepspeed_base(tmpdir):
config_file = create_config_file(tmpdir)
# base run results => {"exact_match": 83.9829706717124, "f1": 90.71138132004097}
expected_exact_match = 83.98
expected_f1 = 90.71
model_file = os.path.join(squad_dir, model_file_name)
eval_file = os.path.join(squad_dir, eval_file_name)
output_dir = os.path.join(tmpdir, "output")
pred_file = os.path.join(output_dir, pred_file_name)
proc = sp.Popen(["bash", script_file_name, num_gpus, model_file, squad_dir, output_dir, config_file], cwd=base_dir)
try:
proc.communicate(timeout=timeout_sec)
if os.path.exists(pred_file):
eval_result = eval.evaluate(eval_version, eval_file, pred_file)
print("evaluation result: ", json.dumps(eval_result))
assert isclose(eval_result["exact_match"], expected_exact_match, abs_tol=1e-2)
assert isclose(eval_result["f1"], expected_f1, abs_tol=1e-2)
else:
pytest.fail("Error: Run Failed")
except sp.TimeoutExpired:
proc.kill()
pytest.fail("Error: Timeout")
except sp.CalledProcessError:
pytest.fail("Error: Run Failed")
def test_e2e_squad_deepspeed_zero(tmpdir):
config_file = create_config_file(tmpdir, True)
# base run results => {"exact_match": 84.1438032166509, "f1": 90.89776136505441}
expected_exact_match = 84.14
expected_f1 = 90.89
model_file = os.path.join(squad_dir, model_file_name)
eval_file = os.path.join(squad_dir, eval_file_name)
output_dir = os.path.join(tmpdir, "output")
pred_file = os.path.join(output_dir, pred_file_name)
proc = sp.Popen(["bash", script_file_name, num_gpus, model_file, squad_dir, output_dir, config_file], cwd=base_dir)
try:
proc.communicate(timeout=timeout_sec)
if os.path.exists(pred_file):
eval_result = eval.evaluate(eval_version, eval_file, pred_file)
print("evaluation result: ", json.dumps(eval_result))
assert isclose(eval_result["exact_match"], expected_exact_match, abs_tol=1e-2)
assert isclose(eval_result["f1"], expected_f1, abs_tol=1e-2)
else:
pytest.fail("Error: Run Failed")
except sp.TimeoutExpired:
proc.kill()
pytest.fail("Error: Timeout")
except sp.CalledProcessError:
pytest.fail("Error: Run Failed")
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import json
import argparse
import torch
import deepspeed
from torch.utils.data.distributed import DistributedSampler
import deepspeed.comm as dist
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False):
super(SimpleModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.layers2 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim)])
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden = x
hidden = self.linear(hidden)
return self.cross_entropy_loss(hidden, y)
def create_config_from_dict(tmpdir, config_dict):
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
def get_data_loader(model, total_samples, hidden_dim, device):
batch_size = model.train_micro_batch_size_per_gpu()
train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=torch.half)
train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
sampler = DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
return train_loader
def get_args(tmpdir, config_dict):
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--zero', type=int, default=0)
args = parser.parse_args() #args=''
config_dict["zero_optimization"]["stage"] = args.zero
print('config_dict["zero_optimization"]', config_dict["zero_optimization"])
config_path = create_config_from_dict(tmpdir, config_dict)
args.deepspeed_config = config_path
return args
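# This script relies on a distributed launcher to set RANK and friends; a
# typical invocation (hypothetical, adjust the GPU count) would be:
#   deepspeed --num_gpus=2 this_script.py --zero 2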
def print0(msg):
if dist.get_rank() == 0:
print(msg, flush=True)
rank = int(os.environ['RANK'])
print('seed:', 2222 + rank)
torch.random.manual_seed(2222 + rank)
config_dict = {
"train_batch_size": 8,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 15
},
"zero_optimization": {
"stage": 0,
"reduce_bucket_size": 20,
"stage3_model_persistence_threshold": 10
}
}
# "initial_scale_power": 15
args = get_args('/tmp/', config_dict)
hidden_dim = 32
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters(),
dist_init_required=True)
def print_params(tag, model):
if dist.get_rank() == 0:
for n, p in model.named_parameters():
print0("{} {}:{}".format(tag, n, p))
data_loader = get_data_loader(model=model, total_samples=1000, hidden_dim=hidden_dim, device=model.device)
#print_params('pre-train', model)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0:
print("LOSS:", loss.item())
model.backward(loss)
model.step()
#print_params('step={}'.format(n), model)
if n == 5: break
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
###################################
# Setup
###################################
class VerboseLinear(torch.nn.Linear):
def __init__(self, **kwargs):
        print('Begin VerboseLinear.__init__')
        super().__init__(**kwargs)
        print('End VerboseLinear.__init__')
class LinearStack(torch.nn.Module):
def __init__(self, input_dim=2, hidden_dim=4, output_dim=4, num_layers=2):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.input_layer = VerboseLinear(in_features=self.input_dim, out_features=self.hidden_dim)
self.layers = torch.nn.ModuleList([
torch.nn.Linear(in_features=self.hidden_dim, out_features=self.hidden_dim, bias=False)
for x in range(num_layers)
])
self.output_layer = torch.nn.Linear(in_features=self.hidden_dim, out_features=self.output_dim)
self.identity = torch.nn.Identity()
def forward(self, x):
x = self.input_layer(x)
for layer in self.layers:
x = layer(x)
x = self.output_layer(x)
x = self.identity(x)
return x
###################################
# DRIVER
###################################
def test_driver():
print()
print('BUILDING MODEL')
with deepspeed.zero.Init():
model = LinearStack()
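        # deepspeed.zero.Init() allocates each parameter directly in its
        # ZeRO-3 partitioned form, so the full LinearStack weights never
        # materialize on a single rank during construction.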
print()
# parted = [name for (name, p) in model.named_parameters() if p._partitioned]
# not_parted = [name for (name, p) in model.named_parameters() if not p._partitioned]
# print('partitioned: ', parted)
# print('full: ', not_parted)
# print()
model.train()
test_input = torch.rand(1, model.input_dim)
grad_output = torch.rand(1, model.output_dim)
grad_output.requires_grad = False
test_input.requires_grad = False
print()
print('BEGINNING FORWARD')
print()
output = model(test_input)
output.backward(grad_output)
# parted = [name for (name, p) in model.named_parameters() if p._partitioned]
# not_parted = [name for (name, p) in model.named_parameters() if not p._partitioned]
# print('partitioned: ', parted)
# print('full:' , not_parted)
# print()
#samyamspeed.disable()
test_driver()
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import json
import argparse
import torch
import deepspeed
from torch.utils.data.distributed import DistributedSampler
import deepspeed.comm as dist
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False):
super(SimpleModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.layers2 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim)])
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden = x
hidden = self.linear(hidden)
return self.cross_entropy_loss(hidden, y)
def create_config_from_dict(tmpdir, config_dict):
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
def get_data_loader(model, total_samples, hidden_dim, device):
batch_size = model.train_micro_batch_size_per_gpu()
train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=torch.half)
train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
sampler = DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
return train_loader
def get_args(tmpdir, config_dict):
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--zero', type=int, default=3)
args = parser.parse_args() #args=''
config_dict["zero_optimization"]["stage"] = args.zero
# print('config_dict["zero_optimization"]', config_dict["zero_optimization"])
config_path = create_config_from_dict(tmpdir, config_dict)
args.deepspeed_config = config_path
return args
def print0(msg):
if dist.get_rank() == 0:
print(msg, flush=True)
rank = int(os.environ['RANK'])
print('seed:', 2222 + rank)
torch.random.manual_seed(2222 + rank)
config_dict = {
"train_batch_size": 8,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 15
},
"zero_optimization": {
"stage": 3,
"reduce_bucket_size": 20,
"mics_shard_size": 4,
"mics_hierarchical_params_gather": True,
"stage3_model_persistence_threshold": 10
}
}
# "initial_scale_power": 15
args = get_args('/tmp/', config_dict)
hidden_dim = 32
# with deepspeed.zero.Init():
model = SimpleModel(hidden_dim, empty_grad=False)
# print('------> init model with deepspeed.zero.Init()')
model, _, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters(),
dist_init_required=True)
def print_params(tag, model):
if dist.get_rank() == 0:
for n, p in model.named_parameters():
print0("{} {}:{}".format(tag, n, p))
data_loader = get_data_loader(model=model, total_samples=1000, hidden_dim=hidden_dim, device=model.device)
#print_params('pre-train', model)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0:
print("LOSS:", loss.item())
model.backward(loss)
model.step()
#print_params('step={}'.format(n), model)
if n == 5: break
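# Hypothetical launch sketch (file name and device count are illustrative):
# with train_batch_size=8 and mics_shard_size=4 configured above, one
# plausible invocation is
#
#   deepspeed --num_gpus 8 mics_test.py --zero 3
#
# where the launcher sets RANK for each process, which this script reads to
# give each rank a distinct seed.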
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.pt.deepspeed_linear import LinearModuleForZeroStage3
from deepspeed.pt.log_utils import logger
from deepspeed.accelerator import get_accelerator
def see_memory_usage(message):
# Print message except when distributed but not rank 0
logger.info(message)
logger.info(
"Memory Allocated %s GigaBytes ",
get_accelerator().memory_allocated() / (1024 * 1024 * 1024),
)
logger.info(
"Max Memory Allocated %s GigaBytes",
get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),
)
logger.info(
"Cache Allocated %s GigaBytes",
get_accelerator().memory_cached() / (1024 * 1024 * 1024),
)
logger.info(
"Max cache Allocated %s GigaBytes",
get_accelerator().max_memory_cached() / (1024 * 1024 * 1024),
)
tens = torch.rand(1024, 16384, dtype=torch.half, device=torch.device(get_accelerator().device_name()))
tens_back = tens.detach().clone()
#linear_bk = torch.nn.functional.linear
#torch.nn.functional.linear = deepspeed.pt.deepspeed_linear.LinearFunctionForZeroStage3.apply
model = LinearModuleForZeroStage3(16384, 16384)
model.to(get_accelerator().device_name()).half()
see_memory_usage("Before forward")
y = model(tens)
see_memory_usage("After forward")
model.weight.data = torch.zeros(1, dtype=torch.half, device=torch.device(get_accelerator().device_name()))
see_memory_usage("After weight zero")
y.backward(tens_back)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from transformers import AutoModelForCausalLM
import deepspeed
import argparse
from deepspeed.accelerator import get_accelerator
deepspeed.runtime.utils.see_memory_usage('pre test', force=True)
model = AutoModelForCausalLM.from_pretrained('facebook/opt-350M').half().to(get_accelerator().device_name())
parser = argparse.ArgumentParser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
deepspeed.runtime.utils.see_memory_usage('post test', force=True)
m, _, _, _ = deepspeed.initialize(model=model, args=args, enable_hybrid_engine=True)
m.eval()
input_ids = torch.ones(1, 16, device=get_accelerator().device_name(), dtype=torch.long)
out = m(input_ids)
m.train()
out = m(input_ids)
print(out['logits'], out['logits'].norm())
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam
import time
from deepspeed.accelerator import get_accelerator
device = 'cpu'
model_size = 1 * 1024**3
param = torch.nn.Parameter(torch.ones(model_size, device=device))
param_fp16 = torch.nn.Parameter(torch.ones(model_size, dtype=torch.half, device=get_accelerator().device_name(0)))
optimizer = DeepSpeedCPUAdam([param])
#torch.set_num_threads(128)
param.grad = torch.ones(model_size, device=device)
avg = 0
for i in range(100):
start = time.time()
optimizer.step(fp16_param_groups=[param_fp16])
stop = time.time()
avg += (stop - start)
param.grad = torch.ones(model_size, device=device) * 2
print("Elapsed Time is ", avg / 100)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
import time
NUM_ITERS = 100
def _test_perf(param, optimizer_func):
optimizer = optimizer_func(param)
avg = 0
    for i in range(NUM_ITERS):
        for p in param:
            p.grad = torch.ones_like(p) * 2
start = time.time()
optimizer.step()
stop = time.time()
avg += (stop - start)
return avg / NUM_ITERS
def _main():
device = 'cpu'
model_size = 1 * 1024**3
group_size = [model_size, 274432]
param = [torch.nn.Parameter(torch.ones(size, device=device)) for size in group_size]
torch_time = _test_perf(param, torch.optim.Adagrad)
ds_time = _test_perf(param, DeepSpeedCPUAdagrad)
print(f"Step time: {torch_time=} {ds_time=}")
_main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam
import time
NUM_ITERS = 100
def _test_perf(param, optimizer_func):
optimizer = optimizer_func(param)
avg = 0
    for i in range(NUM_ITERS):
        for p in param:
            p.grad = torch.ones_like(p) * 2
start = time.time()
optimizer.step()
stop = time.time()
avg += (stop - start)
return avg / NUM_ITERS
def _main():
device = 'cpu'
model_size = 1 * 1024**3
group_size = [model_size, 274432]
param = [torch.nn.Parameter(torch.ones(size, device=device)) for size in group_size]
torch_time = _test_perf(param, torch.optim.Adam)
ds_time = _test_perf(param, DeepSpeedCPUAdam)
print(f"Step time: {torch_time=} {ds_time=}")
_main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from packaging import version as pkg_version
with open('../version.txt') as fd:
version = pkg_version.parse(fd.read())
with open('../version.txt', 'w') as fd:
fd.write(f'{version.major}.{version.minor}.{version.micro + 1}\n')
print(f'{version} -> {version.major}.{version.minor}.{version.micro + 1}')
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# -- Project information -----------------------------------------------------
project = 'DeepSpeed'
copyright = '2020, Microsoft'
author = 'Microsoft'
# The full version, including alpha/beta/rc tags
with open("../../../version.txt", "r") as f:
release = f.readline().rstrip()
master_doc = 'index'
autodoc_member_order = 'bysource'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_rtd_theme',
'sphinxcontrib.autodoc_pydantic',
'sphinx.ext.autosectionlabel',
]
pygments_style = 'sphinx'
# autodoc_pydantic config
autodoc_pydantic_model_show_field_summary = False
autodoc_pydantic_field_signature_prefix = ' '
autodoc_pydantic_model_signature_prefix = 'class'
autodoc_pydantic_model_show_json = False
autodoc_pydantic_model_show_config_summary = False
autodoc_pydantic_model_show_config_member = False
autodoc_pydantic_model_show_validator_summary = False
autodoc_pydantic_model_show_validator_members = False
autodoc_pydantic_model_summary_list_order = 'bysource'
autodoc_pydantic_model_member_order = 'bysource'
autodoc_pydantic_field_list_validators = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# GitHub integration
html_context = {
"display_github": True,
"github_user": "microsoft",
"github_repo": "DeepSpeed",
"github_version": "master",
"conf_py_path": "/docs/code-docs/source/",
}
sys.path.insert(0, os.path.abspath('../../../'))
# Prepend module names to class descriptions?
add_module_names = True
autoclass_content = 'auto'
autodoc_mock_imports = ["apex", "mpi4py", "tensorboardX", "numpy", "cupy"]
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pkgutil
import importlib
from .abstract_accelerator import DeepSpeedAccelerator
# During setup stage torch may not be installed, pass on no torch will
# allow op builder related API to be executed.
try:
import torch.cuda
except ImportError:
pass
class CUDA_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cuda'
self._communication_backend_name = 'nccl'
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
        if device_index is None:
return 'cuda'
return 'cuda:{}'.format(device_index)
def device(self, device_index=None):
return torch.cuda.device(device_index)
def set_device(self, device_index):
torch.cuda.set_device(device_index)
def current_device(self):
return torch.cuda.current_device()
def current_device_name(self):
return 'cuda:{}'.format(torch.cuda.current_device())
def device_count(self):
return torch.cuda.device_count()
def synchronize(self, device_index=None):
return torch.cuda.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.cuda.set_rng_state(new_state)
return torch.cuda.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.cuda.get_rng_state()
return torch.cuda.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.cuda.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.cuda.manual_seed_all(seed)
    def initial_seed(self):
        return torch.cuda.initial_seed()
def default_generator(self, device_index):
return torch.cuda.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.cuda.Stream
def stream(self, stream):
return torch.cuda.stream(stream)
def current_stream(self, device_index=None):
return torch.cuda.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.cuda.default_stream(device_index)
@property
def Event(self):
return torch.cuda.Event
# Memory management
def empty_cache(self):
return torch.cuda.empty_cache()
def memory_allocated(self, device_index=None):
return torch.cuda.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.cuda.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.cuda.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.cuda.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.cuda.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.cuda.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'memory_stats'):
return torch.cuda.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'reset_peak_memory_stats'):
return torch.cuda.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'memory_reserved'):
return torch.cuda.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'max_memory_reserved'):
return torch.cuda.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.cuda.get_device_properties(device_index).total_memory
# Data types
def is_bf16_supported(self):
return torch.cuda.is_bf16_supported()
    def is_fp16_supported(self):
        major, _ = torch.cuda.get_device_capability()
        return major >= 7
# Misc
def amp(self):
if hasattr(torch.cuda, 'amp'):
return torch.cuda.amp
return None
def is_available(self):
return torch.cuda.is_available()
def range_push(self, msg):
if hasattr(torch.cuda.nvtx, 'range_push'):
return torch.cuda.nvtx.range_push(msg)
def range_pop(self):
if hasattr(torch.cuda.nvtx, 'range_pop'):
return torch.cuda.nvtx.range_pop()
def lazy_call(self, callback):
return torch.cuda._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.cuda.BFloat16Tensor
@property
def ByteTensor(self):
return torch.cuda.ByteTensor
@property
def DoubleTensor(self):
return torch.cuda.DoubleTensor
@property
def FloatTensor(self):
return torch.cuda.FloatTensor
@property
def HalfTensor(self):
return torch.cuda.HalfTensor
@property
def IntTensor(self):
return torch.cuda.IntTensor
@property
def LongTensor(self):
return torch.cuda.LongTensor
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cuda:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder"
except ImportError:
return "deepspeed.ops.op_builder"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict will be filled at init stage
class_dict = None
def _lazy_init_class_dict(self):
        if self.class_dict is not None:
return
else:
self.class_dict = {}
# begin initialize for create_op_builder()
# put all valid class name <--> class type mapping into class_dict
op_builder_dir = self.op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
# avoid self references
if module_name != 'all_ops' and module_name != 'builder' and module_name != 'cpu':
module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
                    for member_name in module.__dir__():
                        # keep concrete *Builder classes, skip the abstract base classes
                        if member_name.endswith('Builder') and member_name not in (
                                "OpBuilder", "CUDAOpBuilder", "TorchCPUOpBuilder"):
                            if member_name not in self.class_dict:
                                self.class_dict[member_name] = getattr(module, member_name)
# end initialize for create_op_builder()
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]()
else:
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
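# Minimal usage sketch (illustrative, assumes a CUDA-enabled torch build):
#
#   acc = CUDA_Accelerator()
#   assert acc.device_name() == 'cuda'
#   assert acc.device_name(0) == 'cuda:0'
#   builder_cls = acc.get_op_builder('UtilsBuilder')  # the class if found, else None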
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import abc
from abc import ABC
class DeepSpeedAccelerator(ABC):
def __init__(self):
self._name = None
self._communication_backend_name = None
@abc.abstractmethod
def is_synchronized_device(self):
...
# Device APIs
@abc.abstractmethod
def device_name(self, device_index):
...
@abc.abstractmethod
def device(self, device_index):
...
@abc.abstractmethod
def set_device(self, device_index):
...
@abc.abstractmethod
def current_device(self):
...
@abc.abstractmethod
def current_device_name(self):
...
@abc.abstractmethod
def device_count(self):
...
@abc.abstractmethod
def synchronize(self, device_index=None):
...
# RNG APIs
@abc.abstractmethod
def random(self):
...
@abc.abstractmethod
def set_rng_state(self, new_state, device_index=None):
...
@abc.abstractmethod
def get_rng_state(self, device_index=None):
...
@abc.abstractmethod
def manual_seed(self, seed):
...
@abc.abstractmethod
def manual_seed_all(self, seed):
...
@abc.abstractmethod
    def initial_seed(self):
...
@abc.abstractmethod
def default_generator(self, device_index):
...
# Streams/Events
@property
@abc.abstractmethod
def Stream(self):
...
@abc.abstractmethod
def stream(self, stream):
...
@abc.abstractmethod
def current_stream(self, device_index=None):
...
@abc.abstractmethod
def default_stream(self, device_index=None):
...
@property
@abc.abstractmethod
def Event(self):
...
# Memory management
@abc.abstractmethod
def empty_cache(self):
...
@abc.abstractmethod
def memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def max_memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def reset_max_memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def memory_cached(self, device_index=None):
...
@abc.abstractmethod
def max_memory_cached(self, device_index=None):
...
@abc.abstractmethod
def reset_max_memory_cached(self, device_index=None):
...
@abc.abstractmethod
def memory_stats(self, device_index=None):
...
@abc.abstractmethod
def reset_peak_memory_stats(self, device_index=None):
...
@abc.abstractmethod
def memory_reserved(self, device_index=None):
...
@abc.abstractmethod
def max_memory_reserved(self, device_index=None):
...
@abc.abstractmethod
def total_memory(self, device_index=None):
...
# Data types
@abc.abstractmethod
def is_bf16_supported(self):
...
@abc.abstractmethod
def is_fp16_supported(self):
...
# Misc
@abc.abstractmethod
def amp(self):
...
@abc.abstractmethod
def is_available(self):
...
@abc.abstractmethod
def range_push(self, msg):
...
@abc.abstractmethod
def range_pop(self):
...
@abc.abstractmethod
def lazy_call(self, callback):
...
@abc.abstractmethod
def communication_backend_name(self):
...
# Tensor operations
@property
@abc.abstractmethod
def BFloat16Tensor(self):
...
@property
@abc.abstractmethod
def ByteTensor(self):
...
@property
@abc.abstractmethod
def DoubleTensor(self):
...
@property
@abc.abstractmethod
def FloatTensor(self):
...
@property
@abc.abstractmethod
def HalfTensor(self):
...
@property
@abc.abstractmethod
def IntTensor(self):
...
@property
@abc.abstractmethod
def LongTensor(self):
...
@abc.abstractmethod
def pin_memory(self, tensor):
...
@abc.abstractmethod
def on_accelerator(self, tensor):
...
@abc.abstractmethod
def op_builder_dir(self):
...
# create an instance of op builder, specified by class_name
@abc.abstractmethod
def create_op_builder(self, class_name):
...
# return an op builder class, specified by class_name
@abc.abstractmethod
def get_op_builder(self, class_name):
...
@abc.abstractmethod
def build_extension(self):
...
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .abstract_accelerator import DeepSpeedAccelerator
from .real_accelerator import get_accelerator, set_accelerator
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
try:
from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
except ImportError as e:
dsa1 = None
try:
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
except ImportError as e:
dsa2 = None
ds_accelerator = None
def _validate_accelerator(accel_obj):
    # abstract_accelerator is importable under two paths: at build time it is
    # accelerator.abstract_accelerator, while at run time it is
    # deepspeed.accelerator.abstract_accelerator. An extension subclasses the
    # run-time DeepSpeedAccelerator as its base class, so accel_obj must be
    # compared against both base classes. If accel_obj is an instance of
    # DeepSpeedAccelerator from either path, it is considered a conforming
    # object.
    if not ((dsa1 is not None and isinstance(accel_obj, dsa1)) or (dsa2 is not None and isinstance(accel_obj, dsa2))):
raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')
# TODO: turn off is_available test since this breaks tests
#assert accel_obj.is_available(), \
# f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
def get_accelerator():
global ds_accelerator
if ds_accelerator is not None:
return ds_accelerator
accelerator_name = None
ds_set_method = None
# 1. Detect whether there is override of DeepSpeed accelerators from environment variable.
# DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'
if 'DS_ACCELERATOR' in os.environ.keys():
accelerator_name = os.environ['DS_ACCELERATOR']
        if accelerator_name == 'xpu':
            try:
                from intel_extension_for_deepspeed import XPU_Accelerator  # noqa: F401
            except ImportError:
                raise ValueError(
                    'XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.')
        elif accelerator_name == 'cpu':
            try:
                import intel_extension_for_pytorch  # noqa: F401
            except ImportError:
                raise ValueError(
                    'CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.')
        elif accelerator_name == 'cuda':
            pass
        else:
            raise ValueError(
                f'DS_ACCELERATOR must be one of "cuda", "cpu", or "xpu". Value "{accelerator_name}" is not supported')
ds_set_method = 'override'
# 2. If no override, detect which accelerator to use automatically
    if accelerator_name is None:
try:
from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811
accelerator_name = 'xpu'
        except ImportError:
            # We need a way to choose between CUDA_Accelerator and CPU_Accelerator.
            # Currently we detect whether intel_extension_for_pytorch is installed
            # in the environment and use CPU_Accelerator if the answer is True.
            # An alternative might be to detect whether a CUDA device is installed
            # on the system, but this comes with two pitfalls:
            # 1. the system may not have torch pre-installed, so
            #    get_accelerator().is_available() may not work.
            # 2. Some scenarios, like installing on a login node (without a CUDA
            #    device) and running on a compute node (with a CUDA device), may
            #    cause a mismatch between installation time and runtime.
            try:
                import intel_extension_for_pytorch  # noqa: F401,F811
                accelerator_name = 'cpu'
            except ImportError:
                accelerator_name = 'cuda'
ds_set_method = 'auto detect'
# 3. Set ds_accelerator accordingly
if accelerator_name == 'cuda':
from .cuda_accelerator import CUDA_Accelerator
ds_accelerator = CUDA_Accelerator()
elif accelerator_name == 'cpu':
from .cpu_accelerator import CPU_Accelerator
ds_accelerator = CPU_Accelerator()
elif accelerator_name == 'xpu':
# XPU_Accelerator is already imported in detection stage
ds_accelerator = XPU_Accelerator()
_validate_accelerator(ds_accelerator)
print(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
return ds_accelerator
def set_accelerator(accel_obj):
global ds_accelerator
_validate_accelerator(accel_obj)
print(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
ds_accelerator = accel_obj
'''
-----------[code] test_get.py -----------
from deepspeed.accelerator import get_accelerator
my_accelerator = get_accelerator()
print(f'{my_accelerator._name=}')
print(f'{my_accelerator._communication_backend_name=}')
print(f'{my_accelerator.HalfTensor().device=}')
print(f'{my_accelerator.total_memory()=}')
-----------[code] test_get.py -----------
---[output] python test_get.py---------
my_accelerator._name='cuda'
my_accelerator._communication_backend_name='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_get.py---------
**************************************************************************
-----------[code] test_set.py -----------
from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
cu_accel = CUDA_Accelerator()
print(f'{id(cu_accel)=}')
from deepspeed.accelerator import set_accelerator, get_accelerator
set_accelerator(cu_accel)
my_accelerator = get_accelerator()
print(f'{id(my_accelerator)=}')
print(f'{my_accelerator._name=}')
print(f'{my_accelerator._communication_backend_name=}')
print(f'{my_accelerator.HalfTensor().device=}')
print(f'{my_accelerator.total_memory()=}')
-----------[code] test_set.py -----------
---[output] python test_set.py---------
id(cu_accel)=139648165478304
id(my_accelerator)=139648165478304
my_accelerator._name='cuda'
my_accelerator._communication_backend_name='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_set.py---------
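**************************************************************************
-----------[code] DS_ACCELERATOR override (hypothetical session) -----------
# sketch of the environment-variable override handled above; 'cpu' assumes
# intel_extension_for_pytorch is installed, otherwise get_accelerator() raises
$ DS_ACCELERATOR=cpu python -c "from deepspeed.accelerator import get_accelerator; print(get_accelerator()._name)"
Setting ds_accelerator to cpu (override)
cpu
-----------[code] DS_ACCELERATOR override (hypothetical session) -----------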
'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator
import oneccl_bindings_for_pytorch # noqa: F401
import psutil
import os
# accelerator for Intel CPU
class CPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cpu'
self._communication_backend_name = 'ccl'
self.max_mem = psutil.Process().memory_info().rss
def is_synchronized_device(self):
return True
# Device APIs
def device_name(self, device_index=None):
return 'cpu'
def device(self, device_index=None):
return None
def set_device(self, device_index):
return
def current_device(self):
        return int(os.environ.get('LOCAL_RANK', '0'))
def current_device_name(self):
return 'cpu'
    def device_count(self):
        device_count = int(os.environ.get('LOCAL_SIZE', 0))
        if device_count > 0:
            return device_count
        else:
            from deepspeed.utils.numa import get_numa_cores
            # Count NUMA nodes to determine the number of CPU accelerators. On a
            # machine with HBM in flat mode, the HBM sits in a separate NUMA node
            # with no cores; ignore such NUMA nodes.
numa_core_lists = get_numa_cores()
numa_count = 0
for core_list in numa_core_lists:
if len(core_list) > 0:
numa_count += 1
return numa_count
def synchronize(self, device_index=None):
return
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
return torch.set_rng_state(new_state)
return torch.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
return torch.get_rng_state()
def manual_seed(self, seed):
return torch.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.manual_seed(seed)
    def initial_seed(self):
        return torch.initial_seed()
def default_generator(self, device_index):
return torch.default_generator
# Streams/Events
@property
def Stream(self):
return None
def stream(self, stream):
from deepspeed.runtime.utils import noop_decorator
return noop_decorator
def current_stream(self, device_index=None):
return None
def default_stream(self, device_index=None):
return None
@property
def Event(self):
return None
# Memory management
def empty_cache(self):
return
def get_rss(self):
mem = psutil.Process().memory_info().rss
if mem > self.max_mem:
self.max_mem = mem
return mem
def reset_rss(self):
mem = psutil.Process().memory_info().rss
self.max_mem = mem
return mem
def memory_allocated(self, device_index=None):
return self.get_rss()
def max_memory_allocated(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_allocated(self, device_index=None):
self.reset_rss()
return
def memory_cached(self, device_index=None):
return self.get_rss()
def max_memory_cached(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_cached(self, device_index=None):
self.reset_rss()
return
def memory_stats(self, device_index=None):
return self.get_rss()
def reset_peak_memory_stats(self, device_index=None):
self.reset_rss()
return
def memory_reserved(self, device_index=None):
return self.get_rss()
def max_memory_reserved(self, device_index=None):
self.get_rss()
return self.max_mem
def total_memory(self, device_index=None):
return psutil.virtual_memory().total
# Misc
def amp(self):
return torch.cpu.amp
def is_available(self):
return True
def range_push(self, msg):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_push(msg)
return
def range_pop(self):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_pop()
return
def lazy_call(self, callback):
return callback()
def communication_backend_name(self):
return self._communication_backend_name
# Data types
def is_bf16_supported(self):
return True
def is_fp16_supported(self):
return True
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.BFloat16Tensor
@property
def ByteTensor(self):
return torch.ByteTensor
@property
def DoubleTensor(self):
return torch.DoubleTensor
@property
def FloatTensor(self):
return torch.FloatTensor
@property
def HalfTensor(self):
return torch.HalfTensor
@property
def IntTensor(self):
return torch.IntTensor
@property
def LongTensor(self):
return torch.LongTensor
def pin_memory(self, tensor):
return tensor
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder.cpu"
except ImportError:
return "deepspeed.ops.op_builder.cpu"
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cpu'):
return True
else:
return False
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, op_name):
builder_class = self.get_op_builder(op_name)
        if builder_class is not None:
return builder_class()
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
from op_builder.cpu import CCLCommBuilder, NotImplementedBuilder
except ImportError:
from deepspeed.ops.op_builder.cpu import CCLCommBuilder, NotImplementedBuilder
if class_name == "CCLCommBuilder":
return CCLCommBuilder
else:
# return a NotImplementedBuilder to avoid get NoneType[Name] in unit tests
return NotImplementedBuilder
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""
import subprocess
import sys
def err(s: str) -> None:
print(s, file=sys.stderr)
COPYRIGHT = [
r"^\(\/\/\|#\) Copyright (c) Microsoft Corporation.$", r"^\(\/\/\|#\) SPDX-License-Identifier: Apache-2.0$",
r"^\(\/\/\|#\) DeepSpeed Team$"
]
success = True
failures = []
for f in sys.argv[1:]:
    for copyright_line in COPYRIGHT:
        res = subprocess.run(["git", "grep", "--quiet", "-e", copyright_line, f], capture_output=True)
        if res.returncode == 1:
            # this file is missing a required header line; record it and move on
            success = False
            failures.append(f)
            break
        elif res.returncode == 2:
            err(f"Error invoking grep on {f}:")
            err(res.stderr.decode("utf-8"))
            sys.exit(2)
if not success:
    err(f'{failures}: Missing license at top of file')
    sys.exit(1)
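# Example (hypothetical) invocation, e.g. from a pre-commit hook; the file
# names are illustrative:
#
#   $ python3 check-license.py deepspeed/__init__.py csrc/adam/cpu_adam.cpp
#
# Exits 0 when every listed file contains all three required header lines,
# 1 when any file is missing one, and 2 if git grep itself fails.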
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Checks each file in sys.argv for the string "torch.cuda".
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""
import subprocess
import sys
def err(s: str) -> None:
print(s, file=sys.stderr)
# There are many ways we could search for the string "torch.cuda", but `git
# grep --no-index` is nice because
# - it's very fast (as compared to iterating over the file in Python)
# - we can reasonably assume it's available on all machines
# - unlike plain grep, which is slower and has different flags on MacOS versus
# Linux, git grep is always the same.
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", "-e", r"torch\.cuda", "--and", "--not", "-e", "#ignore-cuda", *sys.argv[1:]],
capture_output=True,
)
if res.returncode == 0:
err('Error: The string "torch.cuda" was found.\nPlease replace all calls to torch.cuda with "get_accelerator()" and add the following import line:\n\n from deepspeed.accelerator import get_accelerator\n\nIf your code is mean to be cuda specific, please add the following comment in the line with torch.cuda:\n\n #ignore-cuda\n'
)
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", r"\.cuda()", *sys.argv[1:]],
capture_output=True,
)
if res.returncode == 0:
err('Error: The string ".cuda()" was found. This implies convert a tensor to cuda tensor. Please replace all calls to tensor.cuda() with "tensor.to(get_accelerator().device_name())" and add the following import line:\nfrom deepspeed.accelerator import get_accelerator'
)
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
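# Example (hypothetical) invocation; the file name is illustrative:
#
#   $ python3 check-torch-cuda.py deepspeed/runtime/engine.py
#
# Exits 0 when neither "torch.cuda" (without an #ignore-cuda marker) nor
# ".cuda()" is found, 1 when a forbidden string is found, and 2 if git grep
# itself fails.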
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
USAGE:
$ python3 script/replace_copyright.py --repo_dir ./
"""
import os
import argparse
NEW_COPYRIGHT = ("Copyright (c) Microsoft Corporation.", "SPDX-License-Identifier: Apache-2.0", "", "DeepSpeed Team")
PY_SL_COMMENT = "#"
PY_ML_SINGLE = "'''"
PY_ML_DOUBLE = '"""'
PY_COMMENTS = (PY_SL_COMMENT, PY_ML_SINGLE, PY_ML_DOUBLE)
C_SL_COMMENT = "//"
C_ML_OPEN = "/*"
C_ML_CLOSE = "*/"
C_COMMENTS = (C_SL_COMMENT, C_ML_OPEN, C_ML_CLOSE)
BASH_SL_COMMENT = "#"
BASH_COMMENTS = (BASH_SL_COMMENT, )
DELIM = r"|/-\|/-\|BARRIER|/-\|/-\|"
def parser_args():
parser = argparse.ArgumentParser()
parser.add_argument("--repo_dir", type=str, help="Repository directory")
parser.add_argument("--python_style_ext",
type=str,
nargs="+",
default=[".py"],
help="File types to process with python-style comments")
parser.add_argument("--bash_style_ext",
type=str,
nargs="+",
default=[".sh"],
help="File types to process with bash-style comments")
parser.add_argument("--c_style_ext",
type=str,
nargs="+",
default=[
".c",
".cpp",
".cu",
".h",
".hpp",
".cuh",
".cc",
".hip",
".tr",
],
help="File types to process with C-style comments")
args = parser.parse_args()
return args
# These get_header_* functions are ugly, but they work :)
def get_header_py(fp):
with open(fp, "r") as f:
lines = iter(l for l in f.readlines())
header = []
rest = []
in_multiline = False
multiline_type = None
while (l := next(lines, None)) is not None:
l = l.strip()
if l.startswith(PY_ML_SINGLE) or l.startswith(PY_ML_DOUBLE):
# Detected multiline comment
if in_multiline and multiline_type == l[:3]:
# Ended a multiline comment
in_multiline = False
else:
# Started a multiline comment
in_multiline = True
multiline_type = l[:3]
if l.endswith(multiline_type) and len(l) >= 6:
# Opened and closed multiline comment on single line
in_multiline = False
elif in_multiline and l.endswith(multiline_type):
# Ended a multiline comment
in_multiline = False
elif not (in_multiline or l.startswith(PY_SL_COMMENT) or l == ""):
# Not in a comment
rest += [l + "\n"]
break
header.append(l)
rest += list(lines)
return header, rest
def get_header_c(fp):
with open(fp, "r") as f:
lines = iter(l for l in f.readlines())
header = []
rest = []
in_multiline = False
while (l := next(lines, None)) is not None:
l = l.strip()
if l.startswith(C_ML_OPEN):
# Detected multiline comment
if not l.endswith(C_ML_CLOSE):
# multiline comment not closed on same line
in_multiline = True
elif l.endswith(C_ML_CLOSE):
# Ended a multiline comment
in_multiline = False
        elif not (in_multiline or l.startswith(C_SL_COMMENT) or l.isspace()):
# Not in a comment
rest += [l + "\n"]
break
header.append(l)
rest += list(lines)
return header, rest
def get_header_bash(fp):
with open(fp, "r") as f:
lines = iter(l for l in f.readlines())
header = []
rest = []
while (l := next(lines, None)) is not None:
l = l.strip()
        if not (l.startswith(BASH_SL_COMMENT) or l.isspace()):
# Not in a comment
rest += [l + "\n"]
break
header.append(l)
rest += list(lines)
return header, rest
def remove_comments(line, comment_strs):
for cstr in comment_strs:
line = line.replace(cstr, "")
return line
def format_multiline_comment(text, comment_type):
if comment_type == PY_COMMENTS:
text = f"\n{comment_type[2]}\n" + "\n".join(text) + f"{comment_type[2]}"
if comment_type == C_COMMENTS:
text = f"\n{comment_type[1]}\n" + "\n".join(text) + f"{comment_type[2]}"
if comment_type == BASH_COMMENTS:
text = "\n".join([f"{comment_type[0]}{l}" for l in text])
return text
def modify_file_header(fp, file_header, rest_of_file, preserve_text_store, comment_type):
header_text = "\n".join(file_header)
if not (header_text.strip() == "" or header_text in preserve_text_store):
# Unique header, need to get user input
print("\n", DELIM, "\n")
for idx, line in enumerate(file_header):
print(f"{idx}: {line}")
print("\n", DELIM, "\n")
print("\nIndicate the FIRST line of the Header to KEEP")
print("(shebang #! lines will be automatically processed and should not be included).")
keep_idx = input("Enter number (or leave blank if no lines should be preserved): ")
preserve_text_store[header_text] = file_header[int(keep_idx):] if keep_idx != "" else ""
# Identify any shebang lines in the file
shebang = "\n".join([l for l in file_header if l.startswith("#!")])
if shebang != "":
shebang += "\n"
# Get the text we should preserve in this file and process to remove comment characters
text_to_preserve = preserve_text_store.get(header_text, [""])
text_to_preserve = [remove_comments(l, comment_type) for l in text_to_preserve]
# Format the text we want to keep into a new multiline comment
if "".join(text_to_preserve) == "":
text_to_preserve = ""
else:
text_to_preserve = format_multiline_comment(text_to_preserve, comment_type)
# Generate the copyright text we will be adding
copyright_text = "\n".join([f"{comment_type[0]} {l}" if l != "" else l for l in NEW_COPYRIGHT])
# Assemble the new header
new_header = shebang + copyright_text + text_to_preserve
# Write out the new file
new_file_contents = new_header + "\n" + "".join(rest_of_file)
with open(fp, "w") as f:
f.write(new_file_contents)
return preserve_text_store # Return so we can reuse for future files
def main(args):
preserve_text_store = {} # Used to track header comments we should preserve
for root, dirs, fnames in os.walk(args.repo_dir):
# Walk across directory looking for all files with extensions we want to modify
for ext in args.python_style_ext:
fpaths = [os.path.join(root, fn) for fn in fnames if fn.endswith(ext)]
for fp in fpaths:
file_header, rest_of_file = get_header_py(fp)
preserve_text_store = modify_file_header(fp, file_header, rest_of_file, preserve_text_store,
PY_COMMENTS)
for ext in args.c_style_ext:
fpaths = [os.path.join(root, fn) for fn in fnames if fn.endswith(ext)]
for fp in fpaths:
file_header, rest_of_file = get_header_c(fp)
preserve_text_store = modify_file_header(fp, file_header, rest_of_file, preserve_text_store,
C_COMMENTS)
for ext in args.bash_style_ext:
fpaths = [os.path.join(root, fn) for fn in fnames if fn.endswith(ext)]
for fp in fpaths:
file_header, rest_of_file = get_header_bash(fp)
preserve_text_store = modify_file_header(fp, file_header, rest_of_file, preserve_text_store,
BASH_COMMENTS)
if __name__ == "__main__":
args = parser_args()
main(args)
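# Hypothetical interactive session: for each unique header encountered, the
# script prints the numbered header lines and asks which to keep, e.g.
#
#   0: # Copyright 2020 The Example Authors
#   1: # Licensed under the Apache License
#   Enter number (or leave blank if no lines should be preserved): 1
#
# after which every file sharing that header is rewritten with the new
# copyright block followed by the preserved text.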
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Checks each file in sys.argv for the string "torch.distributed".
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""
import subprocess
import sys
def err(s: str) -> None:
print(s, file=sys.stderr)
# There are many ways we could search for the string "torch.distributed", but `git
# grep --no-index` is nice because
# - it's very fast (as compared to iterating over the file in Python)
# - we can reasonably assume it's available on all machines
# - unlike plain grep, which is slower and has different flags on MacOS versus
# Linux, git grep is always the same.
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", r"torch\.distributed", *sys.argv[1:]],
capture_output=True,
)
if res.returncode == 0:
err('Error: The string "torch.distributed" was found. Please replace all calls to torch.distributed with "deepspeed.comm"'
)
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
|