python_code
stringlengths 0
456k
|
---|
import numpy as np
import cv2 as cv
# aruco
adict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
cv.imshow("marker", cv.aruco.drawMarker(adict, 0, 400))
# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)
# AR scene
cv.ovis.addResourceLocation("packs/Sinbad.zip") # shipped with Ogre
win = cv.ovis.createWindow("arucoAR", imsize, flags=0)
win.setCameraIntrinsics(K, imsize)
win.createEntity("figure", "Sinbad.mesh", (0, 0, 5), (1.57, 0, 0))
win.createLightEntity("sun", (0, 0, 100))
# video capture
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv.CAP_PROP_FRAME_HEIGHT, imsize[1])
while cv.ovis.waitKey(1) != 27:
img = cap.read()[1]
win.setBackground(img)
corners, ids = cv.aruco.detectMarkers(img, adict)[:2]
cv.waitKey(1)
if ids is None:
continue
rvecs, tvecs = cv.aruco.estimatePoseSingleMarkers(corners, 5, K, None)[:2]
win.setCameraPose(tvecs[0].ravel(), rvecs[0].ravel(), invert=True)
|
import numpy as np
import cv2 as cv
# add some external resources
cv.ovis.addResourceLocation("packs/Sinbad.zip")
# camera intrinsics
imsize = (800, 600)
K = np.diag([800, 800, 1])
K[:2, 2] = (400, 500) # offset pp
# observer scene
owin = cv.ovis.createWindow("VR", imsize)
cv.ovis.createGridMesh("ground", (10, 10), (10, 10))
owin.createEntity("ground", "ground", rot=(1.57, 0, 0))
owin.createCameraEntity("cam", K, imsize, 5)
owin.createEntity("sinbad", "Sinbad.mesh", tvec=(0, -5, 0), rot=(np.pi, 0, 0)) # externally defined mesh
owin.createLightEntity("sun", (0, 0, -100))
# setup and play idle animation
owin.setEntityProperty("sinbad", cv.ovis.ENTITY_ANIMBLEND_MODE, 1) # 1 = cumulative
owin.playEntityAnimation("sinbad", "IdleBase")
owin.playEntityAnimation("sinbad", "IdleTop")
# interaction scene
iwin = cv.ovis.createWindow("AR", imsize, cv.ovis.SCENE_SEPARATE | cv.ovis.SCENE_INTERACTIVE)
iwin.createEntity("sinbad", "Sinbad.mesh", tvec=(0, -5, 0), rot=(np.pi, 0, 0))
iwin.createLightEntity("sun", (0, 0, -100))
iwin.setCameraIntrinsics(K, imsize)
while cv.ovis.waitKey(1) != 27:
R, t = iwin.getCameraPose()
owin.setEntityPose("cam", t, R)
del iwin # must be destroyed in reverse creation order |
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudaarithm_test(NewOpenCVTests):
def setUp(self):
super(cudaarithm_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_cudaarithm(self):
npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat(npMat)
cuMatDst = cv.cuda_GpuMat(cuMat.size(),cuMat.type())
cuMatB = cv.cuda_GpuMat(cuMat.size(),cv.CV_8UC1)
cuMatG = cv.cuda_GpuMat(cuMat.size(),cv.CV_8UC1)
cuMatR = cv.cuda_GpuMat(cuMat.size(),cv.CV_8UC1)
self.assertTrue(np.allclose(cv.cuda.merge(cv.cuda.split(cuMat)),npMat))
cv.cuda.split(cuMat,[cuMatB,cuMatG,cuMatR])
cv.cuda.merge([cuMatB,cuMatG,cuMatR],cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),npMat))
shift = (np.random.random((cuMat.channels(),)) * 8).astype(np.uint8).tolist()
self.assertTrue(np.allclose(cv.cuda.rshift(cuMat,shift).download(),npMat >> shift))
cv.cuda.rshift(cuMat,shift,cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),npMat >> shift))
self.assertTrue(np.allclose(cv.cuda.lshift(cuMat,shift).download(),(npMat << shift).astype('uint8')))
cv.cuda.lshift(cuMat,shift,cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),(npMat << shift).astype('uint8')))
def test_arithmetic(self):
npMat1 = np.random.random((128, 128, 3)) - 0.5
npMat2 = np.random.random((128, 128, 3)) - 0.5
cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)
cuMatDst = cv.cuda_GpuMat(cuMat1.size(),cuMat1.type())
self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
cv.add(npMat1, npMat2)))
cv.cuda.add(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.add(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
cv.subtract(npMat1, npMat2)))
cv.cuda.subtract(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.subtract(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
cv.multiply(npMat1, npMat2)))
cv.cuda.multiply(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.multiply(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
cv.divide(npMat1, npMat2)))
cv.cuda.divide(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.divide(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
cv.absdiff(npMat1, npMat2)))
cv.cuda.absdiff(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.absdiff(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
cv.compare(npMat1, npMat2, cv.CMP_GE)))
cuMatDst1 = cv.cuda_GpuMat(cuMat1.size(),cv.CV_8UC3)
cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE, cuMatDst1)
self.assertTrue(np.allclose(cuMatDst1.download(),cv.compare(npMat1, npMat2, cv.CMP_GE)))
self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
np.abs(npMat1)))
cv.cuda.abs(cuMat1, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),np.abs(npMat1)))
self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
cv.cuda.abs(cuMat1).download()))
cv.cuda.sqr(cuMat1, cuMatDst)
cv.cuda.sqrt(cuMatDst, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.cuda.abs(cuMat1).download()))
self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
npMat1))
cv.cuda.exp(cuMat1, cuMatDst)
cv.cuda.log(cuMatDst, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),npMat1))
self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
cv.pow(npMat1, 2)))
cv.cuda.pow(cuMat1, 2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.pow(npMat1, 2)))
def test_logical(self):
npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)
cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)
cuMatDst = cv.cuda_GpuMat(cuMat1.size(),cuMat1.type())
self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
cv.bitwise_or(npMat1, npMat2)))
cv.cuda.bitwise_or(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_or(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
cv.bitwise_and(npMat1, npMat2)))
cv.cuda.bitwise_and(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_and(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
cv.bitwise_xor(npMat1, npMat2)))
cv.cuda.bitwise_xor(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_xor(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
cv.bitwise_not(npMat1)))
cv.cuda.bitwise_not(cuMat1, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_not(npMat1)))
self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
cv.min(npMat1, npMat2)))
cv.cuda.min(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.min(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
cv.max(npMat1, npMat2)))
cv.cuda.max(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.max(npMat1, npMat2)))
def test_convolution(self):
npMat = (np.random.random((128, 128)) * 255).astype(np.float32)
npDims = np.array(npMat.shape)
kernel = (np.random.random((3, 3)) * 1).astype(np.float32)
kernelDims = np.array(kernel.shape)
iS = (kernelDims/2).astype(int)
iE = npDims - kernelDims + iS
cuMat = cv.cuda_GpuMat(npMat)
cuKernel= cv.cuda_GpuMat(kernel)
cuMatDst = cv.cuda_GpuMat(tuple(npDims - kernelDims + 1), cuMat.type())
conv = cv.cuda.createConvolution()
self.assertTrue(np.allclose(conv.convolve(cuMat,cuKernel,ccorr=True).download(),
cv.filter2D(npMat,-1,kernel,anchor=(-1,-1))[iS[0]:iE[0]+1,iS[1]:iE[1]+1]))
conv.convolve(cuMat,cuKernel,cuMatDst,True)
self.assertTrue(np.allclose(cuMatDst.download(),
cv.filter2D(npMat,-1,kernel,anchor=(-1,-1))[iS[0]:iE[0]+1,iS[1]:iE[1]+1]))
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
import subprocess
import os
import sys
basedir = os.path.dirname(sys.argv[0])
readme = os.path.join(basedir, "README.md")
with open(readme) as f:
inp = f.read()
out = ""
it = iter(inp.splitlines(True))
for line in it:
out += line
if line.startswith("```cmdoutput"):
# Get command.
cmd = next(it)
assert cmd.startswith("$ "), cmd
real_cmd = cmd[2:].strip()
out += cmd
print("Running: " + real_cmd)
out += subprocess.check_output(real_cmd, shell=True)
# Skip pre-existing command output.
line = next(it)
while not line.startswith("```"):
line = next(it)
out += line
with open(readme, "w") as f:
f.write(out)
|
import os
import re
from datetime import datetime
from setuptools import find_packages, setup
from op_builder.utils import get_cuda_bare_metal_version
try:
import torch
from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 10):
raise RuntimeError("Colossal-AI requires Pytorch 1.10 or newer.\n"
"The latest stable release can be obtained from https://pytorch.org/")
TORCH_AVAILABLE = True
except ImportError:
TORCH_AVAILABLE = False
CUDA_HOME = None
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
build_cuda_ext = False
ext_modules = []
is_nightly = int(os.environ.get('NIGHTLY', '0')) == 1
if int(os.environ.get('CUDA_EXT', '0')) == 1:
if not TORCH_AVAILABLE:
raise ModuleNotFoundError(
"PyTorch is not found while CUDA_EXT=1. You need to install PyTorch first in order to build CUDA extensions"
)
if not CUDA_HOME:
raise RuntimeError(
"CUDA_HOME is not found while CUDA_EXT=1. You need to export CUDA_HOME environment vairable or install CUDA Toolkit first in order to build CUDA extensions"
)
build_cuda_ext = True
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if bare_metal_major != torch_binary_major:
print(f'The detected CUDA version ({raw_output}) mismatches the version that was used to compile PyTorch '
f'({torch.version.cuda}). CUDA extension will not be installed.')
return False
if bare_metal_minor != torch_binary_minor:
print("\nWarning: Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
f"Pytorch binaries were compiled with Cuda {torch.version.cuda}.\n"
"In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. ")
return True
def check_cuda_availability(cuda_dir):
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query
# torch.cuda.get_device_capability(), which will fail if you are compiling in an environment
# without visible GPUs (e.g. during an nvidia-docker build command).
print(
'\nWarning: Torch did not find available GPUs on this system.\n',
'If your intention is to cross-compile, this is not an error.\n'
'By default, Colossal-AI will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
'Volta (compute capability 7.0), Turing (compute capability 7.5),\n'
'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n'
'If you wish to cross-compile for a single specific architecture,\n'
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, _ = get_cuda_bare_metal_version(cuda_dir)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
return False
if cuda_dir is None:
print("nvcc was not found. CUDA extension will not be installed. If you're installing within a container from "
"https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
return False
return True
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
def fetch_requirements(path):
with open(path, 'r') as fd:
return [r.strip() for r in fd.readlines()]
def fetch_readme():
with open('README.md', encoding='utf-8') as f:
return f.read()
def get_version():
setup_file_path = os.path.abspath(__file__)
project_path = os.path.dirname(setup_file_path)
version_txt_path = os.path.join(project_path, 'version.txt')
version_py_path = os.path.join(project_path, 'colossalai/version.py')
with open(version_txt_path) as f:
version = f.read().strip()
# write version into version.py
with open(version_py_path, 'w') as f:
f.write(f"__version__ = '{version}'\n")
if build_cuda_ext:
torch_version = '.'.join(torch.__version__.split('.')[:2])
cuda_version = '.'.join(get_cuda_bare_metal_version(CUDA_HOME)[1:])
else:
torch_version = None
cuda_version = None
if torch_version:
f.write(f'torch = "{torch_version}"\n')
else:
f.write('torch = None\n')
if cuda_version:
f.write(f'cuda = "{cuda_version}"\n')
else:
f.write('cuda = None\n')
return version
if build_cuda_ext:
build_cuda_ext = check_cuda_availability(CUDA_HOME) and check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
if build_cuda_ext:
# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
from op_builder import ALL_OPS
for name, builder_cls in ALL_OPS.items():
print(f'===== Building Extension {name} =====')
ext_modules.append(builder_cls().builder())
# always put not nightly branch as the if branch
# otherwise github will treat colossalai-nightly as the project name
# and it will mess up with the dependency graph insights
if not is_nightly:
version = get_version()
package_name = 'colossalai'
else:
# use date as the nightly version
version = datetime.today().strftime('%Y.%m.%d')
package_name = 'colossalai-nightly'
setup(name=package_name,
version=version,
packages=find_packages(exclude=(
'benchmark',
'docker',
'tests',
'docs',
'examples',
'tests',
'scripts',
'requirements',
'*.egg-info',
)),
description='An integrated large-scale model training system with efficient parallelization techniques',
long_description=fetch_readme(),
long_description_content_type='text/markdown',
license='Apache Software License 2.0',
url='https://www.colossalai.org',
project_urls={
'Forum': 'https://github.com/hpcaitech/ColossalAI/discussions',
'Bug Tracker': 'https://github.com/hpcaitech/ColossalAI/issues',
'Examples': 'https://github.com/hpcaitech/ColossalAI-Examples',
'Documentation': 'http://colossalai.readthedocs.io',
'Github': 'https://github.com/hpcaitech/ColossalAI',
},
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension} if ext_modules else {},
install_requires=fetch_requirements('requirements/requirements.txt'),
entry_points='''
[console_scripts]
colossalai=colossalai.cli:cli
''',
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Environment :: GPU :: NVIDIA CUDA',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: System :: Distributed Computing',
],
package_data={
'colossalai': [
'_C/*.pyi', 'kernel/cuda_native/csrc/*', 'kernel/cuda_native/csrc/kernel/*',
'kernel/cuda_native/csrc/kernels/include/*'
]
})
|
import os
from .builder import Builder
from .utils import append_nvcc_threads
class ScaledMaskedSoftmaxBuilder(Builder):
NAME = "scaled_masked_softmax"
PREBUILT_IMPORT_PATH = "colossalai._C.scaled_masked_softmax"
def __init__(self):
super().__init__(name=ScaledMaskedSoftmaxBuilder.NAME, prebuilt_import_path=ScaledMaskedSoftmaxBuilder.PREBUILT_IMPORT_PATH)
# necessary 4 functions
def sources_files(self):
ret = [
self.csrc_abs_path(fname) for fname in
['scaled_masked_softmax.cpp', 'scaled_masked_softmax_cuda.cu']
]
return ret
def include_dirs(self):
return [
self.csrc_abs_path("kernels/include"),
self.get_cuda_home_include()
]
def cxx_flags(self):
return ['-O3'] + self.version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = [
'-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
]
ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags
return append_nvcc_threads(ret)
|
import os
from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag
class LayerNormBuilder(Builder):
NAME = "layernorm"
PREBUILT_IMPORT_PATH = "colossalai._C.layernorm"
def __init__(self):
super().__init__(name=LayerNormBuilder.NAME, prebuilt_import_path=LayerNormBuilder.PREBUILT_IMPORT_PATH)
def sources_files(self):
ret = [self.csrc_abs_path(fname) for fname in ['layer_norm_cuda.cpp', 'layer_norm_cuda_kernel.cu']]
return ret
def include_dirs(self):
ret = [self.csrc_abs_path('kernels/include'), self.get_cuda_home_include()]
return ret
def cxx_flags(self):
return ['-O3'] + self.version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = ['-maxrregcount=50']
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + extra_cuda_flags + self.version_dependent_macros
return append_nvcc_threads(ret)
|
import os
from .builder import Builder
from .utils import get_cuda_cc_flag
class FusedOptimBuilder(Builder):
NAME = "fused_optim"
PREBUILT_IMPORT_PATH = "colossalai._C.fused_optim"
def __init__(self):
super().__init__(name=FusedOptimBuilder.NAME, prebuilt_import_path=FusedOptimBuilder.PREBUILT_IMPORT_PATH)
def sources_files(self):
ret = [
self.csrc_abs_path(fname) for fname in [
'colossal_C_frontend.cpp', 'multi_tensor_sgd_kernel.cu', 'multi_tensor_scale_kernel.cu',
'multi_tensor_adam.cu', 'multi_tensor_l2norm_kernel.cu', 'multi_tensor_lamb.cu'
]
]
return ret
def include_dirs(self):
ret = [self.csrc_abs_path('kernels/include'), self.get_cuda_home_include()]
return ret
def cxx_flags(self):
version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
return ['-O3'] + version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = ['-lineinfo']
extra_cuda_flags.extend(get_cuda_cc_flag())
return ['-O3', '--use_fast_math'] + extra_cuda_flags
|
import os
from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag
class MultiHeadAttnBuilder(Builder):
NAME = "multihead_attention"
PREBUILT_IMPORT_PATH = "colossalai._C.multihead_attention"
def __init__(self):
super().__init__(name=MultiHeadAttnBuilder.NAME,
prebuilt_import_path=MultiHeadAttnBuilder.PREBUILT_IMPORT_PATH)
def include_dirs(self):
ret = [self.csrc_abs_path("kernels/include"), self.get_cuda_home_include()]
return ret
def sources_files(self):
ret = [
self.csrc_abs_path(fname) for fname in [
'multihead_attention_1d.cpp', 'kernels/cublas_wrappers.cu', 'kernels/transform_kernels.cu',
'kernels/dropout_kernels.cu', 'kernels/normalize_kernels.cu', 'kernels/softmax_kernels.cu',
'kernels/general_kernels.cu', 'kernels/cuda_util.cu'
]
]
return ret
def cxx_flags(self):
return ['-O3'] + self.version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = [
'-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
]
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags
return append_nvcc_threads(ret)
|
from .cpu_adam import CPUAdamBuilder
from .fused_optim import FusedOptimBuilder
from .layernorm import LayerNormBuilder
from .moe import MOEBuilder
from .multi_head_attn import MultiHeadAttnBuilder
from .scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
from .scaled_upper_triangle_masked_softmax import ScaledUpperTrainglemaskedSoftmaxBuilder
ALL_OPS = {
'cpu_adam': CPUAdamBuilder,
'fused_optim': FusedOptimBuilder,
'moe': MOEBuilder,
'multi_head_attn': MultiHeadAttnBuilder,
'scaled_masked_softmax': ScaledMaskedSoftmaxBuilder,
'scaled_upper_triangle_masked_softmax': ScaledUpperTrainglemaskedSoftmaxBuilder,
'layernorm': LayerNormBuilder,
}
__all__ = [
'ALL_OPS', 'CPUAdamBuilder', 'FusedOptimBuilder', 'MultiHeadAttnBuilder', 'ScaledMaskedSoftmaxBuilder',
'ScaledUpperTrainglemaskedSoftmaxBuilder', 'MOEBuilder', 'MultiTensorSGDBuilder', 'MultiTensorAdamBuilder',
'MultiTensorLambBuilder', 'MultiTensorScaleBuilder', 'MultiTensorL2NormBuilder'
]
|
import importlib
import os
import time
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List
def print_rank_0(message):
"""
Print on only one process to avoid spamming.
"""
try:
import torch.distributed as dist
if not dist.is_initialized():
is_main_rank = True
else:
is_main_rank = dist.get_rank() == 0
except ImportError:
is_main_rank = True
if is_main_rank:
print(message)
class Builder(ABC):
"""
Builder is the base class to build extensions for PyTorch.
Args:
name (str): the name of the kernel to be built
prebuilt_import_path (str): the path where the extension is installed during pip install
"""
def __init__(self, name: str, prebuilt_import_path: str):
self.name = name
self.prebuilt_import_path = prebuilt_import_path
self.version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
assert prebuilt_import_path.startswith('colossalai._C'), \
f'The prebuilt_import_path should start with colossalai._C, but got {self.prebuilt_import_path}'
def relative_to_abs_path(self, code_path: str) -> str:
"""
This function takes in a path relative to the colossalai root directory and return the absolute path.
"""
op_builder_module_path = Path(__file__).parent
# if we install from source
# the current file path will be op_builder/builder.py
# if we install via pip install colossalai
# the current file path will be colossalai/kernel/op_builder/builder.py
# this is because that the op_builder inside colossalai is a symlink
# this symlink will be replaced with actual files if we install via pypi
# thus we cannot tell the colossalai root directory by checking whether the op_builder
# is a symlink, we can only tell whether it is inside or outside colossalai
if str(op_builder_module_path).endswith('colossalai/kernel/op_builder'):
root_path = op_builder_module_path.parent.parent
else:
root_path = op_builder_module_path.parent.joinpath('colossalai')
code_abs_path = root_path.joinpath(code_path)
return str(code_abs_path)
def get_cuda_home_include(self):
"""
return include path inside the cuda home.
"""
from torch.utils.cpp_extension import CUDA_HOME
if CUDA_HOME is None:
raise RuntimeError("CUDA_HOME is None, please set CUDA_HOME to compile C++/CUDA kernels in ColossalAI.")
cuda_include = os.path.join(CUDA_HOME, "include")
return cuda_include
def csrc_abs_path(self, path):
return os.path.join(self.relative_to_abs_path('kernel/cuda_native/csrc'), path)
# functions must be overrided begin
@abstractmethod
def sources_files(self) -> List[str]:
"""
This function should return a list of source files for extensions.
"""
raise NotImplementedError
@abstractmethod
def include_dirs(self) -> List[str]:
"""
This function should return a list of inlcude files for extensions.
"""
pass
@abstractmethod
def cxx_flags(self) -> List[str]:
"""
This function should return a list of cxx compilation flags for extensions.
"""
pass
@abstractmethod
def nvcc_flags(self) -> List[str]:
"""
This function should return a list of nvcc compilation flags for extensions.
"""
pass
# functions must be overrided over
def strip_empty_entries(self, args):
'''
Drop any empty strings from the list of compile and link flags
'''
return [x for x in args if len(x) > 0]
def import_op(self):
"""
This function will import the op module by its string name.
"""
return importlib.import_module(self.prebuilt_import_path)
def load(self, verbose=True):
"""
load the kernel during runtime. If the kernel is not built during pip install, it will build the kernel.
If the kernel is built during runtime, it will be stored in `~/.cache/colossalai/torch_extensions/`. If the
kernel is built during pip install, it can be accessed through `colossalai._C`.
Warning: do not load this kernel repeatedly during model execution as it could slow down the training process.
Args:
verbose (bool, optional): show detailed info. Defaults to True.
"""
from torch.utils.cpp_extension import load
start_build = time.time()
try:
op_module = self.import_op()
if verbose:
print_rank_0(f"OP {self.prebuilt_import_path} already exists, skip building.")
except ImportError:
# construct the build directory
import torch
torch_version_major = torch.__version__.split('.')[0]
torch_version_minor = torch.__version__.split('.')[1]
torch_cuda_version = torch.version.cuda
home_directory = os.path.expanduser('~')
extension_directory = f".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_cu{torch_cuda_version}"
build_directory = os.path.join(home_directory, extension_directory)
Path(build_directory).mkdir(parents=True, exist_ok=True)
if verbose:
print_rank_0(
"=========================================================================================")
print_rank_0(f"No pre-built kernel is found, build and load the {self.name} kernel during runtime now")
print_rank_0(
"=========================================================================================")
# load the kernel
op_module = load(name=self.name,
sources=self.strip_empty_entries(self.sources_files()),
extra_include_paths=self.strip_empty_entries(self.include_dirs()),
extra_cflags=self.cxx_flags(),
extra_cuda_cflags=self.nvcc_flags(),
extra_ldflags=[],
build_directory=build_directory,
verbose=verbose)
build_duration = time.time() - start_build
if verbose:
print_rank_0(f"Time to load {self.name} op: {build_duration} seconds")
return op_module
def builder(self) -> 'CUDAExtension':
"""
get a CUDAExtension instance used for setup.py
"""
from torch.utils.cpp_extension import CUDAExtension
return CUDAExtension(name=self.prebuilt_import_path,
sources=self.strip_empty_entries(self.sources_files()),
include_dirs=self.strip_empty_entries(self.include_dirs()),
extra_compile_args={
'cxx': self.strip_empty_entries(self.cxx_flags()),
'nvcc': self.strip_empty_entries(self.nvcc_flags())
})
|
import os
from .builder import Builder
from .utils import append_nvcc_threads
class CPUAdamBuilder(Builder):
NAME = "cpu_adam"
PREBUILT_IMPORT_PATH = "colossalai._C.cpu_adam"
def __init__(self):
super().__init__(name=CPUAdamBuilder.NAME, prebuilt_import_path=CPUAdamBuilder.PREBUILT_IMPORT_PATH)
self.version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
# necessary 4 functions
def sources_files(self):
ret = [
self.csrc_abs_path('cpu_adam.cpp'),
]
return ret
def include_dirs(self):
return [
self.csrc_abs_path("includes"),
self.get_cuda_home_include()
]
def cxx_flags(self):
extra_cxx_flags = ['-std=c++14', '-lcudart', '-lcublas', '-g', '-Wno-reorder', '-fopenmp', '-march=native']
return ['-O3'] + self.version_dependent_macros + extra_cxx_flags
def nvcc_flags(self):
extra_cuda_flags = [
'-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK'
]
ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags
return append_nvcc_threads(ret)
|
import re
import subprocess
from typing import List
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def get_cuda_cc_flag() -> List:
"""get_cuda_cc_flag
cc flag for your GPU arch
"""
# only import torch when needed
# this is to avoid importing torch when building on a machine without torch pre-installed
# one case is to build wheel for pypi release
import torch
cc_flag = []
for arch in torch.cuda.get_arch_list():
res = re.search(r'sm_(\d+)', arch)
if res:
arch_cap = res[1]
if int(arch_cap) >= 60:
cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])
return cc_flag
def append_nvcc_threads(nvcc_extra_args):
from torch.utils.cpp_extension import CUDA_HOME
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
|
import os
from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag
class ScaledUpperTrainglemaskedSoftmaxBuilder(Builder):
NAME = "scaled_upper_triangle_masked_softmax"
PREBUILT_IMPORT_PATH = "colossalai._C.scaled_upper_triangle_masked_softmax"
def __init__(self):
super().__init__(name=ScaledUpperTrainglemaskedSoftmaxBuilder.NAME, prebuilt_import_path=ScaledUpperTrainglemaskedSoftmaxBuilder.PREBUILT_IMPORT_PATH)
def include_dirs(self):
return [
self.csrc_abs_path("kernels/include"),
self.get_cuda_home_include()
]
def sources_files(self):
ret = [
self.csrc_abs_path(fname)
for fname in ['scaled_upper_triang_masked_softmax.cpp', 'scaled_upper_triang_masked_softmax_cuda.cu']
]
return ret
def cxx_flags(self):
return ['-O3'] + self.version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = [
'-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',
'--expt-extended-lambda'
]
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + extra_cuda_flags
return append_nvcc_threads(ret)
|
import os
from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag
class MOEBuilder(Builder):
NAME = "moe"
PREBUILT_IMPORT_PATH = "colossalai._C.moe"
def __init__(self):
super().__init__(name=MOEBuilder.NAME, prebuilt_import_path=MOEBuilder.PREBUILT_IMPORT_PATH)
def include_dirs(self):
ret = [
self.csrc_abs_path("kernels/include"),
self.get_cuda_home_include()
]
return ret
def sources_files(self):
ret = [self.csrc_abs_path(fname) for fname in ['moe_cuda.cpp', 'moe_cuda_kernel.cu']]
return ret
def cxx_flags(self):
return ['-O3'] + self.version_dependent_macros
def nvcc_flags(self):
extra_cuda_flags = [
'-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',
'--expt-extended-lambda'
]
extra_cuda_flags.extend(get_cuda_cc_flag())
ret = ['-O3', '--use_fast_math'] + extra_cuda_flags
return append_nvcc_threads(ret)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
from pathlib import Path
import pytest
import torch
import torch.multiprocessing as mp
from colossalai import launch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import free_port
from colossalai.context import reset_seeds
from colossalai.global_variables import tensor_parallel_env as tp_env
from colossalai.testing import rerun_if_address_is_in_use
CONFIG_PATH_LIST = list(Path(__file__).parent.glob('configs/*.py'))
def check_data_parallel_rank(rank):
global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
mp_size = gpc.get_world_size(ParallelMode.MODEL)
num_dp_groups = global_world_size // mp_size
dp_local_rank = gpc.get_local_rank(ParallelMode.DATA)
assert gpc.get_world_size(ParallelMode.DATA) == num_dp_groups
for group_idx in range(num_dp_groups):
ranks_in_dp_group = range(group_idx * mp_size, (group_idx + 1) * mp_size)
if rank in ranks_in_dp_group:
assert dp_local_rank == group_idx
def check_pipeline_parallel_rank(rank):
mp_world_size = gpc.get_world_size(ParallelMode.MODEL)
tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
num_pipeline_stage = mp_world_size // tp_world_size
pipeline_local_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
for stage_idx in range(num_pipeline_stage):
ranks_in_current_stage = range(stage_idx * tp_world_size, (stage_idx + 1) * tp_world_size)
if rank in ranks_in_current_stage:
assert stage_idx == pipeline_local_rank
def check_model_parallel_rank(rank):
mp_size = gpc.get_world_size(ParallelMode.MODEL)
rank_within_mp_group = rank % mp_size
mp_local_rank = gpc.get_local_rank(ParallelMode.MODEL)
assert rank_within_mp_group == mp_local_rank
def check_tensor_parallel_rank(rank):
if tp_env.mode == '2d':
check_2d_tensor_parallel_rank(rank)
elif tp_env == '2.5d':
check_2p5d_tensor_parallel_rank(rank)
elif tp_env == '3d':
check_3d_tensor_parallel_rank(rank)
def get_tp_info():
global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
num_tp_groups = global_world_size // tp_world_size
tp_local_rank = gpc.get_local_rank(ParallelMode.TENSOR)
return tp_local_rank, tp_world_size, num_tp_groups
def check_2d_tensor_parallel_rank(rank):
tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
for group_id in range(num_tp_groups):
ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
if rank in ranks_in_current_tp_group:
col_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
row_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
assert col_local_rank == tp_local_rank // tp_env.summa_dim
assert row_local_rank == tp_local_rank % tp_env.summa_dim
def check_2p5d_tensor_parallel_rank(rank):
tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
for group_id in range(num_tp_groups):
ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
if rank in ranks_in_current_tp_group:
rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ)
assert rp_rank == tp_local_rank % tp_env.summa_dim
assert cp_rank == tp_local_rank // tp_env.tesseract_dim
assert dp_rank == tp_local_rank // (tp_env.summa_dim**2)
assert xp_rank == tp_local_rank // tp_env.summa_dim
def check_3d_tensor_parallel_rank(rank):
tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
for group_id in range(num_tp_groups):
ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
if rank in ranks_in_current_tp_group:
ip_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT)
wp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT)
op_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT)
assert ip_rank == tp_local_rank % tp_env.depth_3d
assert wp_rank == tp_local_rank // tp_env.depth_3d
assert op_rank == tp_local_rank // (tp_env.depth_3d**2)
def init_context(config_path, rank, world_size, backend, port, host):
dist_args = dict(config=config_path,
rank=rank,
world_size=world_size,
backend=backend,
port=port,
host=host,
verbose=True)
launch(**dist_args)
check_tensor_parallel_rank(rank)
check_data_parallel_rank(rank)
check_pipeline_parallel_rank(rank)
check_model_parallel_rank(rank)
gpc.destroy()
torch.cuda.empty_cache()
def run_dist(rank, world_size, backend, port_list, host):
for config_path, port in zip(CONFIG_PATH_LIST, port_list):
init_context(config_path=config_path, rank=rank, world_size=world_size, backend=backend, port=port, host=host)
reset_seeds()
@pytest.mark.cpu
@rerun_if_address_is_in_use()
def test_context():
"""
As no computation or communication is done, we can run this test on CPU.
"""
world_size = 32
port_list = []
for _ in range(len(CONFIG_PATH_LIST)):
while True:
port = free_port()
if port not in port_list:
port_list.append(port)
break
test_fn = partial(run_dist, world_size=world_size, backend='gloo', port_list=port_list, host='localhost')
mp.spawn(test_fn, nprocs=world_size)
if __name__ == '__main__':
test_context()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
pipeline=dict(size=2),
tensor=dict(
size=4,
mode='2d'
)
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
pipeline=dict(size=2),
tensor=dict(
size=8,
mode='3d'
)
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
pipeline=dict(size=2),
tensor=dict(
size=8,
depth=2,
mode='2.5d'
)
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import TensorShardStrategy
from torchvision.models import resnet50
def run_dist(rank, world_size, port):
# this test only runs on resnet18
# as this model has sync batch normalization
# need to configure cudnn deterministic so that
# randomness of convolution layers will be disabled
zero_config = dict(model_config=dict(shard_strategy=TensorShardStrategy()))
colossalai.launch(config=dict(zero=zero_config, cudnn_determinstic=True, cudnn_benchmark=False),
rank=rank,
world_size=world_size,
host='localhost',
port=port,
backend='nccl')
with ZeroInitContext(target_device=torch.cuda.current_device(),
shard_strategy=gpc.config.zero.model_config.shard_strategy,
shard_param=True):
model = resnet50()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = torch.nn.CrossEntropyLoss()
engine, *args = colossalai.initialize(model, optimizer, criterion)
# train for dummy iterations
engine.train()
for _ in range(2):
data = torch.rand(4, 3, 128, 128).cuda().half()
label = torch.randint(0, 10, size=(4,)).cuda()
engine.zero_grad()
out = engine(data)
loss = engine.criterion(out, label)
engine.backward(loss)
engine.step()
# test
# need to make sure the batch norm stats are synchronized
# so that given the same input, the model will produce the same
# output on different ranks
engine.eval()
data = torch.rand(4, 3, 128, 128).cuda().half()
dist.broadcast(data, src=0, group=gpc.get_group(ParallelMode.DATA))
# predict
out = engine(data)
# test if results are equal
tensor_list = [torch.empty_like(out) for _ in range(world_size - 1)]
tensor_list.insert(rank, out)
dist.all_gather(tensor_list=tensor_list, tensor=out, group=gpc.get_group(ParallelMode.DATA))
assert torch.all(tensor_list[0] == tensor_list[1]), \
'expected the output from different ranks to be the same, but got different values'
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_sharded_optim_with_sync_bn():
"""
This test is to make sure that buffers are synchronized between ranks
when using ZeRO. An example of module buffer is the running stats of
BatchNormalization layer, i.e. mean and var.
If the buffers are not synchronized, the model will produce different
output even though the input and parameters are the same. This is not
wanted if we are doing predictions.
"""
world_size = 2
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_sharded_optim_with_sync_bn()
|
from functools import partial
import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_zero.test_sharded_optim_v2 import _run_step
from common import CONFIG
@parameterize("cpu_offload", [True, False])
@parameterize("shard_strategy_class", [BucketTensorShardStrategy])
@parameterize("gpu_margin_mem_ratio", [0.0, 0.7])
def _run_test_found_inf(cpu_offload, shard_strategy_class, gpu_margin_mem_ratio):
test_models = ['repeated_computed_layers']
shard_strategy = shard_strategy_class()
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
with ZeroInitContext(target_device=torch.device(f'cpu:0') if cpu_offload else get_current_device(),
shard_strategy=shard_strategy,
shard_param=True):
zero_model = model_builder(checkpoint=True)
zero_model = ShardedModelV2(
zero_model,
shard_strategy,
tensor_placement_policy='cpu' if cpu_offload else 'cuda',
reuse_fp16_shard=True,
)
sharded_optim = HybridAdam(zero_model.parameters(), lr=1e-3)
sharded_optim = ShardedOptimizerV2(zero_model, sharded_optim, gpu_margin_mem_ratio=gpu_margin_mem_ratio)
for i, (data, label) in enumerate(train_dataloader):
if i > 1:
break
assert zero_model.overflow_counter == 0
data, label = data.cuda(), label.cuda()
_run_step(zero_model, sharded_optim, data, label, criterion, False)
for param in zero_model.parameters():
assert not has_inf_or_nan(param.colo_attr.data_payload)
def _run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
_run_test_found_inf()
# use_cpuadam = True can be used with cpu_offload = False
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_found_inf(world_size):
run_func = partial(_run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_found_inf(world_size=2)
|
import pytest
import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.gemini.tensor_utils import (colo_tensor_mem_usage, colo_model_data_tensor_move,
colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu,
colo_model_tensor_clone)
from colossalai.gemini.stateful_tensor import StatefulTensor
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
import torch
from functools import partial
import torch.multiprocessing as mp
def _run_colo_tensor_mem_usage():
for i in range(1):
if i == 1:
t1 = StatefulTensor(torch.randn(2, 2))
t2 = StatefulTensor(torch.randn(4, 4))
c1, g1 = colo_tensor_mem_usage(t1)
c2, g2 = colo_tensor_mem_usage(t2)
assert c1 * 4 == c2
assert g1 * 4 == g2
else:
t1 = torch.randn(2, 2)
t2 = torch.randn(4, 4)
c1, g1 = colo_tensor_mem_usage(t1)
c2, g2 = colo_tensor_mem_usage(t2)
assert c1 * 4 == c2
assert g1 * 4 == g2
def _run_colo_model_data_tensor_move_inline():
for t in [StatefulTensor(torch.randn(2, 3)), torch.randn(2, 3)]:
colo_model_data_tensor_move_inline(t, get_current_device())
assert t.device == get_current_device()
def _run_colo_model_data_tensor_move():
for t in [(StatefulTensor(torch.ones(2, 3)), StatefulTensor(torch.zeros(2, 3).to(get_current_device()))),
(torch.ones(2, 3), torch.zeros(2, 3).to(get_current_device()))]:
cpu_t, cuda_t = t
colo_model_data_tensor_move(cpu_t, cuda_t)
assert cuda_t.device == get_current_device()
def _run_colo_model_data_move_to_cpu():
for t in [StatefulTensor(torch.randn(2, 2)), torch.randn(4, 4)]:
colo_model_data_move_to_cpu(t)
assert t.device == torch.device("cpu")
def _run_colo_model_tensor_clone():
for t in [
StatefulTensor(torch.randn(2, 2).cuda(torch.cuda.current_device())),
torch.randn(4, 4).cuda(torch.cuda.current_device())
]:
if issubclass(type(t), StatefulTensor):
assert t.payload.device == get_current_device()
else:
assert t.device == get_current_device()
p = colo_model_tensor_clone(t, get_current_device())
assert p.device == get_current_device()
for i in range(2):
for j in range(2):
if issubclass(type(t), StatefulTensor):
assert t.payload.device == p.device
assert t.payload[i][j] == p[i][j]
else:
assert t.device == p.device
assert t[i][j] == p[i][j]
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
_run_colo_tensor_mem_usage()
_run_colo_model_data_tensor_move_inline()
_run_colo_model_data_tensor_move()
_run_colo_model_data_move_to_cpu()
_run_colo_model_tensor_clone()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_if_address_is_in_use()
def test_zero_tensor_utils(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_tensor_utils(world_size=2)
|
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_param import ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from tests.test_zero.common import CONFIG, allclose
from colossalai.gemini.stateful_tensor import StatefulTensor
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_shard_tensor_with_strategy(shard_strategy_class, world_size):
t = ShardedTensor(tensor=torch.randn(world_size * 2, 3))
assert list(t.origin_shape) == [world_size * 2, 3]
assert list(t.shape) == [world_size * 2, 3]
shard_strategy = shard_strategy_class()
# test shard strategy
shard_strategy.shard([t])
assert list(t.shape) == [6], f"{list(t.shape)} vs 6"
shard_strategy.gather([t])
assert list(t.shape) == [world_size * 2, 3], f"{list(t.shape)} vs {[world_size * 2, 3]}"
def _run_shard_tensor(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_shard_tensor_with_strategy(world_size=world_size)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_shard_tensor(world_size):
run_func = partial(_run_shard_tensor, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
def _run_shard_param_v2(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
param = torch.nn.Parameter(torch.randn(2, 3))
param_ref = deepcopy(param)
sparam = ShardedParamV2(param=param)
allclose(sparam.data_payload, param_ref.data)
# Test get memory usage
sparam.saved_grad = StatefulTensor(torch.randn(2, 3))
cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
assert cpu_mem_use == 2 * 3 * 4 * 2, f"cpu_mem_use: {cpu_mem_use}"
sparam.set_data_none()
assert (param.data.numel() == 0)
cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
# 4 is size of dummy tensor of param.data
assert cpu_mem_use == 2 * 3 * 4 * 2
sparam.saved_grad = StatefulTensor(torch.randn(2, 3))
sparam.set_data_none()
cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
assert cpu_mem_use == 2 * 3 * 4 * 2
assert cuda_mem_use == 0
# append a grad to torch param
param.data = sparam.data_payload
param.grad = torch.randn(2, 3)
cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
assert cpu_mem_use == 2 * 3 * 4 * 2 + 2 * 3 * 4, f"cpu_mem_use {cpu_mem_use}"
assert cuda_mem_use == 0
# reuse torch grad for sparam
sparam.saved_grad = StatefulTensor(param.grad)
cuda_mem_use, cpu_mem_use = sparam.get_memory_usage()
assert cpu_mem_use == 2 * 3 * 4 * 2
assert cuda_mem_use == 0
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_shard_param_v2(world_size):
run_func = partial(_run_shard_param_v2, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
# test_shard_tensor(2)
test_shard_param_v2(2)
|
import pytest
import colossalai
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from functools import partial
from tests.test_tensor.common_utils import set_seed
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.testing import parameterize
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.tensor import ProcessGroup
def init_zero(model_builder, placement_policy):
device = get_current_device() if placement_policy == 'cuda' else torch.device('cpu')
shard_strategy = TensorShardStrategy()
with ZeroInitContext(target_device=device, shard_strategy=shard_strategy, shard_param=True):
model = model_builder()
model = ShardedModelV2(
model,
shard_strategy,
tensor_placement_policy=placement_policy,
reuse_fp16_shard=True,
)
optim = HybridAdam(model.parameters(), lr=1e-3)
optim = ShardedOptimizerV2(model, optim, initial_scale=32)
return model, optim
def run_step(model, optim, criterion, data, label):
optim.zero_grad()
logits = model(data)
loss = criterion(logits, label)
optim.backward(loss)
optim.step()
def check_state_dict_eq(state_dict, other):
for p, state in state_dict['state'].items():
other_state = other['state'][p]
for k, v in state.items():
if isinstance(v, torch.Tensor):
assert torch.allclose(v, other_state[k], atol=1e-3), f'{v} vs {other_state[k]}'
else:
assert v == other_state[k]
@parameterize('placement_policy', ['cuda', 'cpu'])
def run_nested_model(placement_policy):
get_components_func = non_distributed_component_funcs.get_callable('simple_net')
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
set_seed(42)
model, optim = init_zero(model_builder, placement_policy)
set_seed(42)
model_copy, optim_copy = init_zero(model_builder, placement_policy)
model.train()
model_copy.train()
pg = ProcessGroup()
set_seed(pg.dp_local_rank())
data_iter = iter(train_dataloader)
data, label = map(lambda x: x.cuda(), next(data_iter))
run_step(model, optim, criterion, data, label)
optim_copy.load_state_dict(optim.state_dict())
check_state_dict_eq(optim.state_dict(), optim_copy.state_dict())
data, label = map(lambda x: x.cuda(), next(data_iter))
run_step(model_copy, optim_copy, criterion, data, label)
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_nested_model()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_sharded_optim_state_dist(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_sharded_optim_state_dist(2)
|
from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from common import CONFIG, check_sharded_model_params
from torch.nn.parallel import DistributedDataParallel as DDP
import colossalai
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from tests.components_to_test.registry import non_distributed_component_funcs
def _run_step(model, optimizer, data, label, criterion, enable_autocast=False):
model.train()
optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=enable_autocast):
if criterion:
y = model(data)
loss = criterion(y, label)
else:
loss = model(data, label)
loss = loss.float()
if isinstance(model, ShardedModelV2):
optimizer.backward(loss)
else:
loss.backward()
optimizer.step()
@parameterize("cpu_offload", [True, False])
@parameterize("use_cpuadam", [True, False])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
@parameterize("gpu_margin_mem_ratio", [0.0, 0.7])
def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam, gpu_margin_mem_ratio):
test_models = ['repeated_computed_layers', 'resnet18', 'bert', 'hanging_param_model']
shard_strategy = shard_strategy_class()
if use_cpuadam and cpu_offload is False:
return
if gpu_margin_mem_ratio > 0.0 and not (cpu_offload and use_cpuadam):
return
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
with ZeroInitContext(target_device=torch.device(f'cpu:0') if cpu_offload else get_current_device(),
shard_strategy=shard_strategy,
shard_param=True):
zero_model = model_builder(checkpoint=True)
zero_model = ShardedModelV2(
zero_model,
shard_strategy,
tensor_placement_policy='cpu' if cpu_offload else 'auto',
reuse_fp16_shard=use_cpuadam,
)
model = model_builder(checkpoint=True).half()
col_model_deepcopy(zero_model, model)
model = model.cuda().float()
if use_cpuadam:
optimizer_class = CPUAdam
optim = optimizer_class(model.parameters(), lr=1e-3)
sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)
sharded_optim = ShardedOptimizerV2(zero_model,
sharded_optim,
initial_scale=2**5,
gpu_margin_mem_ratio=gpu_margin_mem_ratio)
amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False)
apex_model, apex_optimizer = convert_to_apex_amp(model, optim, amp_config)
if dist.get_world_size() > 1:
apex_model = DDP(apex_model, device_ids=[torch.cuda.current_device()])
for i, (data, label) in enumerate(train_dataloader):
if i > 5:
break
data, label = data.cuda(), label.cuda()
_run_step(apex_model, apex_optimizer, data, label, criterion, False)
_run_step(zero_model, sharded_optim, data, label, criterion, False)
check_sharded_model_params(model, zero_model, loose=True, reuse_fp16_shard=use_cpuadam)
for param in model.parameters():
assert not has_inf_or_nan(param)
def _run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
_run_test_sharded_optim_v2()
# use_cpuadam = True can be used with cpu_offload = False
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_sharded_optim_v2(world_size):
run_func = partial(_run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_sharded_optim_v2(world_size=2)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from common import (MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params)
def run_dist(rank, world_size, port, parallel_config):
colossalai.launch(config=parallel_config,
rank=rank,
world_size=world_size,
host='localhost',
port=port,
backend='nccl')
test_models = ['repeated_computed_layers', 'resnet18', 'bert']
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
with ZeroInitContext(target_device=torch.cuda.current_device(),
shard_strategy=gpc.config.zero.model_config.shard_strategy,
shard_param=True):
colo_model = model_builder(checkpoint=True)
colo_optimizer = optimizer_class(colo_model.parameters(), lr=1e-3)
engine, train_dataloader, _, _ = colossalai.initialize(colo_model,
optimizer=colo_optimizer,
criterion=criterion,
train_dataloader=train_dataloader)
torch_model = model_builder(checkpoint=True).half()
col_model_deepcopy(engine.model, torch_model)
torch_model = torch_model.cuda().float()
engine.train()
torch_optimizer = optimizer_class(torch_model.parameters(), lr=1e-3)
if dist.get_world_size() > 1:
torch_model = DDP(torch_model, device_ids=[torch.cuda.current_device()])
        for i, (data, label) in enumerate(train_dataloader):
if i > 4:
break
data, label = data.cuda(), label.cuda()
engine.zero_grad()
torch_optimizer.zero_grad()
if criterion:
output = engine(data)
loss = engine.criterion(output, label)
torch_output = torch_model(data)
torch_loss = engine.criterion(torch_output, label)
else:
loss = engine(data, label)
torch_loss = torch_model(data, label)
engine.backward(loss)
engine.step()
torch_loss.backward()
for param in torch_model.parameters():
if param.grad is not None:
assert not has_inf_or_nan(param.grad)
torch_optimizer.step()
if parallel_config == MP_PARALLEL_CONFIG:
check_params(torch_model, colo_model, loose=True)
elif parallel_config == ZERO_PARALLEL_CONFIG:
check_sharded_model_params(torch_model, colo_model, loose=True)
# FIXME: enable this test in next PR
@pytest.mark.skip
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_if_address_is_in_use()
def test_mp_engine(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=MP_PARALLEL_CONFIG)
mp.spawn(run_func, nprocs=world_size)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_zero_engine(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=ZERO_PARALLEL_CONFIG)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_engine(world_size=4)
|
from functools import partial
import torch
import torch.distributed as dist
from colossalai.logging import get_dist_logger
from colossalai.utils import checkpoint
from colossalai.zero.shard_utils import TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
LOGGER = get_dist_logger('zero_test')
MP_PARALLEL_CONFIG = dict(fp16=dict(mode=None,), parallel=dict(pipeline=dict(size=1), tensor=dict(size=2, mode=None)))
_ZERO_MODEL_CONFIG = dict(reduce_scatter_bucket_size_mb=25,
fp32_reduce_scatter=False,
tensor_placement_policy='cuda',
gradient_predivide_factor=1.0,
shard_strategy=TensorShardStrategy(),
reuse_fp16_shard=False)
_ZERO_OPTIMIZER_CONFIG = dict(initial_scale=2**5,
min_scale=1,
growth_factor=2,
backoff_factor=0.5,
growth_interval=1000,
hysteresis=2,
max_scale=2**32)
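# A minimal sketch of how the dynamic loss-scale options above typically interact
# (an illustration under standard dynamic-loss-scaling assumptions, not the
# library's exact implementation; hysteresis is omitted here):
def _sketch_update_scale(scale, overflow, good_steps, cfg=_ZERO_OPTIMIZER_CONFIG):
    if overflow:
        # gradient overflow: back off and restart the growth counter
        return max(scale * cfg['backoff_factor'], cfg['min_scale']), 0
    good_steps += 1
    if good_steps % cfg['growth_interval'] == 0:
        # a full interval without overflow: grow the scale
        scale = min(scale * cfg['growth_factor'], cfg['max_scale'])
    return scale, good_steps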
ZERO_PARALLEL_CONFIG = dict(fp16=dict(mode=None,),
zero=dict(
model_config=_ZERO_MODEL_CONFIG,
optimizer_config=_ZERO_OPTIMIZER_CONFIG,
),
parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)))
CONFIG = dict(fp16=dict(mode=None,),
zero=dict(level=3,
verbose=False,
offload_optimizer_config=dict(device='cpu', pin_memory=True, buffer_count=5, fast_init=False),
offload_param_config=dict(device='cpu',
pin_memory=True,
buffer_count=5,
buffer_size=1e8,
max_in_cpu=1e9)),
parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)))
def run_fwd_bwd(model, data, label, criterion, enable_autocast=False):
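    # One forward/backward pass; ShardedModelV2 must own the backward call so its
    # ZeRO hooks can reduce and re-shard the gradients.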
model.train()
with torch.cuda.amp.autocast(enabled=enable_autocast):
if criterion:
y = model(data)
loss = criterion(y, label)
else:
loss = model(data, label)
loss = loss.float()
if isinstance(model, ShardedModelV2):
model.backward(loss)
else:
loss.backward()
def checkpoint_wrapper(module, enable=True):
if enable:
module.forward = partial(checkpoint, module.forward)
return module
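# Note: checkpoint_wrapper reroutes module.forward through colossalai's activation
# checkpointing, so intermediate activations are recomputed during backward
# instead of being kept in memory.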
def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
if loose:
return torch.allclose(tensor_a, tensor_b, atol=1e-2, rtol=1e-3)
return torch.allclose(tensor_a, tensor_b)
def check_grads(model, zero_model, loose=False):
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_grad = zero_p.grad.clone().to(p.device)
grad = p.grad.float()
assert grad.dtype == zero_grad.dtype
assert allclose(grad, zero_grad, loose=loose)
def check_params(model, zero_model, loose=False):
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_p = zero_p.clone().to(p.device)
# assert p.dtype == zero_p.dtype
assert allclose(p.float(), zero_p.float(), loose=loose), f"diff {p.float() - zero_p.float()}"
def check_grads_padding(model, zero_model, loose=False):
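    # ZeRO flattens each replicated parameter and splits it into world_size chunks;
    # the last chunk may be zero-padded, so the local chunk is compared against the
    # gradient payload truncated to the chunk length.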
rank = dist.get_rank()
for (name, p), (zero_name, zero_p) in zip(model.named_parameters(), zero_model.named_parameters()):
# zero_grad = zero_p.grad.clone().to(p.device)
if zero_p.colo_attr.is_replicated:
zero_grad = zero_p.colo_attr.grad_payload.clone().to(p.device)
chunks = torch.flatten(p.grad).chunk(dist.get_world_size())
if rank >= len(chunks):
continue
grad = chunks[rank].float()
if zero_grad.size(0) > grad.size(0):
zero_grad = zero_grad[:grad.size(0)]
else:
zero_grad = zero_p.colo_attr.grad_payload
grad = p.grad.to(zero_grad.dtype)
assert grad.dtype == zero_grad.dtype
assert allclose(grad, zero_grad, loose=loose), f'diff: {grad - zero_grad}'
def check_params_padding(model, zero_model, loose=False):
rank = dist.get_rank()
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_p = zero_p.clone().to(p.device)
chunks = torch.flatten(p).chunk(dist.get_world_size())
if rank >= len(chunks):
continue
p = chunks[rank]
if zero_p.size(0) > p.size(0):
zero_p = zero_p[:p.size(0)]
assert p.dtype == zero_p.dtype
assert allclose(p, zero_p, loose=loose)
def check_sharded_model_params(model, zero_model, loose=False, reuse_fp16_shard=False):
rank = dist.get_rank()
for (name, p), (zero_name, zero_p) in zip(model.named_parameters(), zero_model.named_parameters()):
if zero_p.colo_attr.param_is_sharded:
zero_p = zero_p.colo_attr.data_payload.to(p.device).float()
chunks = torch.flatten(p).chunk(dist.get_world_size())
if rank >= len(chunks):
continue
p = chunks[rank].float()
if zero_p.size(0) > p.size(0):
zero_p = zero_p[:p.size(0)]
else:
zero_p = zero_p.colo_attr.data_payload.to(p.device)
assert p.dtype == zero_p.dtype, "Parameter `{}`:\n{} vs {}".format(name, p.dtype, zero_p.dtype)
assert allclose(p, zero_p, loose=loose), f'{p} vs {zero_p}'
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from common import CONFIG
import colossalai
from colossalai.gemini.memory_tracer.utils import colo_model_mem_usage
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_device_memory_used
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy
from tests.components_to_test.registry import non_distributed_component_funcs
@parameterize("init_device_type", ['cpu', 'cuda'])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_model_test(init_device_type, shard_strategy_class):
logger = get_dist_logger("test_zero_init")
for name, get_components_func in non_distributed_component_funcs._registry.items():
        # Because ZeroInitContext automatically casts parameters to fp16 and the
        # beit model initializes weights with tensor.erfinv_(), which does not
        # support Half on CPU, we skip the beit model here.
if name == 'beit':
continue
model_builder, _, _, _, _ = get_components_func()
if init_device_type == 'cuda':
init_device = get_current_device()
elif init_device_type == 'cpu':
init_device = torch.device("cpu")
else:
continue
model_numel_tensor = torch.zeros(1, dtype=torch.int)
with ZeroInitContext(target_device=init_device,
shard_strategy=shard_strategy_class(),
shard_param=True,
model_numel_tensor=model_numel_tensor):
model = model_builder(checkpoint=True)
for param in model.parameters():
assert hasattr(param, 'colo_attr')
assert param.colo_attr.sharded_data_tensor.dtype == torch.half
assert param.colo_attr.sharded_data_tensor.is_sharded
assert param.colo_attr.data_payload.device.type == init_device.type, \
f'{param.colo_attr.data_payload.device.type} vs. {init_device.type}'
cuda_mem_use, _ = colo_model_mem_usage(model)
model_data_cuda_mem_MB = cuda_mem_use / 1e6
logger.info(f"Existing ZeRO Context.\nModel Data CUDA Memory {model_data_cuda_mem_MB} MB", ranks=[0])
sys_cuda_mem_MB = colo_device_memory_used(get_current_device()) / 1e6
logger.info(f"System CUDA Memory Usage {sys_cuda_mem_MB} MB", ranks=[0])
logger.info(f"Model Number Parameter {model_numel_tensor.numpy()[0]/1e6} M", ranks=[0])
def run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_model_test()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
@rerun_if_address_is_in_use()
def test_zero_init_context(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_init_context(1)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from common import CONFIG, check_grads_padding, run_fwd_bwd
from torch.nn.parallel import DistributedDataParallel as DDP
import colossalai
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model._utils import cast_tensor_to_fp16
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from tests.components_to_test.registry import non_distributed_component_funcs
@parameterize("enable_autocast", [True])
@parameterize("shard_strategy_class", [BucketTensorShardStrategy])
def run_model_test(enable_autocast, shard_strategy_class):
test_models = ['repeated_computed_layers', 'resnet18', 'bert', 'hanging_param_model']
shard_strategy = shard_strategy_class()
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, _, _, criterion = get_components_func()
with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()),
shard_strategy=shard_strategy,
shard_param=True):
zero_model = model_builder(checkpoint=True)
zero_model = ShardedModelV2(zero_model, shard_strategy)
model = model_builder(checkpoint=True).half()
col_model_deepcopy(zero_model, model)
model = model.cuda()
model = DDP(model, device_ids=[torch.cuda.current_device()])
for i, (data, label) in enumerate(train_dataloader):
if i > 5:
break
data, label = cast_tensor_to_fp16(data).cuda(), label.cuda()
run_fwd_bwd(model, data, label, criterion, enable_autocast)
run_fwd_bwd(zero_model, data, label, criterion, enable_autocast)
check_grads_padding(model, zero_model, loose=True)
def run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_model_test()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_shard_model_v2(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_shard_model_v2(world_size=2)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from tests.components_to_test.registry import non_distributed_component_funcs
from common import CONFIG
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_zero_state_dict(shard_strategy_class):
test_models = ['repeated_computed_layers', 'resnet18']
shard_strategy = shard_strategy_class()
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, test_dataloader, optimizer, criterion = get_components_func()
with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()),
shard_strategy=shard_strategy,
shard_param=True):
zero_model = model_builder(checkpoint=True)
zero_model = ShardedModelV2(zero_model, shard_strategy)
model = model_builder(checkpoint=True).half()
col_model_deepcopy(zero_model, model)
model = model.cuda()
zero_state_dict = zero_model.state_dict()
for key, val in model.state_dict().items():
assert torch.equal(val, zero_state_dict[key].to(val.device))
def run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_zero_state_dict()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_zero_state_dict(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_state_dict(2)
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close
import colossalai
from colossalai.tensor import ProcessGroup
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port, get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.zero import LowLevelZeroOptimizer
from tests.test_tensor.common_utils import set_seed, split_param_col_tp1d, split_param_row_tp1d, tensor_shard_equal
def strict_shard_equal(tensor, shard, tp_pg, rtol=1e-3, atol=1e-4):
return tensor_shard_equal(tensor, shard, tp_pg.tp_local_rank(), tp_pg.tp_world_size(), rtol, atol)
class MlpModel(nn.Module):
def __init__(self):
super(MlpModel, self).__init__()
self.linear1 = nn.Linear(32, 128)
self.act = nn.GELU()
self.linear2 = nn.Linear(128, 32)
def forward(self, x):
y = self.linear1(x)
y = self.act(y)
y = self.linear2(y)
return x + y
@parameterize("overlap_flag", [False, True])
@parameterize("partition_flag", [False, True])
def exam_zero_with_tp(overlap_flag, partition_flag):
set_seed(233010)
tp_pg = ProcessGroup(tp_degree=2)
with ColoInitContext(device=get_current_device(), default_pg=tp_pg):
hybrid_model = MlpModel()
torch_model = MlpModel().cuda()
for pt, ph in zip(torch_model.parameters(), hybrid_model.parameters()):
pt.data.copy_(ph.data)
for name, param in hybrid_model.named_parameters():
if 'linear1' in name:
split_param_row_tp1d(param, tp_pg)
param.compute_spec.set_output_replicate(False)
if 'linear2.weight' in name:
split_param_col_tp1d(param, tp_pg)
torch_model = DDP(torch_model, device_ids=[tp_pg.rank()], process_group=tp_pg.dp_process_group())
torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-2) # set to 1e-2 for torch-1.11
hybrid_optim = torch.optim.Adam(hybrid_model.parameters(), lr=1e-2)
hybrid_optim = LowLevelZeroOptimizer(hybrid_optim,
initial_scale=2,
clip_grad_norm=1.0,
overlap_communication=overlap_flag,
partition_grad=partition_flag)
dp_local_rank = tp_pg.dp_local_rank()
set_seed(255 + dp_local_rank)
data = torch.randn(8, 32, device=get_current_device())
torch_loss = torch_model(data).sum()
hybrid_loss = hybrid_model(data).sum()
assert_close(torch_loss, hybrid_loss)
torch_loss.backward()
torch.nn.utils.clip_grad_norm_(torch_model.parameters(), 1.0)
hybrid_optim.backward(hybrid_loss)
torch_optim.step()
hybrid_optim.step()
for (name, pt), ph in zip(torch_model.named_parameters(), hybrid_model.parameters()):
assert strict_shard_equal(pt.data, ph.data, tp_pg)
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, port=port, host='localhost')
exam_zero_with_tp()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_zero_with_tp():
world_size = 4
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_with_tp()
|
from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import colossalai
from colossalai.tensor import ProcessGroup
from colossalai.utils import free_port, get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.zero import LowLevelZeroOptimizer
class MlpModel(nn.Module):
def __init__(self):
super(MlpModel, self).__init__()
self.linear1 = nn.Linear(128, 256)
self.linear2 = nn.Linear(256, 512)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
def exam_zero_init():
dp_2_tp_2_pg = ProcessGroup(dp_degree=2, tp_degree=2)
model1 = MlpModel().cuda()
with ColoInitContext(device=get_current_device(), default_pg=dp_2_tp_2_pg):
model2 = MlpModel()
optimizer1 = LowLevelZeroOptimizer(torch.optim.Adam(model1.parameters(), lr=1))
optimizer2 = LowLevelZeroOptimizer(torch.optim.Adam(model2.parameters(), lr=1))
assert optimizer1._local_rank == optimizer2._local_rank
assert optimizer1._world_size == optimizer2._world_size
assert optimizer1._dp_global_ranks == optimizer2._dp_global_ranks
mp_group1 = optimizer1._mp_torch_group
mp_group2 = optimizer2._mp_torch_group
assert dist.get_world_size(mp_group1) == dist.get_world_size(mp_group2)
assert dist.get_rank(mp_group1) == dist.get_rank(mp_group2)
def run_dist(rank, world_size, port):
config_dict = dict(parallel=dict(data=2, tensor=dict(size=2, mode='1d')))
colossalai.launch(config=config_dict, rank=rank, world_size=world_size, port=port, host='localhost')
exam_zero_init()
@pytest.mark.dist
def test_zero_init():
world_size = 4
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_init()
|
import copy
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close
import colossalai
from colossalai.testing.random import seed_all
from colossalai.utils import free_port
from colossalai.zero import LowLevelZeroOptimizer
class MlpModel(nn.Module):
def __init__(self):
super(MlpModel, self).__init__()
self.linear1 = nn.Linear(128, 256)
self.linear2 = nn.Linear(256, 512)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
def exam_zero_1_2_grad_acc():
local_rank = torch.distributed.get_rank()
seed_all(2009)
# create model
zero1_model = MlpModel().cuda()
zero2_model = copy.deepcopy(zero1_model)
# create optimizer
zero1_optimizer = torch.optim.Adam(zero1_model.parameters(), lr=1)
zero2_optimizer = torch.optim.Adam(zero2_model.parameters(), lr=1)
zero1_optimizer = LowLevelZeroOptimizer(zero1_optimizer,
overlap_communication=True,
initial_scale=32,
clip_grad_norm=1.0,
verbose=True)
zero2_optimizer = LowLevelZeroOptimizer(zero2_optimizer,
overlap_communication=True,
partition_grad=True,
initial_scale=32,
clip_grad_norm=1.0)
# create data
seed_all(2021 + local_rank)
input_data1 = torch.randn(32, 128).cuda()
input_data2 = torch.randn(32, 128).cuda()
def fwd_bwd_func(number, cur_data):
# zero-dp forward
zero1_output = zero1_model(cur_data)
zero2_output = zero2_model(cur_data)
assert torch.equal(zero1_output, zero2_output)
# zero-dp backward
zero1_optimizer.backward(zero1_output.sum().float(), sync_grad=False)
zero2_optimizer.backward(zero2_output.sum().float(), sync_grad=False)
for (n, z1p), z2p in zip(zero1_model.named_parameters(), zero2_model.parameters()):
if z2p.grad is not None:
# print(local_rank, n, z1p.shape, torch.max(z2p.grad), torch.max(torch.abs(z1p.grad - z2p.grad)))
assert torch.equal(z1p.grad, z2p.grad)
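        # sync_grad=False above keeps gradients local across accumulation steps;
        # a single reduction per step happens in _sync_grad() below.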
zero1_optimizer._sync_grad()
zero2_optimizer._sync_grad()
fwd_bwd_func(0, input_data1)
fwd_bwd_func(1, input_data2)
# step
zero1_optimizer.step()
zero2_optimizer.step()
# check updated param
for z1p, z2p in zip(zero1_model.parameters(), zero2_model.parameters()):
assert torch.equal(z1p.data, z2p.data)
def exam_zero_1_grad_acc():
local_rank = torch.distributed.get_rank()
grad_scale = 32
seed_all(2008)
# create models
zero_model = MlpModel()
torch_model = copy.deepcopy(zero_model)
seed_all(2008)
zero_model = zero_model.cuda()
torch_model = DDP(torch_model.cuda(), bucket_cap_mb=0)
# create optimizer
zero_optimizer = torch.optim.Adam(zero_model.parameters(), lr=1)
    # we only test stage 1 here;
    # `check_sharded_param_consistency.py` tests whether
    # stages 1 and 2 produce exactly the same results
zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
overlap_communication=False,
initial_scale=grad_scale,
reduce_bucket_size=262144,
clip_grad_norm=1.0)
torch_optimizer = torch.optim.Adam(torch_model.parameters(), lr=1)
# create data
seed_all(2022 + local_rank)
input_data1 = torch.randn(32, 128).cuda()
input_data2 = torch.randn(32, 128).cuda()
def fwd_bwd_func(number, cur_data, check_flag):
# zero-dp forward
zero_output = zero_model(cur_data)
# torch-ddp forward
torch_output = torch_model(cur_data)
assert torch.equal(zero_output, torch_output)
# zero-dp backward
zero_optimizer.backward(zero_output.sum().float(), sync_grad=False)
# torch-ddp backward
torch_output.sum().backward()
if check_flag:
# check grad
for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
unscale_grad = z1p.grad / grad_scale
# print(n, p.shape, torch.max(torch.abs(p.grad - unscale_grad)))
assert torch.equal(p.grad, unscale_grad)
zero_optimizer._sync_grad()
fwd_bwd_func(0, input_data1, True)
fwd_bwd_func(1, input_data2, False)
zero_optimizer.step()
torch.nn.utils.clip_grad_norm_(torch_model.parameters(), 1.0)
torch_optimizer.step()
# check updated param
for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
# print(n, p.shape, torch.max(p.data), torch.max(z1p.data), torch.max(torch.abs(p.data - z1p.data)))
assert_close(p.data, z1p.data)
def run_dist(rank, world_size, port):
colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
exam_zero_1_grad_acc()
exam_zero_1_2_grad_acc()
@pytest.mark.dist
def test_grad_accumulation():
world_size = 2
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_grad_accumulation()
|
import copy
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close
import colossalai
from colossalai.testing.random import seed_all
from colossalai.utils import free_port
from colossalai.zero import LowLevelZeroOptimizer
class MlpModel(nn.Module):
def __init__(self):
super(MlpModel, self).__init__()
self.linear1 = nn.Linear(128, 256)
self.linear2 = nn.Linear(256, 512)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
def half_close(a, b, loose=False):
rtol = None
atol = None
if loose:
rtol = 5e-2
atol = 5e-4
a = a.detach().half()
b = b.detach().half()
assert_close(a, b, rtol=rtol, atol=atol)
def exam_zero_1_2():
"""
In this test, we want to test whether zero stage 1 and 2
deliver the same numerical results despite different communication
pattern
we use these prefixes to differentiate the zero stage
oss: partition optimizer states
pg: partition gradients and optimizer states
"""
local_rank = torch.distributed.get_rank()
seed_all(2001)
# create model
zero1_model = MlpModel().cuda()
zero2_model = copy.deepcopy(zero1_model)
# create optimizer
zero1_optimizer = torch.optim.Adam(zero1_model.parameters(), lr=1)
zero2_optimizer = torch.optim.Adam(zero2_model.parameters(), lr=1)
zero1_optimizer = LowLevelZeroOptimizer(zero1_optimizer,
overlap_communication=True,
initial_scale=128,
verbose=True)
zero2_optimizer = LowLevelZeroOptimizer(zero2_optimizer,
overlap_communication=True,
partition_grad=True,
initial_scale=128)
# create data
seed_all(2001 + local_rank)
input_data = torch.randn(32, 128).cuda()
zero1_output = zero1_model(input_data)
zero2_output = zero2_model(input_data)
assert torch.equal(zero1_output, zero2_output)
# zero-dp backward
zero1_optimizer.backward(zero1_output.mean().float(), sync_grad=False)
zero2_optimizer.backward(zero2_output.mean().float(), sync_grad=False)
for (n, z1p), z2p in zip(zero1_model.named_parameters(), zero2_model.parameters()):
if z2p.grad is not None:
# print(local_rank, n, z1p.shape, torch.max(z2p.grad), torch.max(torch.abs(z1p.grad - z2p.grad)))
assert torch.equal(z1p.grad, z2p.grad)
zero1_optimizer._sync_grad()
zero2_optimizer._sync_grad()
# step
zero1_optimizer.step()
zero2_optimizer.step()
# check updated param
for z1p, z2p in zip(zero1_model.parameters(), zero2_model.parameters()):
assert torch.equal(z1p.data, z2p.data)
def exam_zero_1_torch_ddp():
"""
In this test, two pairs of model and optimizers are created.
1. zero: use sharded optimizer and fp16 parameters
2. torch: use torch DDP and fp32 parameters
We feed these two sets of models with the same input and check if the
differences in model output and updated parameters are within tolerance.
"""
local_rank = torch.distributed.get_rank()
seed_all(1453)
# create models
zero_model = MlpModel()
torch_model = copy.deepcopy(zero_model)
zero_model = zero_model.cuda().half()
torch_model = DDP(torch_model.cuda(), bucket_cap_mb=0)
# for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
# half_close(p.data, z1p.data)
# create optimizer
zero_optimizer = torch.optim.SGD(zero_model.parameters(), lr=1)
    # we only test stage 1 here;
    # `check_sharded_param_consistency.py` tests whether
    # stages 1 and 2 produce exactly the same results
zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
overlap_communication=True,
initial_scale=1,
reduce_bucket_size=262144)
torch_optimizer = torch.optim.SGD(torch_model.parameters(), lr=1)
seed_all(1453 + local_rank)
    # create input data
input_data = torch.rand(32, 128).cuda()
# zero-dp forward
zero_output = zero_model(input_data.half())
# torch-ddp forward
torch_output = torch_model(input_data)
half_close(zero_output, torch_output, loose=True)
# zero-dp backward
zero_optimizer.backward(zero_output.mean().float(), sync_grad=False)
# torch-ddp backward
torch_output.mean().backward()
# check grad
for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
half_close(p.grad, z1p.grad, loose=True)
# zero-dp step
zero_optimizer._sync_grad()
zero_optimizer.step()
# torch ddp step
torch_optimizer.step()
# check updated param
for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
# print(n, torch.max(torch.abs(p.data - z1p.data)))
half_close(p.data, z1p.data, loose=True)
def run_dist(rank, world_size, port):
colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
exam_zero_1_torch_ddp()
exam_zero_1_2()
@pytest.mark.dist
def test_zero_1_2():
world_size = 2
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_1_2()
|
import pytest
import torch
from einops import rearrange
from colossalai.kernel.cuda_native.flash_attention import HAS_FLASH_ATTN, HAS_MEM_EFF_ATTN, HAS_TRITON
if HAS_FLASH_ATTN:
from colossalai.kernel.cuda_native.flash_attention import (
MaskedFlashAttention,
flash_attention_q_k_v,
flash_attention_q_kv,
flash_attention_qkv,
)
if HAS_TRITON:
from colossalai.kernel.cuda_native.flash_attention import triton_flash_attention
if HAS_MEM_EFF_ATTN:
from colossalai.kernel.cuda_native.flash_attention import LowerTriangularMask, MemoryEfficientAttention
def baseline_attention(Z, N_CTX, H, q, k, v, sm_scale):
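    # Eager reference for causal scaled-dot-product attention:
    # out = softmax(mask(Q @ K^T * sm_scale)) @ V, with the upper triangle masked out.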
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
    p[:, :, M == 0] = float("-inf")    # causal mask, broadcast over batch and heads
p = torch.softmax(p.float(), dim=-1).half()
ref_out = torch.matmul(p, v)
return ref_out
@pytest.mark.skipif(not HAS_TRITON, reason="triton is not available")
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(3, 4, 2, 16)])
def test_triton_flash_attention(Z, H, N_CTX, D_HEAD, dtype=torch.float16):
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
sm_scale = 0.3
dout = torch.randn_like(q)
ref_out = baseline_attention(Z, N_CTX, H, q, k, v, sm_scale)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = triton_flash_attention(q, k, v, sm_scale)
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-3)
assert torch.allclose(ref_dv, tri_dv, atol=1e-3)
assert torch.allclose(ref_dk, tri_dk, atol=1e-3)
assert torch.allclose(ref_dq, tri_dq, atol=1e-3)
@pytest.mark.skipif(not HAS_FLASH_ATTN, reason="flash is not available")
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(3, 4, 2, 16)])
def test_flash_attention(Z, H, N_CTX, D_HEAD, dtype=torch.float16):
torch.manual_seed(20)
    q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
    k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
    v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
sm_scale = 0.3
dout = torch.randn_like(q)
# reference implementation
ref_out = baseline_attention(Z, N_CTX, H, q, k, v, sm_scale)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# flash implementation
q, k, v = map(lambda x: rearrange(x, 'z h n d -> (z n) h d'), [q, k, v])
dout = rearrange(dout, 'z h n d -> (z n) h d').detach()
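    # The flash kernels take packed (batch*seq, heads, head_dim) inputs; the loop
    # below exercises all three entry points: separate q/k/v, packed kv, packed qkv.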
for i in range(3):
if i == 0:
tri_out = flash_attention_q_k_v(q, k, v, sm_scale, Z, N_CTX, N_CTX, causal=True)
elif i == 1:
kv = torch.cat((k.unsqueeze(1), v.unsqueeze(1)), dim=1)
tri_out = flash_attention_q_kv(q, kv, sm_scale, Z, N_CTX, N_CTX, causal=True)
else:
qkv = torch.cat((q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1)), dim=1)
tri_out = flash_attention_qkv(qkv, sm_scale, Z, N_CTX, causal=True)
tri_out.backward(dout, retain_graph=True)
if i == 0:
tri_dq, tri_dk, tri_dv, = torch.autograd.grad(tri_out, (q, k, v), dout)
tri_out, tri_dq, tri_dk, tri_dv = map(lambda x: rearrange(x, '(z n) h d -> z h n d', z=Z),
(tri_out, tri_dq, tri_dk, tri_dv))
elif i == 1:
tri_dq, tri_dkv, = torch.autograd.grad(tri_out, (q, kv), dout)
tri_dk, tri_dv = torch.chunk(tri_dkv, 2, dim=1)
tri_out, tri_dq, tri_dk, tri_dv = map(lambda x: rearrange(x, '(z n) h d -> z h n d', z=Z),
(tri_out, tri_dq, tri_dk.squeeze(1), tri_dv.squeeze(1)))
else:
tri_dqkv, = torch.autograd.grad(tri_out, (qkv), dout)
tri_dq, tri_dk, tri_dv = torch.chunk(tri_dqkv, 3, dim=1)
tri_out, tri_dq, tri_dk, tri_dv = map(lambda x: rearrange(x, '(z n) h d -> z h n d', z=Z),
(tri_out, tri_dq.squeeze(1), tri_dk.squeeze(1), tri_dv.squeeze(1)))
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-3)
assert torch.allclose(ref_dv, tri_dv, atol=1e-3)
assert torch.allclose(ref_dk, tri_dk, atol=1e-3)
assert torch.allclose(ref_dq, tri_dq, atol=1e-3)
@pytest.mark.skipif(not HAS_FLASH_ATTN, reason="flash is not available")
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(3, 4, 2, 16)])
def test_masked_flash_attention(Z, H, N_CTX, D_HEAD, dtype=torch.float16):
attn = MaskedFlashAttention(N_CTX, D_HEAD, 0.1)
qkv = torch.randn((Z, H, 3 * N_CTX * D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
attention_mask = torch.randint(2, (Z, H)).cuda().bool()
out = attn(qkv, attention_mask)
dout = torch.rand_like(out)
out.backward(dout)
@pytest.mark.skipif(not HAS_MEM_EFF_ATTN, reason="xformers is not available")
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 8, 4, 16)])
def test_memory_efficient_attention(Z, H, N_CTX, D_HEAD, dtype=torch.float16):
attn = MemoryEfficientAttention(N_CTX * D_HEAD, N_CTX, 0.1)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
out = attn(q, k, v, attention_mask=LowerTriangularMask())
dout = torch.rand_like(out)
out.backward(dout)
if __name__ == '__main__':
test_flash_attention(3, 4, 2, 16)
|
import torch
from colossalai.utils.model.lazy_init_context import LazyInitContext
from torchvision.models import resnet34
import random
import numpy as np
MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)
def test_lazy_init_with_meta():
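    # to_meta=True builds the model on the meta device: parameters and buffers
    # carry shape/dtype only and allocate no storage until lazy_init_parameters().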
ctx = LazyInitContext(to_meta=True)
with ctx:
model = resnet34(num_classes=10)
for param in model.parameters():
assert param.is_meta
for buffer in model.buffers():
assert buffer.is_meta
ctx.lazy_init_parameters(model)
for name, param in model.named_parameters():
assert not param.is_meta, name
for buffer in model.buffers():
assert not buffer.is_meta
def test_lazy_init_without_meta():
ctx = LazyInitContext(to_meta=False)
with ctx:
model = resnet34(num_classes=10)
for param in model.parameters():
assert not param.is_meta
for buffer in model.buffers():
assert not buffer.is_meta
conv1_weight_before_init = model.conv1.weight.clone()
ctx.lazy_init_parameters(model)
conv1_weight_after_init = model.conv1.weight.clone()
assert not torch.allclose(conv1_weight_after_init, conv1_weight_before_init)
if __name__ == '__main__':
test_lazy_init_with_meta()
test_lazy_init_without_meta()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import copy
import colossalai
from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import disable_existing_loggers
from colossalai.utils import checkpoint, clip_grad_norm_fp32, free_port
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from colossalai.zero.shard_utils.tensor_shard_strategy import TensorShardStrategy
from functools import partial
from colossalai.testing import parameterize, rerun_if_address_is_in_use
def checkpoint_wrapper(module, enable=True):
if enable:
module.forward = partial(checkpoint, module.forward, False)
return module
class Net(nn.Module):
def __init__(self, checkpoint=False) -> None:
super().__init__()
self.fc1 = nn.Linear(5, 5)
self.fc2 = nn.Linear(5, 5)
self.fc3 = nn.Linear(5, 1)
if checkpoint:
self.fc1 = checkpoint_wrapper(self.fc1)
self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
def run_step(model, optimizer, x, enable_autocast=False, norm_type=2.0):
model.train()
optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=enable_autocast):
y = model(x)
loss = y.sum()
loss = loss.float()
loss.backward()
clip_grad(model, norm_type)
optimizer.step()
def clip_grad(model, norm_type):
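    # DDP holds full fp32 gradients, so torch's clip_grad_norm_ applies directly;
    # otherwise use colossalai's clip_grad_norm_fp32, which is aware of fp16 grads.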
if isinstance(model, DDP):
clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=norm_type)
else:
clip_grad_norm_fp32(model.parameters(), max_norm=1.0, norm_type=norm_type)
def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
if loose:
return torch.allclose(tensor_a, tensor_b, atol=1e-3, rtol=1e-3)
return torch.allclose(tensor_a, tensor_b)
def check_grads(model, zero_model, loose=False):
rank = dist.get_rank()
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_grad = zero_p.grad.clone().to(p.device)
chunks = torch.flatten(p.grad).chunk(4)
if rank >= len(chunks):
continue
grad = chunks[rank]
if zero_p.zero_shard_padding > 0:
zero_grad = zero_grad[:-zero_p.zero_shard_padding]
assert grad.dtype == zero_grad.dtype
assert allclose(grad, zero_grad, loose=loose)
def check_params(model, zero_model, loose=False):
rank = dist.get_rank()
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_shard_padding = zero_p.zero_shard_padding
zero_p = zero_p.clone().to(p.device)
chunks = torch.flatten(p).chunk(4)
if rank >= len(chunks):
continue
p = chunks[rank]
if zero_shard_padding > 0:
zero_p = zero_p[:-zero_shard_padding]
assert p.dtype == zero_p.dtype
assert allclose(p, zero_p, loose=loose)
def run_dist(rank, world_size, port):
disable_existing_loggers()
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_zero_clip_grad():
world_size = 4
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_clip_grad()
|
from colossalai.tensor import distspec, ColoTensorSpec, ProcessGroup
from colossalai.tensor.colo_parameter import ColoParameter
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, get_current_device
from torch.nn.utils import clip_grad_norm_
from functools import partial
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.common import clip_grad_norm
from torch.nn.parameter import Parameter
def close(num: float, other: float, rtol: float = 1e-5, atol: float = 1e-8):
    # mirrors torch.isclose; abs() keeps the bound valid even for negative values
    return abs(num - other) <= atol + rtol * abs(other)
def shard_param(p: ColoParameter) -> None:
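    # Shard the parameter and its gradient along dim 0 across the tensor-parallel
    # group, keeping only the local slice on each rank.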
pg = p.get_process_group()
p._redistribute(distspec.ShardSpec([0], [pg.tp_world_size()]))
p.grad = p.grad.chunk(pg.tp_world_size(), 0)[pg.tp_local_rank()].clone().detach()
def check_grad_equal(p: Parameter, colo_p: ColoParameter) -> None:
pg = colo_p.get_process_group()
if p.shape != colo_p.shape:
grad = p.grad.chunk(pg.tp_world_size(), 0)[pg.tp_local_rank()]
else:
grad = p.grad
assert torch.allclose(grad, colo_p.grad), f'diff: {torch.abs(grad - colo_p.grad)}'
@parameterize('dtype', [torch.float])
@parameterize('device', ['mixed', 'cuda', 'cpu'])
@parameterize('norm_type', [2.0, 3.0, float('inf')])
def run_grad_clip_norm(world_size: int, dtype: torch.dtype, device: str, norm_type: float):
print(f'{world_size}, {dtype}, {device}, {norm_type}')
cuda_device = get_current_device()
devices = [cuda_device] * 4
if device == 'cpu':
devices = [torch.device('cpu')] * 4
elif device == 'mixed':
devices = [cuda_device] * 2 + [torch.device('cpu')] * 2
pg = ProcessGroup(tp_degree=world_size)
params = [Parameter(torch.empty(4, 4, dtype=dtype, device=devices[i])) for i in range(4)]
colo_params = [
ColoParameter(torch.empty(4, 4, dtype=dtype, device=devices[i]), spec=ColoTensorSpec(pg)) for i in range(4)
]
for p, colo_p in zip(params, colo_params):
grad = torch.rand_like(p)
p.grad = grad
colo_p.grad = grad.clone().detach()
shard_param(colo_params[0])
shard_param(colo_params[2])
torch_norm = clip_grad_norm_(params, 1.0, norm_type=norm_type)
colo_norm = clip_grad_norm(colo_params, 1.0, norm_type=norm_type)
assert close(torch_norm, colo_norm), f'diff: {abs(torch_norm-colo_norm)}'
for p, colo_p in zip(params, colo_params):
check_grad_equal(p, colo_p)
def run_dist(rank, world_size, port):
disable_existing_loggers()
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_grad_clip_norm(world_size=world_size)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_zero_clip_grad(world_size: int):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_clip_grad(2)
|
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.zero.sharded_param import ShardedTensor
from colossalai.gemini.tensor_utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline
import colossalai
import torch
import torch.multiprocessing as mp
def run_tensor_move(rank):
colossalai.launch(config={}, rank=0, world_size=1, host='localhost', port=free_port(), backend='nccl')
src_t = torch.ones(2, 3).cuda()
tgt_t = torch.zeros(2, 3)
colo_model_data_tensor_move(src_t, tgt_t)
    assert (torch.sum(tgt_t) == 6.0), f"{torch.sum(tgt_t)} vs. 6.0"
src_t = torch.ones(2, 3)
tgt_t = torch.zeros(2, 3).cuda().half()
colo_model_data_tensor_move(src_t, tgt_t)
    # src_t's storage has been released by the move
    assert (src_t.numel() == 0)
    assert (torch.sum(tgt_t) == 6.0), f"{torch.sum(tgt_t)} vs. 6.0"
src_t = ShardedTensor(torch.ones(2, 3))
tgt_t = ShardedTensor(torch.zeros(2, 3).cuda().half())
colo_model_data_tensor_move(src_t, tgt_t)
assert (torch.sum(tgt_t.payload) == 6.0), f"{torch.sum(tgt_t.payload)} vs. 6.0"
assert (tgt_t.device.type == 'cuda')
colo_model_data_tensor_move_inline(tgt_t, torch.device('cpu'))
assert (tgt_t.device.type == 'cpu')
@rerun_if_address_is_in_use()
def test_tensor_move():
mp.spawn(run_tensor_move, nprocs=1)
if __name__ == '__main__':
test_tensor_move()
|
import os, shutil
import torch
import pytest
from copy import deepcopy
from functools import partial
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import MultiplicativeLR
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
import colossalai
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.tensor import ComputePattern, ComputeSpec, ColoTensor, ShardSpec, ProcessGroup
from colossalai.utils.checkpoint import save_checkpoint, load_checkpoint
from colossalai.nn.optimizer import ColossalaiOptimizer
from tests.components_to_test.registry import non_distributed_component_funcs
def init_1d_row_linear(weight: ColoTensor, pg: ProcessGroup):
spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
weight.set_process_group(pg)
weight.set_tensor_spec(*spec)
def init_1d_col_linear(weight, pg):
spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
weight.set_process_group(pg)
weight.set_tensor_spec(*spec)
def init_1d_row_embedding(weight, pg):
spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
weight.set_process_group(pg)
weight.set_tensor_spec(*spec)
def init_1d_col_embedding(weight, pg):
spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
weight.set_process_group(pg)
weight.set_tensor_spec(*spec)
def init_1d_row_for_linear_weight_spec(model, pg: ProcessGroup):
spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
for name, p in model.named_parameters():
if not isinstance(p, ColoTensor):
continue
if 'embed' in name and 'weight' in name:
init_1d_col_embedding(p, pg)
if 'proj1' in name and ('weight' in name or 'bias' in name):
init_1d_col_linear(p, pg)
if 'proj2' in name and 'weight' in name:
init_1d_row_linear(p, pg)
if 'classifier' in name and ('weight' in name or 'bias' in name):
init_1d_col_linear(p, pg)
def check_param_equal(model, torch_model):
for (n, p), (tn, tp) in zip(model.named_parameters(), torch_model.named_parameters()):
assert torch.all(p.data == tp.data), "{} went wrong.\n {} vs {}\n{}".format(n, p, tp, p.shape)
def remove(path):
""" param <path> could either be relative or absolute. """
if os.path.isfile(path) or os.path.islink(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
else:
raise ValueError("file {} is not a file or dir.".format(path))
def compare_optims(optim1, optim2):
state1 = optim1.state_dict()['state']
state2 = optim2.state_dict()['state']
for k, p1 in state1.items():
if k not in state2:
continue
p2 = state2[k]
for n, t1 in p1.items():
if n not in p2:
continue
t2 = p2[n]
if isinstance(t1, ColoTensor):
assert isinstance(t2, ColoTensor)
assert torch.allclose(t1, t2, rtol=0, atol=0)
def _run_checkpoint(model_name, init_spec_func, use_ddp, use_mp_reload, test_scheduler, pg):
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
# set_seed(1)
with ColoInitContext(device=get_current_device()):
model = model_builder(checkpoint=True)
if use_mp_reload:
if 'bert' == model_name:
for name, p in model.named_parameters():
if not isinstance(p, ColoTensor):
continue
# num_class = type_vocab_size = 2 | (8, 2)
if 'classifier' in name and 'weight' in name:
init_1d_row_linear(p, pg)
# num_class = vocab_size = 30524 | (30524, 8)
elif 'word_embeddings' in name and 'weight' in name:
init_1d_row_embedding(p, pg)
# num_class = seq_len = 512 | (512, 8)
elif 'position_embeddings' in name and 'weight' in name:
init_1d_row_embedding(p, pg)
# num_class = type_vocab_size = 2 | (2, 8)
elif 'token_type_embeddings' in name and 'weight' in name:
init_1d_col_embedding(p, pg)
elif p.process_group.tp_world_size() == 1:
p.set_process_group(pg)
elif "simple_net" == model_name:
init_spec_func(model, pg)
model_reload = deepcopy(model)
model = model.cuda()
model.eval()
model_reload = model_reload.cuda()
model_reload.eval()
opt_class = torch.optim.Adam
colo_optimizer = ColossalaiOptimizer(opt_class(model.parameters(), lr=0.1))
colo_optimizer_reload = ColossalaiOptimizer(opt_class(model_reload.parameters(), lr=0.1))
for i, (data, label) in enumerate(train_dataloader):
# Zero grad
colo_optimizer.zero_grad()
colo_optimizer_reload.zero_grad()
data = data.to(get_current_device())
label = label.to(get_current_device())
        # broadcast rank-0 data to all processes in the TP group
        dist.broadcast(data, pg.tp_rank_list()[0], pg.tp_process_group())
        dist.broadcast(label, pg.tp_rank_list()[0], pg.tp_process_group())
if criterion:
output = model(data)
output_reload = model_reload(data)
loss = criterion(output, label)
loss_reload = criterion(output_reload, label)
else:
loss = model(data, label)
loss_reload = model_reload(data, label)
loss.backward()
loss_reload.backward()
colo_optimizer.step()
colo_optimizer_reload.step()
if i > 2:
break
if not os.path.isdir('./checkpoint') and rank == 0:
os.mkdir('./checkpoint')
dist.barrier()
save_checkpoint('./checkpoint', 0, model, colo_optimizer, None)
load_checkpoint('./checkpoint', 0, model_reload, colo_optimizer_reload, None)
check_param_equal(model, model_reload)
compare_optims(colo_optimizer, colo_optimizer_reload)
if rank == 0:
remove('./checkpoint')
dist.barrier()
def run_dist(rank, world_size, port, use_ddp, use_mp_reload, test_scheduler):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
pg = ProcessGroup(tp_degree=world_size)
    # The BERT data loader runs in DDP mode, so input data is not replicated
    # across the tensor-parallel group (hence the broadcast in _run_checkpoint).
for model_name in ['bert']:
_run_checkpoint(model_name,
init_1d_row_for_linear_weight_spec,
use_ddp,
use_mp_reload,
test_scheduler=test_scheduler,
pg=pg)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@pytest.mark.parametrize('use_ddp', [False])
@pytest.mark.parametrize('use_mp_reload', [True, False])
# @pytest.mark.parametrize('test_scheduler', ['colossalai_cosine_warmup', 'torch_cosine', 'torch_lambda'])
@rerun_if_address_is_in_use()
def test_checkpoint(world_size, use_ddp, use_mp_reload, test_scheduler=None):
run_func = partial(run_dist,
world_size=world_size,
port=free_port(),
use_ddp=use_ddp,
use_mp_reload=use_mp_reload,
test_scheduler=test_scheduler)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_checkpoint(2, use_ddp=False, use_mp_reload=True, test_scheduler="torch_cosine")
|
import pytest
import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_set_process_memory_fraction, colo_device_memory_capacity
from colossalai.utils import free_port
from functools import partial
import torch.multiprocessing as mp
def _run_colo_set_process_memory_fraction_and_colo_device_memory_capacity():
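    # colo_device_memory_capacity reports the capacity visible to this process,
    # so capping the memory fraction at 0.5 should exactly halve it.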
    capacity_before = colo_device_memory_capacity(get_current_device())
    colo_set_process_memory_fraction(0.5)
    capacity_after = colo_device_memory_capacity(get_current_device())
    assert capacity_after * 2 == capacity_before
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
_run_colo_set_process_memory_fraction_and_colo_device_memory_capacity()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [3, 4])
def test_memory_utils(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_memory_utils(world_size=2)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.nn.functional as F
from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.random import add_seed, seed, set_mode, reset_seeds
from colossalai.utils.activation_checkpoint import checkpoint
def forward(x, weight):
out = torch.matmul(x, weight)
with seed(ParallelMode.DATA):
out_ = F.dropout(out, p=0.4, training=True)
return out_
def forward_inplace_ckpt(x, weight, cpu_offload=False):
out = torch.matmul(x, weight)
bn = torch.nn.BatchNorm1d(4, affine=False)
bn = bn.to(device="cuda")
out = bn(out)
def ckpt0(x):
return F.relu(x, inplace=True)
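    # use_reentrant=False matters here: the non-reentrant checkpoint tolerates the
    # in-place ReLU inside ckpt0, which can break the reentrant variant.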
out = checkpoint(ckpt0, cpu_offload, out, use_reentrant=False)
return out
def forward_inplace(x, weight):
out = torch.matmul(x, weight)
bn = torch.nn.BatchNorm1d(4, affine=False)
bn = bn.to(device="cuda")
out = bn(out)
out = F.relu(out, inplace=True)
return out
@pytest.mark.gpu
@pytest.mark.parametrize("use_reentrant", [True, False])
@pytest.mark.parametrize("cpu_offload", [True, False])
def test_activation_checkpointing(cpu_offload, use_reentrant):
    # As the seed manager is a singleton, reset seeds here so that
    # other tests cannot affect this one.
reset_seeds()
    # We put initialization here to avoid changing the cuda rng state below
inputs = torch.rand(2, 2, requires_grad=True, device='cuda')
weight = torch.rand(2, 4, requires_grad=True, device='cuda')
# Get a copy of input tensors
inputs_ = torch.empty(2, 2, requires_grad=True, device='cuda')
inputs_.data.copy_(inputs.data)
weight_ = torch.empty(2, 4, requires_grad=True, device='cuda')
weight_.data.copy_(weight.data)
add_seed(ParallelMode.GLOBAL, 1024)
add_seed(ParallelMode.DATA, 1026)
set_mode(ParallelMode.GLOBAL)
global_cuda_rng_state = torch.cuda.get_rng_state()
set_mode(ParallelMode.DATA)
data_parallel_cuda_rng_state = torch.cuda.get_rng_state()
set_mode(ParallelMode.GLOBAL)
out = forward(inputs, weight)
loss = out.sum()
loss.backward()
# Recover cuda rng states
set_mode(ParallelMode.GLOBAL)
torch.cuda.set_rng_state(global_cuda_rng_state)
set_mode(ParallelMode.DATA)
torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
set_mode(ParallelMode.GLOBAL)
out = checkpoint(forward, cpu_offload, inputs_, weight_, use_reentrant=use_reentrant)
loss = out.sum()
loss.backward()
assert torch.all(inputs.grad == inputs_.grad), 'Gradient of the input does not match'
torch.cuda.empty_cache()
# Extra test for use_reentrant=False
    if not use_reentrant:
# Recover cuda rng states
set_mode(ParallelMode.GLOBAL)
torch.cuda.set_rng_state(global_cuda_rng_state)
set_mode(ParallelMode.DATA)
torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
set_mode(ParallelMode.GLOBAL)
out = forward_inplace(inputs, weight)
loss = out.sum()
loss.backward()
# Recover cuda rng states
set_mode(ParallelMode.GLOBAL)
torch.cuda.set_rng_state(global_cuda_rng_state)
set_mode(ParallelMode.DATA)
torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
set_mode(ParallelMode.GLOBAL)
out = forward_inplace_ckpt(inputs_, weight_, cpu_offload=cpu_offload)
loss = out.sum()
loss.backward()
assert torch.all(inputs.grad == inputs_.grad), 'Gradient of the input does not match'
torch.cuda.empty_cache()
    # As the seed manager is a singleton, reset seeds here again so that other
    # tests running together with this one are not stuck with the seeds set above.
reset_seeds()
if __name__ == "__main__":
test_activation_checkpointing(False, False)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, get_current_device, is_using_pp
from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.testing import rerun_on_exception, skip_if_not_enough_gpus
def build_pipeline(model):
from colossalai.pipeline.utils import partition_uniform
pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
depth = len(model)
start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
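    # partition_uniform assigns a contiguous [start, end) range of layers to each
    # pipeline stage; non-local layers become nn.Identity below so the sequential
    # indices (and thus state-dict keys) match on every rank.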
layers = []
for i in range(depth):
if start <= i < end:
layers.append(model[i])
else:
layers.append(nn.Identity())
return nn.Sequential(*tuple(layers))
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-3, atol=1e-2)
def check_checkpoint_2p5d(rank, world_size, port):
config = dict(parallel=dict(pipeline=dict(size=2), tensor=dict(size=4, depth=1, mode="2.5d")),)
disable_existing_loggers()
launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
sd1 = m1.state_dict()
if gpc.get_global_rank() == 0:
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
save_checkpoint("test.pt", 0, m1)
m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
if is_using_pp():
m2 = build_pipeline(m2)
load_checkpoint("test.pt", m2)
sd2 = m2.state_dict()
if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
sd2 = gather_pipeline_parallel_state_dict(sd2)
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")
if gpc.get_global_rank() == 0:
for k, v in sd1.items():
assert k in sd2
check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_checkpoint_2p5d():
world_size = 8
run_func = partial(check_checkpoint_2p5d, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == "__main__":
test_checkpoint_2p5d()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, is_using_pp
from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.testing import rerun_on_exception, skip_if_not_enough_gpus
def build_pipeline(model):
from colossalai.pipeline.utils import partition_uniform
pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
depth = len(model)
start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
layers = []
for i in range(depth):
if start <= i < end:
layers.append(model[i])
else:
layers.append(nn.Identity())
return nn.Sequential(*tuple(layers))
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-3, atol=1e-2)
def check_checkpoint_3d(rank, world_size, port):
config = dict(parallel=dict(pipeline=dict(size=1), tensor=dict(size=8, mode="3d")),)
disable_existing_loggers()
launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
sd1 = m1.state_dict()
if gpc.get_global_rank() == 0:
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
save_checkpoint("test.pt", 0, m1)
m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
if is_using_pp():
m2 = build_pipeline(m2)
load_checkpoint("test.pt", m2)
sd2 = m2.state_dict()
if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
sd2 = gather_pipeline_parallel_state_dict(sd2)
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")
if gpc.get_global_rank() == 0:
for k, v in sd1.items():
assert k in sd2
check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_checkpoint_3d():
world_size = 8
run_func = partial(check_checkpoint_3d, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == "__main__":
test_checkpoint_3d()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, is_using_pp
from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.testing import rerun_on_exception, skip_if_not_enough_gpus
def build_pipeline(model):
from colossalai.pipeline.utils import partition_uniform
pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
depth = len(model)
start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
layers = []
for i in range(depth):
if start <= i < end:
layers.append(model[i])
else:
layers.append(nn.Identity())
return nn.Sequential(*tuple(layers))
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-3, atol=1e-2)
def check_checkpoint_2d(rank, world_size, port):
config = dict(parallel=dict(pipeline=dict(size=2), tensor=dict(size=4, mode="2d")),)
disable_existing_loggers()
launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
sd1 = m1.state_dict()
if gpc.get_global_rank() == 0:
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
save_checkpoint("test.pt", 0, m1)
m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
if is_using_pp():
m2 = build_pipeline(m2)
load_checkpoint("test.pt", m2)
sd2 = m2.state_dict()
if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
sd2 = gather_pipeline_parallel_state_dict(sd2)
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")
if gpc.get_global_rank() == 0:
for k, v in sd1.items():
assert k in sd2
check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_checkpoint_2d():
world_size = 8
run_func = partial(check_checkpoint_2d, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == "__main__":
test_checkpoint_2d()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, is_using_pp
from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.testing import rerun_on_exception, skip_if_not_enough_gpus
def build_pipeline(model):
from colossalai.pipeline.utils import partition_uniform
pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
depth = len(model)
start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
layers = []
for i in range(depth):
if start <= i < end:
layers.append(model[i])
else:
layers.append(nn.Identity())
return nn.Sequential(*tuple(layers))
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-3, atol=1e-2)
def check_checkpoint_1d(rank, world_size, port):
config = dict(parallel=dict(pipeline=dict(size=2), tensor=dict(size=4, mode="1d")),)
disable_existing_loggers()
launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
sd1 = m1.state_dict()
if gpc.get_global_rank() == 0:
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
save_checkpoint("test.pt", 0, m1)
m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
if is_using_pp():
m2 = build_pipeline(m2)
load_checkpoint("test.pt", m2)
sd2 = m2.state_dict()
if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
sd2 = gather_pipeline_parallel_state_dict(sd2)
print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")
if gpc.get_global_rank() == 0:
for k, v in sd1.items():
assert k in sd2
check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_checkpoint_1d():
world_size = 8
run_func = partial(check_checkpoint_1d, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == "__main__":
test_checkpoint_1d()
|
import os
from functools import partial
from tempfile import TemporaryDirectory
from typing import Dict
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.checkpoint_io.constant import (GLOBAL_META_FILE_NAME, META_CKPT_FILE_NAME, MODEL_CKPT_FILE_NAME,
OTHER_CKPT_FILE_NAME)
from colossalai.utils.checkpoint_io.io import save
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
from torch import Tensor
from torch.optim import Adam
def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None:
assert set(a.keys()) == set(b.keys())
for k, v in a.items():
assert torch.equal(v, b[k])
def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None:
assert set(a['state'].keys()) == set(b['state'].keys())
for k, state in a['state'].items():
b_state = b['state'][k]
for v1, v2 in zip(state.values(), b_state.values()):
if isinstance(v1, Tensor):
assert torch.equal(v1, v2)
else:
assert v1 == v2
    if not ignore_param_groups:
assert a['param_groups'] == b['param_groups']
class DummyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = nn.Linear(20, 1)
def prepare_model_optim():
model = DummyModel()
for p in model.parameters():
p.grad = torch.ones_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
return model, optimizer
def test_overwrite():
model = DummyModel()
with TemporaryDirectory() as dir_name:
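        # create an empty dummy shard file so that save() detects an existing
        # checkpoint and refuses to overwrite it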
with open(os.path.join(dir_name, MODEL_CKPT_FILE_NAME.replace('.bin', '-shard0.bin')), 'a') as f:
pass
with pytest.raises(RuntimeError, match=r'Save error: Checkpoint ".+" exists\. \(overwrite = False\)'):
save(dir_name, model)
def test_save_global():
model, optimizer = prepare_model_optim()
with TemporaryDirectory() as dir_name:
save(dir_name, model, optimizer)
assert len(os.listdir(dir_name)) == 5
global_meta = torch.load(os.path.join(dir_name, GLOBAL_META_FILE_NAME))
assert len(global_meta['meta']) == 1 and global_meta['meta'][0] == META_CKPT_FILE_NAME
meta = torch.load(os.path.join(dir_name, META_CKPT_FILE_NAME))
assert len(meta['model']) == 1
assert len(meta['optimizer']) == 1
model_state_dict = torch.load(os.path.join(dir_name, meta['model'][0]))
check_model_state_dict(model.state_dict(), model_state_dict)
optimizer_state_dict = torch.load(os.path.join(dir_name, meta['optimizer'][0]))
check_optim_state_dict(optimizer.state_dict(), optimizer_state_dict)
other_state_dict = torch.load(os.path.join(dir_name, OTHER_CKPT_FILE_NAME))
assert len(other_state_dict) == 0
def test_save_global_shard():
model, optimizer = prepare_model_optim()
with TemporaryDirectory() as dir_name:
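        # 80 / 1024**3 GB is just 80 bytes: the fp32 20x1 weight alone fills a
        # shard, so both model and optimizer states are split into two shards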
save(dir_name, model, optimizer, max_shard_size_gb=80 / 1024**3)
assert len(os.listdir(dir_name)) == 7
meta = torch.load(os.path.join(dir_name, META_CKPT_FILE_NAME))
assert len(meta['model']) == 2 and len(meta['optimizer']) == 2
model_state_dicts = [torch.load(os.path.join(dir_name, name)) for name in meta['model']]
assert len(set(model_state_dicts[0].keys()) & set(model_state_dicts[1].keys())) == 0
check_model_state_dict(model.state_dict(), {**model_state_dicts[0], **model_state_dicts[1]})
optimizer_state_dicts = [torch.load(os.path.join(dir_name, name)) for name in meta['optimizer']]
assert len(set(optimizer_state_dicts[0]['state'].keys()) & set(optimizer_state_dicts[1]['state'].keys())) == 0
assert 'param_groups' in optimizer_state_dicts[0] and 'param_groups' not in optimizer_state_dicts[1]
check_optim_state_dict(
optimizer.state_dict(), {
'state': {
**optimizer_state_dicts[0]['state'],
**optimizer_state_dicts[1]['state']
},
'param_groups': optimizer_state_dicts[0]['param_groups']
})
def run_dist(rank, world_size, port, func):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
func()
def run_save_dist(dir_name):
    model, optimizer = prepare_model_optim()
dist_metas = {
'fc.weight': ParamDistMeta(dist.get_rank(), dist.get_world_size(), 0, 1),
'fc.bias': ParamDistMeta(dist.get_rank(), dist.get_world_size(), 0, 1)
}
    save(dir_name, model, optimizer, dist_meta=dist_metas)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_save_dist():
with TemporaryDirectory() as dir_name:
fn = partial(run_save_dist, dir_name)
world_size = 2
proc_fn = partial(run_dist, world_size=world_size, port=free_port(), func=fn)
mp.spawn(proc_fn, nprocs=world_size)
assert len(os.listdir(dir_name)) == 8
global_meta = torch.load(os.path.join(dir_name, GLOBAL_META_FILE_NAME))
assert len(global_meta['meta']) == 2
for rank, meta_name in enumerate(global_meta['meta']):
meta = torch.load(os.path.join(dir_name, meta_name))
assert meta.get('dist_meta', None) is not None
assert len(meta['model']) == 1 and len(meta['optimizer']) == 1
model_state_dict = torch.load(os.path.join(dir_name, meta['model'][0]))
assert len(model_state_dict) == 2
optimizer_state_dict = torch.load(os.path.join(dir_name, meta['optimizer'][0]))
assert len(optimizer_state_dict['state']) == 2
assert 'param_groups' in optimizer_state_dict
if __name__ == '__main__':
test_overwrite()
test_save_global()
test_save_global_shard()
test_save_dist()
|
import torch
import torch.nn as nn
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
from colossalai.utils.checkpoint_io.utils import build_checkpoints
from torch.optim import Adam
class DummyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = nn.Linear(20, 1)
def test_global_model():
model = DummyModel()
model_checkpoints, optimizer_checkpoints, meta = build_checkpoints(0, model)
assert len(model_checkpoints) == 1
assert len(optimizer_checkpoints) == 0
assert meta['dist_meta'] is None
orig_state_dict = model.state_dict()
global_state_dict = model_checkpoints[0]
assert set(orig_state_dict.keys()) == set(global_state_dict.keys())
for k, v in orig_state_dict.items():
assert torch.equal(v, global_state_dict[k])
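# A note on the first argument of build_checkpoints as exercised below (inferred
# from these tests, not a documented contract): 0 appears to disable sharding,
# while a small byte budget such as 80 forces the fc layer's 80-byte fp32 weight
# and its bias into separate shards.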
def test_global_model_shard():
model = DummyModel()
model_checkpoints, optimizer_checkpoints, meta = build_checkpoints(80, model)
assert len(model_checkpoints) == 2
assert len(optimizer_checkpoints) == 0
assert meta['dist_meta'] is None
orig_state_dict = model.state_dict()
assert set(orig_state_dict.keys()) == set(model_checkpoints[0].keys()) | set(model_checkpoints[1].keys())
assert len(set(model_checkpoints[0].keys()) & set(model_checkpoints[1].keys())) == 0
for k, v in orig_state_dict.items():
for state_dict in model_checkpoints:
if k in state_dict:
assert torch.equal(v, state_dict[k])
def test_global_optimizer():
model = DummyModel()
for p in model.parameters():
p.grad = torch.rand_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
model_checkpoints, optimizer_checkpoints, meta = build_checkpoints(0, model, optimizer)
assert len(optimizer_checkpoints) == 1
assert meta['param_to_os'] == {'fc.weight': 0, 'fc.bias': 1}
for state in meta['paired_os'].values():
for k, is_paired in state.items():
if k == 'step':
assert not is_paired
else:
assert is_paired
orig_state_dict = optimizer.state_dict()
state_dict = optimizer_checkpoints[0]
for k, orig_state in orig_state_dict['state'].items():
state = state_dict['state'][k]
for v1, v2 in zip(orig_state.values(), state.values()):
if isinstance(v2, torch.Tensor):
assert torch.equal(v1, v2)
else:
                assert v1 == v2
assert orig_state_dict['param_groups'] == state_dict['param_groups']
def test_global_optimizer_shard():
model = DummyModel()
for p in model.parameters():
p.grad = torch.rand_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
model_checkpoints, optimizer_checkpoints, meta = build_checkpoints(80, model, optimizer)
assert len(optimizer_checkpoints) == 2
assert 'param_groups' in optimizer_checkpoints[0] and 'param_groups' not in optimizer_checkpoints[1]
orig_state_dict = optimizer.state_dict()
assert set(orig_state_dict['state'].keys()) == set(optimizer_checkpoints[0]['state'].keys()) | set(
optimizer_checkpoints[1]['state'].keys())
assert len(set(optimizer_checkpoints[0]['state'].keys()) & set(optimizer_checkpoints[1]['state'].keys())) == 0
for k, orig_state in orig_state_dict['state'].items():
state = optimizer_checkpoints[0]['state'][k] if k in optimizer_checkpoints[0][
'state'] else optimizer_checkpoints[1]['state'][k]
for v1, v2 in zip(orig_state.values(), state.values()):
if isinstance(v2, torch.Tensor):
assert torch.equal(v1, v2)
else:
assert v1 == v2
assert orig_state_dict['param_groups'] == optimizer_checkpoints[0]['param_groups']
def test_dist_model_optimizer():
model = DummyModel()
for p in model.parameters():
p.grad = torch.rand_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
dist_meta = {'fc.weight': ParamDistMeta(0, 2, 0, 1), 'fc.bias': ParamDistMeta(1, 2, 0, 1)}
model_checkpoints, optimizer_checkpoints, meta = build_checkpoints(0, model, optimizer, dist_meta=dist_meta)
assert dist_meta == meta['dist_meta']
assert len(model_checkpoints) == 1
assert len(optimizer_checkpoints) == 1
assert 'fc.weight' in model_checkpoints[0] and 'fc.bias' in model_checkpoints[0]
assert 0 in optimizer_checkpoints[0]['state'] and 1 in optimizer_checkpoints[0]['state']
dist_meta = {'fc.weight': ParamDistMeta(1, 2, 0, 1), 'fc.bias': ParamDistMeta(1, 2, 0, 1)}
model_checkpoints, optimizer_checkpoints, meta = build_checkpoints(0, model, optimizer, dist_meta=dist_meta)
assert dist_meta == meta['dist_meta']
assert len(model_checkpoints) == 1
assert len(optimizer_checkpoints) == 1
if __name__ == '__main__':
test_global_model()
test_global_model_shard()
test_global_optimizer()
test_global_optimizer_shard()
test_dist_model_optimizer()
|
import os
from functools import partial
from tempfile import TemporaryDirectory
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.checkpoint_io.constant import GLOBAL_META_FILE_NAME
from colossalai.utils.checkpoint_io.io import redist, save
from colossalai.utils.checkpoint_io.meta import ParamDistMeta, ParamRedistMeta, RankRedistMeta, RedistMeta
from torch.optim import Adam
class DummyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = nn.Linear(20, 1)
def prepare_model_optim(shard: bool = False, zero: bool = False):
model = DummyModel()
if shard:
model.fc.weight.data = model.fc.weight.chunk(2, 1)[dist.get_rank() % 2]
if zero:
dp_rank = dist.get_rank() // 2
model.fc.weight.data = model.fc.weight.reshape(-1).split([3, model.fc.weight.size(1) - 3], 0)[dp_rank]
if dp_rank != 0:
model.fc.bias.data = torch.empty(0, dtype=model.fc.bias.dtype)
for p in model.parameters():
p.grad = torch.ones_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
return model, optimizer
def get_dist_metas(nprocs: int, zero: bool = False):
dp_world_size = nprocs // 2
dist_metas = []
for rank in range(nprocs):
if zero:
dist_metas.append({
'fc.weight':
ParamDistMeta(rank // 2,
dp_world_size,
rank % 2,
2,
tp_shard_dims=[1],
tp_num_parts=[2],
zero_numel=10,
zero_orig_shape=[1, 10]),
'fc.bias':
ParamDistMeta(rank // 2, dp_world_size, 0, 1, zero_numel=1, zero_orig_shape=[1])
})
else:
dist_metas.append({
'fc.weight': ParamDistMeta(rank // 2, dp_world_size, rank % 2, 2, tp_shard_dims=[1], tp_num_parts=[2]),
'fc.bias': ParamDistMeta(rank // 2, dp_world_size, 0, 1)
})
return dist_metas
def get_redist_meta(nprocs: int):
dp_world_size = nprocs // 2
rank_meta = {
'fc.weight': {rank: RankRedistMeta(rank // 2, rank % 2, 0) for rank in range(nprocs)},
'fc.bias': {rank: RankRedistMeta(rank // 2, 0, 0) for rank in range(nprocs)}
}
param_meta = {
'fc.weight': ParamRedistMeta(dp_world_size, 2, tp_shard_dims=[1], tp_num_parts=[2]),
'fc.bias': ParamRedistMeta(dp_world_size, 1)
}
return RedistMeta(rank_meta, [], param_meta)
def check_checkpoint_shape(dir_name: str):
global_meta = torch.load(os.path.join(dir_name, GLOBAL_META_FILE_NAME))
for meta_name in global_meta['meta']:
meta = torch.load(os.path.join(dir_name, meta_name))
assert meta['dist_meta'] is not None
assert len(meta['params']) == 2
assert len(meta['model']) == 1 and len(meta['optimizer']) == 1
model_state_dict = torch.load(os.path.join(dir_name, meta['model'][0]))
assert len(model_state_dict) == 2
assert model_state_dict['fc.weight'].size(1) == 10
optimizer_state_dict = torch.load(os.path.join(dir_name, meta['optimizer'][0]))
assert len(optimizer_state_dict['state']) == 2
assert 'param_groups' in optimizer_state_dict and 'state' in optimizer_state_dict
assert optimizer_state_dict['state'][0]['exp_avg'].size(1) == 10
assert optimizer_state_dict['state'][0]['exp_avg_sq'].size(1) == 10
def test_global_to_dist():
model, optimizer = prepare_model_optim()
with TemporaryDirectory() as dir_name:
save(dir_name, model, optimizer)
with TemporaryDirectory() as output_dir:
redist(dir_name, output_dir, get_redist_meta(4), get_dist_metas(4))
check_checkpoint_shape(output_dir)
def run_dist(rank, world_size, port, func):
colossalai.launch(config={'parallel': {
'tensor': {
'mode': '1d',
'size': 2
}
}},
rank=rank,
world_size=world_size,
host='localhost',
port=port,
backend='nccl')
func()
def run_save_dist(dir_name: str, zero: bool):
    model, optimizer = prepare_model_optim(shard=True, zero=zero)
rank = dist.get_rank()
    save(dir_name, model, optimizer, dist_meta=get_dist_metas(4, zero)[rank])
@pytest.mark.dist
@pytest.mark.parametrize("zero", [False, True])
@rerun_if_address_is_in_use()
def test_dist_to_dist(zero: bool):
with TemporaryDirectory() as dir_name:
fn = partial(run_save_dist, dir_name, zero)
world_size = 4
proc_fn = partial(run_dist, world_size=world_size, port=free_port(), func=fn)
mp.spawn(proc_fn, nprocs=world_size)
with TemporaryDirectory() as output_dir:
redist(dir_name, output_dir, get_redist_meta(4), get_dist_metas(4))
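            # without ZeRO the saved layout already matches the target layout,
            # so redist() has nothing to rewrite and the output stays empty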
if not zero:
assert len(os.listdir(output_dir)) == 0
else:
check_checkpoint_shape(output_dir)
if __name__ == '__main__':
test_global_to_dist()
test_dist_to_dist(False)
test_dist_to_dist(True)
|
import os
from functools import partial
from tempfile import TemporaryDirectory
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.optim import Adam
import colossalai
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.checkpoint_io.constant import GLOBAL_META_FILE_NAME
from colossalai.utils.checkpoint_io.io import merge, save
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
class DummyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = nn.Linear(20, 1)
def prepare_model_optim(shard: bool = False, zero: bool = False):
model = DummyModel()
if shard:
model.fc.weight.data = model.fc.weight.chunk(2, 1)[dist.get_rank() % 2]
if zero:
dp_rank = dist.get_rank() // 2
model.fc.weight.data = model.fc.weight.reshape(-1).split([3, model.fc.weight.size(1) - 3], 0)[dp_rank]
if dp_rank != 0:
model.fc.bias.data = torch.empty(0, dtype=model.fc.bias.dtype)
for p in model.parameters():
p.grad = torch.ones_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
return model, optimizer
def test_merge_global():
model, optimizer = prepare_model_optim()
with TemporaryDirectory() as dir_name:
save(dir_name, model, optimizer)
with TemporaryDirectory() as output_dir:
merge(dir_name, output_dir)
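            # a checkpoint saved without dist meta is already global, so merging
            # it is a no-op and the output directory stays empty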
assert len(os.listdir(output_dir)) == 0
with TemporaryDirectory() as dir_name:
save(dir_name, model, optimizer, max_shard_size_gb=80 / 1024**3)
with TemporaryDirectory() as output_dir:
merge(dir_name, output_dir)
assert len(os.listdir(output_dir)) == 0
def run_dist(rank, world_size, port, func):
colossalai.launch(config={'parallel': {
'tensor': {
'mode': '1d',
'size': 2
}
}},
rank=rank,
world_size=world_size,
host='localhost',
port=port,
backend='nccl')
func()
def run_save_dist(dir_name: str, zero: bool):
    model, optimizer = prepare_model_optim(shard=True, zero=zero)
rank = dist.get_rank()
dp_world_size = dist.get_world_size() // 2
if not zero:
dist_metas = {
'fc.weight': ParamDistMeta(rank // 2, dp_world_size, rank % 2, 2, tp_shard_dims=[1], tp_num_parts=[2]),
'fc.bias': ParamDistMeta(rank // 2, dp_world_size, 0, 1)
}
else:
dist_metas = {
'fc.weight':
ParamDistMeta(rank // 2,
dp_world_size,
rank % 2,
2,
tp_shard_dims=[1],
tp_num_parts=[2],
zero_numel=10,
zero_orig_shape=[1, 10]),
'fc.bias':
ParamDistMeta(rank // 2, dp_world_size, 0, 1, zero_numel=1, zero_orig_shape=[1])
}
    save(dir_name, model, optimizer, dist_meta=dist_metas)
@pytest.mark.dist
@pytest.mark.parametrize("zero", [False, True])
@rerun_if_address_is_in_use()
def test_merge_tp_dp(zero: bool):
with TemporaryDirectory() as dir_name:
fn = partial(run_save_dist, dir_name, zero)
world_size = 4
proc_fn = partial(run_dist, world_size=world_size, port=free_port(), func=fn)
mp.spawn(proc_fn, nprocs=world_size)
with TemporaryDirectory() as output_dir:
merge(dir_name, output_dir)
assert len(os.listdir(output_dir)) == 5
global_meta = torch.load(os.path.join(output_dir, GLOBAL_META_FILE_NAME))
assert len(global_meta['meta']) == 1
meta = torch.load(os.path.join(output_dir, global_meta['meta'][0]))
assert meta['dist_meta'] is None
assert len(meta['params']) == 2
assert len(meta['model']) == 1 and len(meta['optimizer']) == 1
model_state_dict = torch.load(os.path.join(output_dir, meta['model'][0]))
assert len(model_state_dict) == 2
assert model_state_dict['fc.weight'].size(1) == 20
optimizer_state_dict = torch.load(os.path.join(output_dir, meta['optimizer'][0]))
assert len(optimizer_state_dict['state']) == 2
assert 'param_groups' in optimizer_state_dict and 'state' in optimizer_state_dict
assert optimizer_state_dict['state'][0]['exp_avg'].size(1) == 20
assert optimizer_state_dict['state'][0]['exp_avg_sq'].size(1) == 20
if __name__ == '__main__':
test_merge_global()
test_merge_tp_dp(False)
test_merge_tp_dp(True)
|
import torch
from colossalai.utils.checkpoint_io.meta import ParamRedistMeta
from colossalai.utils.checkpoint_io.distributed import flatten_zero_param, split_tp_param, unmerge_param
def test_flatten_zero_param_even() -> None:
redist_meta = ParamRedistMeta(4, 1, zero_start_dp_rank=0, zero_offsets=[0, 4, 8, 12])
orig_tensor = torch.rand(4, 4)
tensors = list(orig_tensor.reshape(-1).chunk(4))
flat_tensors = flatten_zero_param(orig_tensor, redist_meta)
assert len(tensors) == len(flat_tensors)
for t, st in zip(tensors, flat_tensors):
assert torch.equal(t, st)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(unmerged_tensors) == 1
unmerged_tensors = unmerged_tensors[0]
assert len(tensors) == len(unmerged_tensors)
for t, tl in zip(tensors, unmerged_tensors):
assert torch.equal(t, tl)
def test_flatten_zero_param_uneven() -> None:
redist_meta = ParamRedistMeta(4, 1, zero_start_dp_rank=1, zero_offsets=[0, 13])
orig_tensor = torch.rand(4, 4)
tensors = list(orig_tensor.reshape(-1).split([13, 3]))
flat_tensors = flatten_zero_param(orig_tensor, redist_meta)
assert flat_tensors[0].size(0) == 0 and flat_tensors[-1].size(0) == 0
flat_tensors = flat_tensors[1:-1]
assert len(tensors) == len(flat_tensors)
for t, st in zip(tensors, flat_tensors):
assert torch.equal(t, st)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(unmerged_tensors) == 1
unmerged_tensors = unmerged_tensors[0]
assert unmerged_tensors[0].size(0) == 0 and unmerged_tensors[-1].size(0) == 0
unmerged_tensors = unmerged_tensors[1:-1]
assert len(tensors) == len(unmerged_tensors)
for t, tl in zip(tensors, unmerged_tensors):
assert torch.equal(t, tl)
def test_split_tp_param_1d_row() -> None:
redist_meta = ParamRedistMeta(1, 4, tp_shard_dims=[0], tp_num_parts=[4])
orig_tensor = torch.rand(4, 4)
tensors = [t.contiguous() for t in orig_tensor.chunk(4, 0)]
split_tensors = split_tp_param(orig_tensor, redist_meta)
assert len(tensors) == len(split_tensors)
for t, st in zip(tensors, split_tensors):
assert torch.equal(t, st)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(tensors) == len(unmerged_tensors)
for t, tl in zip(tensors, unmerged_tensors):
assert len(tl) == 1
assert torch.equal(t, tl[0])
def test_split_tp_param_1d_col() -> None:
redist_meta = ParamRedistMeta(1, 4, tp_shard_dims=[1], tp_num_parts=[4])
orig_tensor = torch.rand(4, 4)
tensors = [t.contiguous() for t in orig_tensor.chunk(4, 1)]
split_tensors = split_tp_param(orig_tensor, redist_meta)
assert len(tensors) == len(split_tensors)
for t, st in zip(tensors, split_tensors):
assert torch.equal(t, st)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(tensors) == len(unmerged_tensors)
for t, tl in zip(tensors, unmerged_tensors):
assert len(tl) == 1
assert torch.equal(t, tl[0])
def test_split_tp_param_2d() -> None:
redist_meta = ParamRedistMeta(1, 6, tp_shard_dims=[0, 1], tp_num_parts=[2, 3])
orig_tensor = torch.rand(4, 6)
tensors = [t.contiguous() for tl in orig_tensor.chunk(2, 0) for t in tl.chunk(3, 1)]
split_tensors = split_tp_param(orig_tensor, redist_meta)
assert len(tensors) == len(split_tensors)
for t, st in zip(tensors, split_tensors):
assert torch.equal(t, st)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(tensors) == len(unmerged_tensors)
for t, tl in zip(tensors, unmerged_tensors):
assert len(tl) == 1
assert torch.equal(t, tl[0])
def test_split_tp_param_2d_reverse() -> None:
redist_meta = ParamRedistMeta(1, 6, tp_shard_dims=[1, 0], tp_num_parts=[3, 2])
orig_tensor = torch.rand(4, 6)
tensors = [t.contiguous() for tl in orig_tensor.chunk(2, 0) for t in tl.chunk(3, 1)]
split_tensors = split_tp_param(orig_tensor, redist_meta)
assert len(tensors) == len(split_tensors)
for t, st in zip(tensors, split_tensors):
assert torch.equal(t, st)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(tensors) == len(unmerged_tensors)
for t, tl in zip(tensors, unmerged_tensors):
assert len(tl) == 1
assert torch.equal(t, tl[0])
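# hybrid layout below: the 4x6 tensor is first TP-split into 6 shards (2 parts
# on dim 0 x 3 parts on dim 1), then each 2x2 shard is flattened and ZeRO-split
# across 2 dp ranks with offsets [0, 1], i.e. pieces of 1 and 3 elements.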
def test_unmerge_param_hybrid() -> None:
redist_meta = ParamRedistMeta(2,
6,
tp_shard_dims=[1, 0],
tp_num_parts=[3, 2],
zero_start_dp_rank=0,
zero_offsets=[0, 1])
orig_tensor = torch.rand(4, 6)
tensors = [
chunk for tl in orig_tensor.chunk(2, 0) for t in tl.chunk(3, 1)
for chunk in t.contiguous().reshape(-1).split([1, 3])
]
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(unmerged_tensors) == 6 and len(unmerged_tensors[0]) == 2
for tp_rank in range(6):
for dp_rank in range(2):
assert torch.equal(tensors[tp_rank * 2 + dp_rank], unmerged_tensors[tp_rank][dp_rank])
def test_unmerge_param_dummy() -> None:
redist_meta = ParamRedistMeta(1, 1)
orig_tensor = torch.rand(4, 6)
unmerged_tensors = unmerge_param(orig_tensor, redist_meta)
assert len(unmerged_tensors) == 1 and len(unmerged_tensors[0]) == 1
assert torch.equal(orig_tensor, unmerged_tensors[0][0])
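def _demo_unmerge_shapes() -> None:
    # A minimal sketch (not part of the original tests): unmerge_param returns a
    # nested list indexed as [tp_rank][dp_rank]; with 2 TP parts on dim 0 and no
    # ZeRO split, each TP rank holds exactly one piece.
    redist_meta = ParamRedistMeta(1, 2, tp_shard_dims=[0], tp_num_parts=[2])
    pieces = unmerge_param(torch.rand(4, 6), redist_meta)
    assert len(pieces) == 2 and all(len(p) == 1 for p in pieces)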
if __name__ == '__main__':
test_flatten_zero_param_even()
test_flatten_zero_param_uneven()
test_split_tp_param_1d_row()
test_split_tp_param_1d_col()
test_split_tp_param_2d()
test_split_tp_param_2d_reverse()
test_unmerge_param_hybrid()
test_unmerge_param_dummy()
|
import torch
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
from colossalai.utils.checkpoint_io.distributed import unflatten_zero_param, gather_tp_param, merge_param
def test_unflatten_zero_param_even() -> None:
dist_metas = [ParamDistMeta(i, 4, 0, 1, zero_numel=16, zero_orig_shape=[4, 4]) for i in range(4)]
orig_tensor = torch.rand(4, 4)
tensors = list(orig_tensor.reshape(-1).chunk(4))
unflattened_tensor = unflatten_zero_param(tensors, dist_metas)
assert torch.equal(orig_tensor, unflattened_tensor)
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
def test_unflatten_zero_param_uneven() -> None:
dist_metas = [ParamDistMeta(i, 4, 0, 1, zero_numel=16, zero_orig_shape=[4, 4]) for i in range(1, 3)]
orig_tensor = torch.rand(4, 4)
tensors = list(orig_tensor.reshape(-1).split([13, 3]))
unflattened_tensor = unflatten_zero_param(tensors, dist_metas)
assert torch.equal(orig_tensor, unflattened_tensor)
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
def test_gather_tp_param_1d_row() -> None:
dist_metas = [ParamDistMeta(0, 1, i, 4, tp_shard_dims=[0], tp_num_parts=[4]) for i in range(4)]
orig_tensor = torch.rand(4, 4)
tensors = [t.contiguous() for t in orig_tensor.chunk(4, 0)]
gathered_tensor = gather_tp_param(tensors, dist_metas)
assert torch.equal(orig_tensor, gathered_tensor)
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
def test_gather_tp_param_1d_col() -> None:
dist_metas = [ParamDistMeta(0, 1, i, 4, tp_shard_dims=[1], tp_num_parts=[4]) for i in range(4)]
orig_tensor = torch.rand(4, 4)
tensors = [t.contiguous() for t in orig_tensor.chunk(4, 1)]
gathered_tensor = gather_tp_param(tensors, dist_metas)
assert torch.equal(orig_tensor, gathered_tensor)
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
def test_gather_tp_param_2d() -> None:
dist_metas = [ParamDistMeta(0, 1, i, 6, tp_shard_dims=[0, 1], tp_num_parts=[2, 3]) for i in range(6)]
orig_tensor = torch.rand(4, 6)
tensors = [t.contiguous() for tl in orig_tensor.chunk(2, 0) for t in tl.chunk(3, 1)]
gathered_tensor = gather_tp_param(tensors, dist_metas)
assert torch.equal(orig_tensor, gathered_tensor)
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
def test_gather_tp_param_2d_reverse() -> None:
dist_metas = [ParamDistMeta(0, 1, i, 6, tp_shard_dims=[1, 0], tp_num_parts=[3, 2]) for i in range(6)]
orig_tensor = torch.rand(4, 6)
tensors = [t.contiguous() for tl in orig_tensor.chunk(2, 0) for t in tl.chunk(3, 1)]
gathered_tensor = gather_tp_param(tensors, dist_metas)
assert torch.equal(orig_tensor, gathered_tensor)
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
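# the 12 shards below are ordered TP-major: shard i belongs to dp rank i % 2 and
# tp rank i // 2, matching the ParamDistMeta list constructed in the test.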
def test_merge_param_hybrid() -> None:
dist_metas = [
ParamDistMeta(i % 2,
2,
i // 2,
6,
tp_shard_dims=[1, 0],
tp_num_parts=[3, 2],
zero_numel=4,
zero_orig_shape=[2, 2]) for i in range(12)
]
orig_tensor = torch.rand(4, 6)
tensors = [
chunk for tl in orig_tensor.chunk(2, 0) for t in tl.chunk(3, 1)
for chunk in t.contiguous().reshape(-1).split([1, 3])
]
merged_tensor = merge_param(tensors, dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
def test_merge_param_dummy() -> None:
dist_metas = [ParamDistMeta(0, 1, 0, 1)]
orig_tensor = torch.rand(4, 6)
merged_tensor = merge_param([orig_tensor], dist_metas)
assert torch.equal(orig_tensor, merged_tensor)
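def _demo_merge_roundtrip() -> None:
    # A minimal sketch (not part of the original tests): a 1D column split
    # followed by merge_param reproduces the original tensor.
    dist_metas = [ParamDistMeta(0, 1, i, 2, tp_shard_dims=[1], tp_num_parts=[2]) for i in range(2)]
    orig_tensor = torch.rand(4, 4)
    tensors = [t.contiguous() for t in orig_tensor.chunk(2, 1)]
    assert torch.equal(orig_tensor, merge_param(tensors, dist_metas))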
if __name__ == '__main__':
test_unflatten_zero_param_even()
test_unflatten_zero_param_uneven()
test_gather_tp_param_1d_row()
test_gather_tp_param_1d_col()
test_gather_tp_param_2d()
test_gather_tp_param_2d_reverse()
test_merge_param_hybrid()
test_merge_param_dummy()
|
from copy import deepcopy
from functools import partial
from tempfile import TemporaryDirectory
from typing import Dict
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.checkpoint_io.io import load, save
from colossalai.utils.checkpoint_io.meta import (ParamDistMeta, ParamRedistMeta, RankRedistMeta, RedistMeta)
from torch import Tensor
from torch.nn import Module
from torch.optim import Adam, Optimizer
def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None:
assert set(a.keys()) == set(b.keys())
for k, v in a.items():
assert torch.equal(v, b[k])
def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None:
assert set(a['state'].keys()) == set(b['state'].keys())
for k, state in a['state'].items():
b_state = b['state'][k]
for v1, v2 in zip(state.values(), b_state.values()):
if isinstance(v1, Tensor):
assert torch.equal(v1, v2)
else:
assert v1 == v2
    if not ignore_param_groups:
assert a['param_groups'] == b['param_groups']
class DummyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = nn.Linear(20, 1)
def prepare_model_optim(shard: bool = False, zero: bool = False):
model = DummyModel()
if shard:
model.fc.weight.data = model.fc.weight.chunk(2, 1)[dist.get_rank() % 2]
if zero:
dp_rank = dist.get_rank() // 2
model.fc.weight.data = model.fc.weight.reshape(-1).split([3, model.fc.weight.size(1) - 3], 0)[dp_rank]
if dp_rank != 0:
model.fc.bias.data = torch.empty(0, dtype=model.fc.bias.dtype)
for p in model.parameters():
p.grad = torch.rand_like(p)
optimizer = Adam(model.parameters(), lr=1e-3)
optimizer.step()
return model, optimizer
def reset_model_optim(model: Module, optimizer: Optimizer, scalar: float = 0.0):
with torch.no_grad():
for p in model.parameters():
p.fill_(scalar)
for state in optimizer.state.values():
for v in state.values():
if isinstance(v, Tensor):
v.fill_(scalar)
def get_dist_metas(nprocs: int, zero: bool = False):
dp_world_size = nprocs // 2
dist_metas = []
for rank in range(nprocs):
if zero:
dist_metas.append({
'fc.weight':
ParamDistMeta(rank // 2,
dp_world_size,
rank % 2,
2,
tp_shard_dims=[1],
tp_num_parts=[2],
zero_numel=10,
zero_orig_shape=[1, 10]),
'fc.bias':
ParamDistMeta(rank // 2, dp_world_size, 0, 1, zero_numel=1, zero_orig_shape=[1])
})
else:
dist_metas.append({
'fc.weight': ParamDistMeta(rank // 2, dp_world_size, rank % 2, 2, tp_shard_dims=[1], tp_num_parts=[2]),
'fc.bias': ParamDistMeta(rank // 2, dp_world_size, 0, 1)
})
return dist_metas
def get_redist_meta(nprocs: int):
dp_world_size = nprocs // 2
rank_meta = {
'fc.weight': {rank: RankRedistMeta(rank // 2, rank % 2, 0) for rank in range(nprocs)},
'fc.bias': {rank: RankRedistMeta(rank // 2, 0, 0) for rank in range(nprocs)}
}
param_meta = {
'fc.weight': ParamRedistMeta(dp_world_size, 2, tp_shard_dims=[1], tp_num_parts=[2]),
'fc.bias': ParamRedistMeta(dp_world_size, 1)
}
return RedistMeta(rank_meta, [], param_meta)
@pytest.mark.parametrize('max_shard_size_gb', [80 / 1024**3, 0])
def test_save_global_load_global(max_shard_size_gb: float):
model, optimizer = prepare_model_optim()
with TemporaryDirectory() as dir_name:
save(dir_name, model, optimizer, max_shard_size_gb=max_shard_size_gb)
new_model, new_optimizer = prepare_model_optim()
load(dir_name, new_model, new_optimizer, max_shard_size_gb=max_shard_size_gb)
check_model_state_dict(model.state_dict(), new_model.state_dict())
check_optim_state_dict(optimizer.state_dict(), new_optimizer.state_dict())
def run_dist(rank, world_size, port, func):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
func()
def launch_dist(fn, world_size: int):
proc_fn = partial(run_dist, world_size=world_size, port=free_port(), func=fn)
mp.spawn(proc_fn, nprocs=world_size)
def save_dist(dir_name: str, zero: bool):
    model, optimizer = prepare_model_optim(shard=True, zero=zero)
    reset_model_optim(model, optimizer)
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    save(dir_name, model, optimizer, dist_meta=get_dist_metas(world_size, zero)[rank])
def load_and_check_dist(dir_name: str):
    world_size = dist.get_world_size()
    model, optimizer = prepare_model_optim(shard=True)
    reset_model_optim(model, optimizer)
    model_state_dict = deepcopy(model.state_dict())
    optimizer_state_dict = deepcopy(optimizer.state_dict())
    # fill with a different value so that load() must actually overwrite the states
    reset_model_optim(model, optimizer, 1)
    load(dir_name, model, optimizer, get_redist_meta(world_size), get_dist_metas(world_size))
    check_model_state_dict(model_state_dict, model.state_dict())
    check_optim_state_dict(optimizer_state_dict, optimizer.state_dict())
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_save_global_load_dist():
model, optimizer = prepare_model_optim()
reset_model_optim(model, optimizer)
with TemporaryDirectory() as dir_name:
save(dir_name, model, optimizer)
fn = partial(load_and_check_dist, dir_name)
launch_dist(fn, 4)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_save_dist_load_dist():
with TemporaryDirectory() as dir_name:
# save tp + dp
fn = partial(save_dist, dir_name, False)
launch_dist(fn, 2)
# load tp + dp
fn = partial(load_and_check_dist, dir_name)
launch_dist(fn, 2)
with TemporaryDirectory() as dir_name:
# save tp + zero
fn = partial(save_dist, dir_name, True)
launch_dist(fn, 4)
# load tp + dp
fn = partial(load_and_check_dist, dir_name)
launch_dist(fn, 2)
launch_dist(fn, 4)
if __name__ == '__main__':
test_save_global_load_global(80 / 1024**3)
test_save_global_load_global(0)
test_save_global_load_dist()
test_save_dist_load_dist()
|
from collections import OrderedDict
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import colossalai
from colossalai.nn.parallel import ColoDDP
from colossalai.tensor import ColoParameter, ProcessGroup
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
from tests.components_to_test.registry import non_distributed_component_funcs
def check_state_dict_equal(state_dict: OrderedDict, other_state_dict: OrderedDict):
for (k1, t1), (k2, t2) in zip(state_dict.items(), other_state_dict.items()):
assert k1 == k2
if t1.device != t2.device:
temp_t2 = t2.to(t1.device)
else:
temp_t2 = t2
assert torch.equal(t1, temp_t2), "\t{}\n\t{}".format(t1, temp_t2)
def init_ddp(module: torch.nn.Module) -> ColoDDP:
pg = ProcessGroup()
return ColoDDP(module, process_group=pg)
def run_ddp_state_dict():
get_components_func = non_distributed_component_funcs.get_callable('gpt2')
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
torch_model = model_builder().cuda()
with ColoInitContext(device=get_current_device()):
model = model_builder()
model = init_ddp(model)
torch_state_dict = torch_model.state_dict()
for param in model.parameters():
if isinstance(param, ColoParameter):
assert param.get_process_group() is not None
model.load_state_dict(torch_state_dict)
for param in model.parameters():
if isinstance(param, ColoParameter):
assert param.get_process_group() is not None
state_dict = model.state_dict()
check_state_dict_equal(torch_state_dict, state_dict)
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_ddp_state_dict()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_state_dict(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_state_dict(2)
|
import pytest
import colossalai
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from functools import partial
from colossalai.nn.parallel.reducer import Reducer
import torch.distributed as dist
from torch.distributed.distributed_c10d import _get_default_group
REDUCE_CNT = 0
def check_eq(grad, grad_clone):
global REDUCE_CNT
print(f'Rank{dist.get_rank()} check {REDUCE_CNT}')
REDUCE_CNT += 1
assert torch.allclose(grad, grad_clone)
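# run_reducer compares two reduction paths: each gradient is all-reduced eagerly
# first, then its clone goes through the bucketed Reducer; the check_eq callback
# fires per tensor once its bucket is flushed and verifies both paths agree.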
def run_reducer():
grads = [torch.rand(64, i + 1, device=get_current_device()) for i in range(10)]
grads_clone = [g.clone().detach() for g in grads]
for g in grads:
dist.all_reduce(g)
reducer = Reducer(bucket_size_mb=1)
for g, g_clone in zip(grads, grads_clone):
reducer.all_reduce_async(g_clone, _get_default_group(), partial(check_eq, g))
reducer.flush()
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_reducer()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_reducer(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_reducer(2)
|
import os
import random
from functools import partial
from typing import Callable, Type
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import colossalai
from colossalai.gemini.chunk import ChunkManager, search_chunk_configuration
from colossalai.gemini.gemini_mgr import GeminiManager
from colossalai.nn.parallel import ColoDDP, ZeroDDP
from colossalai.tensor import ProcessGroup
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
def set_seed(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def init_ddp(module: torch.nn.Module) -> ColoDDP:
pg = ProcessGroup()
return ColoDDP(module, process_group=pg)
def init_ddpv2(module: torch.nn.Module) -> ZeroDDP:
chunk_config, *_ = search_chunk_configuration(module, 4, 1024)
chunk_manager = ChunkManager(chunk_config)
gemini_manager = GeminiManager('cuda', chunk_manager)
return ZeroDDP(module, gemini_manager)
class Net(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Linear(3, 3, bias=False)
self.fc2 = torch.nn.Linear(3, 1, bias=False)
def forward(self, x):
return self.fc2(self.fc1(x))
def run_fwd_bwd(ddp_cls: Type[ColoDDP], init_ddp_func: Callable[[torch.nn.Module], ColoDDP]):
with ColoInitContext(device=get_current_device()):
model = Net().cuda()
w1 = model.fc1.weight
w2 = model.fc2.weight
ddp_cls.set_params_to_ignore([w2])
model = init_ddp_func(model)
x = torch.rand(2, 3, device=get_current_device())
logits = model(x)
loss = torch.sum(logits)
model.backward(loss)
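    # ZeroDDP (Gemini) keeps the reduced gradient in the parameter payload
    # itself, hence w1 is gathered directly rather than w1.grad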
if ddp_cls is ZeroDDP:
w1s_grad = w1
else:
w1s_grad = w1.grad
w1_grads = [torch.empty_like(w1) for _ in range(dist.get_world_size())]
dist.all_gather(w1_grads, w1s_grad)
assert torch.equal(w1_grads[0], w1_grads[1])
w2_grads = [torch.empty_like(w2) for _ in range(dist.get_world_size())]
dist.all_gather(w2_grads, w2.grad)
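    # w2 was registered via set_params_to_ignore, so its gradient is never
    # synchronized and should differ across ranks (each rank uses its own seed)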
assert not torch.equal(w2_grads[0], w2_grads[1])
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
set_seed(dist.get_rank())
run_fwd_bwd(ColoDDP, init_ddp)
run_fwd_bwd(ZeroDDP, init_ddpv2)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [2])
@rerun_if_address_is_in_use()
def test_ddp_ignore_params(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_ddp_ignore_params(2)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
import pytest
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
@pytest.mark.cpu
def test_cifar10_dataset():
# build transform
transform_pipeline = [transforms.ToTensor()]
transform_pipeline = transforms.Compose(transform_pipeline)
# build dataset
dataset = datasets.CIFAR10(root=Path(os.environ['DATA']), train=True, download=True, transform=transform_pipeline)
# build dataloader
dataloader = DataLoader(dataset=dataset, batch_size=4, shuffle=True, num_workers=2)
data_iter = iter(dataloader)
    img, label = next(data_iter)
if __name__ == '__main__':
test_cifar10_dataset()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from functools import partial
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import colossalai
from torchvision import transforms, datasets
from colossalai.context import ParallelMode, Config
from colossalai.core import global_context as gpc
from colossalai.utils import get_dataloader, free_port
from colossalai.testing import rerun_if_address_is_in_use
CONFIG = Config(dict(
parallel=dict(
pipeline=dict(size=1),
tensor=dict(size=1, mode=None),
),
seed=1024,
))
def run_data_sampler(rank, world_size, port):
dist_args = dict(config=CONFIG, rank=rank, world_size=world_size, backend='gloo', port=port, host='localhost')
colossalai.launch(**dist_args)
print('finished initialization')
# build dataset
transform_pipeline = [transforms.ToTensor()]
transform_pipeline = transforms.Compose(transform_pipeline)
dataset = datasets.CIFAR10(root=Path(os.environ['DATA']), train=True, download=True, transform=transform_pipeline)
# build dataloader
dataloader = get_dataloader(dataset, batch_size=8, add_sampler=True)
data_iter = iter(dataloader)
    img, label = next(data_iter)
img = img[0]
if gpc.get_local_rank(ParallelMode.DATA) != 0:
img_to_compare = img.clone()
else:
img_to_compare = img
dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA))
if gpc.get_local_rank(ParallelMode.DATA) != 0:
assert not torch.equal(
img, img_to_compare), 'Same image was distributed across ranks but expected it to be different'
torch.cuda.empty_cache()
@pytest.mark.cpu
@rerun_if_address_is_in_use()
def test_data_sampler():
world_size = 4
test_func = partial(run_data_sampler, world_size=world_size, port=free_port())
mp.spawn(test_func, nprocs=world_size)
if __name__ == '__main__':
test_data_sampler()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from functools import partial
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchvision import transforms, datasets
import colossalai
from colossalai.context import ParallelMode, Config
from colossalai.core import global_context as gpc
from colossalai.utils import get_dataloader, free_port
from colossalai.testing import rerun_if_address_is_in_use
CONFIG = Config(
dict(
train_data=dict(
dataset=dict(
type='CIFAR10',
root=Path(os.environ['DATA']),
train=True,
download=True,
),
dataloader=dict(num_workers=2, batch_size=2, shuffle=True),
),
parallel=dict(
pipeline=dict(size=1),
tensor=dict(size=1, mode=None),
),
seed=1024,
))
def run_data_sampler(rank, world_size, port):
dist_args = dict(config=CONFIG, rank=rank, world_size=world_size, backend='gloo', port=port, host='localhost')
colossalai.launch(**dist_args)
# build dataset
transform_pipeline = [transforms.ToTensor(), transforms.RandomCrop(size=32, padding=4)]
transform_pipeline = transforms.Compose(transform_pipeline)
dataset = datasets.CIFAR10(root=Path(os.environ['DATA']), train=True, download=True, transform=transform_pipeline)
# build dataloader
dataloader = get_dataloader(dataset, batch_size=8, add_sampler=False)
data_iter = iter(dataloader)
    img, label = next(data_iter)
img = img[0]
if gpc.get_local_rank(ParallelMode.DATA) != 0:
img_to_compare = img.clone()
else:
img_to_compare = img
dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA))
if gpc.get_local_rank(ParallelMode.DATA) != 0:
        # without a data-parallel sampler every rank should receive the same batch;
        # this assertion would fail if a data-parallel sampler were given to the dataloader
        assert torch.equal(img, img_to_compare), \
            'Expected the same image across ranks since no data-parallel sampler is used'
torch.cuda.empty_cache()
@pytest.mark.cpu
@rerun_if_address_is_in_use()
def test_data_sampler():
world_size = 4
test_func = partial(run_data_sampler, world_size=world_size, port=free_port())
mp.spawn(test_func, nprocs=world_size)
if __name__ == '__main__':
test_data_sampler()
|
import torch
from colossalai.auto_parallel.tensor_shard.utils import (
get_broadcast_shape,
is_broadcastable,
recover_sharding_spec_for_broadcast_shape,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_spec import ShardingSpec
def test_is_broadcastable():
x1 = torch.rand(4, 4, 8)
x2 = torch.rand(1, 8)
assert is_broadcastable(x1.shape, x2.shape)
x1 = torch.rand(4, 2, 8)
x2 = torch.rand(2, 8)
assert is_broadcastable(x1.shape, x2.shape)
x1 = torch.rand(4, 2, 8)
x2 = torch.rand(4, 8)
assert not is_broadcastable(x1.shape, x2.shape)
def test_get_broadcast_shape():
x1 = torch.rand(4, 4, 8)
x2 = torch.rand(1, 8)
assert get_broadcast_shape(x1.shape, x2.shape) == [4, 4, 8]
x1 = torch.rand(4, 2, 8)
x2 = torch.rand(2, 8)
assert get_broadcast_shape(x1.shape, x2.shape) == [4, 2, 8]
x1 = torch.rand(4, 2, 8)
x2 = torch.rand(8)
assert get_broadcast_shape(x1.shape, x2.shape) == [4, 2, 8]
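# x1 of shape (4, 1, 8) broadcasts against x2 of shape (2, 8) to the logical
# shape (4, 2, 8); a sharding placed on the broadcast dim 1 cannot be mapped
# back onto x1's physical shape and is expected to be dropped.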
def test_recover_sharding_spec_for_broadcast_shape():
x1 = torch.rand(4, 1, 8)
x2 = torch.rand(2, 8)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
broadcast_shape = get_broadcast_shape(x1.shape, x2.shape)
logical_sharding_spec_for_x1 = ShardingSpec(device_mesh=device_mesh,
dim_partition_dict={
0: [0],
1: [1]
},
entire_shape=broadcast_shape)
physical_sharding_spec_for_x1, removed_dims = recover_sharding_spec_for_broadcast_shape(
logical_sharding_spec_for_x1, broadcast_shape, x1.shape)
print(physical_sharding_spec_for_x1)
assert physical_sharding_spec_for_x1.entire_shape == x1.shape
    # dim 1 of the physical tensor has broadcast type MULTIPLE, so its sharding is ignored
assert physical_sharding_spec_for_x1.dim_partition_dict == {0: [0]}
assert physical_sharding_spec_for_x1.sharding_sequence == ['S0', 'R', 'R']
|
import torch
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationDataType
from colossalai.auto_parallel.tensor_shard.solver import StrategiesConstructor
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing.pytest_wrapper import run_on_environment_flag
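# _param_resharding_cost_assertion checks that every strategy reports zero
# resharding cost (fwd, bwd and total) for PARAM operands, i.e. the solver
# treats parameter redistribution as free.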
def _param_resharding_cost_assertion(node):
for strategy in node.strategies_vector:
for prev_node, resharding_cost in strategy.resharding_costs.items():
if strategy.get_op_data_by_name(str(prev_node)).type == OperationDataType.PARAM:
for cost in resharding_cost:
assert cost.fwd == 0
assert cost.bwd == 0
assert cost.total == 0
class LinearModel(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
x = self.linear(x)
x = x * 2
return x
class ConvModel(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
bias=bias)
def forward(self, x):
x = self.conv(x)
x = x * 2
return x
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_linear_module():
model = LinearModel(4, 8)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %linear_weight : [#users=1] = get_attr[target=linear.weight]
# %linear_bias : [#users=1] = get_attr[target=linear.bias]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%x, %linear_weight), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%linear, %linear_bias), kwargs = {})
# %mul : [#users=1] = call_function[target=operator.mul](args = (%add, 2), kwargs = {})
# return mul
graph = tracer.trace(root=model, meta_args={'x': torch.rand(4, 4).to('meta')})
# def forward(self, x : torch.Tensor):
# linear_weight = self.linear.weight
# linear_bias = self.linear.bias
# linear = torch._C._nn.linear(x, linear_weight); x = linear_weight = None
# add = linear + linear_bias; linear = linear_bias = None
# mul = add * 2; add = None
# return mul
gm = ColoGraphModule(model, graph)
gm.recompile()
node_list = list(graph.nodes)
solver_options = SolverOptions()
strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
strategies_constructor.build_strategies_and_cost()
linear_node = node_list[3]
_param_resharding_cost_assertion(linear_node)
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_conv_module():
model = ConvModel(3, 6, 2)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %conv_weight : [#users=1] = get_attr[target=conv.weight]
# %conv_bias : [#users=1] = get_attr[target=conv.bias]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%x, %conv_weight), kwargs = {})
# %view : [#users=1] = call_method[target=view](args = (%conv_bias, [1, -1, 1, 1]), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%conv2d, %view), kwargs = {})
# %mul : [#users=1] = call_function[target=operator.mul](args = (%add, 2), kwargs = {})
# return mul
graph = tracer.trace(root=model, meta_args={'x': torch.rand(4, 3, 64, 64).to('meta')})
# def forward(self, x : torch.Tensor):
# conv_weight = self.conv.weight
# conv_bias = self.conv.bias
# conv2d = torch.conv2d(x, conv_weight); x = conv_weight = None
# view = conv_bias.view([1, -1, 1, 1]); conv_bias = None
# add = conv2d + view; conv2d = view = None
# mul = add * 2; add = None
# return mul
gm = ColoGraphModule(model, graph)
gm.recompile()
node_list = list(graph.nodes)
conv_node = node_list[3]
solver_options = SolverOptions()
strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
strategies_constructor.build_strategies_and_cost()
_param_resharding_cost_assertion(conv_node)
if __name__ == '__main__':
test_linear_module()
test_conv_module()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.solver import GraphAnalyser
from colossalai.fx import ColoGraphModule, ColoTracer
class LinearModel(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(4, 4)
self.relu = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(4, 4)
def forward(self, x1, x2):
x1 = x1 * 2
x1 = self.linear1(x1)
x1 = self.relu(x1)
x1 = self.linear2(x1)
out = x1 + x2
return out
def test_liveness_analysis():
model = LinearModel()
tracer = ColoTracer()
graph = tracer.trace(model,
meta_args={
'x1': torch.rand(4, 4, device='meta'),
'x2': torch.rand(4, 4, device='meta')
})
gm = ColoGraphModule(root=model, graph=graph, class_name=model.__class__.__name__)
graph_analyser = GraphAnalyser(gm)
liveness_list = graph_analyser.liveness_analysis()
stage_count = len(liveness_list)
# if a LiveStage is covered by another LiveStage, we just keep the larger one.
assert stage_count == 1
# a variable named `relu` must exist
# and this live var must have inplace = True
assert liveness_list[0].all_live_vars.exists('relu')
relu_var = liveness_list[0].all_live_vars.get('relu')
assert relu_var.is_inplace
# the unique vars must be fewer than all the vars because an in-place op reuses its input
all_live_vars = liveness_list[0].all_live_vars
unique_live_vars = liveness_list[0].unique_live_vars
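# exactly one duplicate is expected: `relu` is in-place, so it aliases the output of `linear1`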
assert len(unique_live_vars) + 1 == len(all_live_vars)
if __name__ == '__main__':
test_liveness_analysis()
|
import copy
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.nn.optimizer import HybridAdam
from colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper
from colossalai.tensor.process_group import ProcessGroup
from colossalai.testing import assert_close, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port, get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext, post_process_colo_init_ctx
class MLP(torch.nn.Module):
def __init__(self, in_features):
super().__init__()
self.linear_1 = torch.nn.Linear(in_features, 4 * in_features, bias=False)
self.linear_2 = torch.nn.Linear(4 * in_features, in_features, bias=False)
def forward(self, x):
x = self.linear_1(x)
x = self.linear_2(x)
return x
def check_auto_parallel_with_gemini(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = MLP(4).half().cuda()
input = torch.rand(4, 4).half().cuda()
output_compare = model(input)
loss_compare = output_compare.sum()
loss_compare.backward()
grad_compare = copy.deepcopy(model.linear_1.weight.grad)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
meta_args = {'x': torch.rand(4, 4).half().to('meta')}
gm, solution = initialize_model(model,
meta_args=meta_args,
device_mesh=device_mesh,
return_solution=True,
solver_preference='tp',
shard_option='shard_last_axis')
if rank == 0:
msg = '| TP strategy combination chosen by auto-parallel solver |'
msg_length = len(msg)
print('=' * msg_length)
print(msg)
print('=' * msg_length)
for strategy in solution:
print(strategy)
print('=' * msg_length)
dp_process_group = ProcessGroup(rank=rank, ranks=[0, 1, 2, 3], tp_degree=2, dp_degree=2)
gemini_config = dict(strict_ddp_mode=False,
device=get_current_device(),
placement_policy='cpu',
pin_memory=True,
search_range_mb=128)
post_process_colo_init_ctx(gm, device=get_current_device(), default_pg=dp_process_group)
gm = zero_model_wrapper(gm, zero_stage=3, gemini_config=gemini_config)
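# with betas=(0, 0), Adam's exp_avg reduces to the raw gradient, which the asserts below rely on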
optimizer = HybridAdam(gm.parameters(), betas=(0, 0))
optimizer = zero_optim_wrapper(gm, optimizer, initial_scale=1)
output = gm(input)
assert_close(output, output_compare)
print(f'output on rank{rank} is correct')
loss = output.sum()
optimizer.zero_grad()
optimizer.backward(loss)
optimizer.step()
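# linear_1.weight is sharded along dim 0: ranks {0, 2} hold rows 0-7, ranks {1, 3} hold rows 8-15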
if rank in (0, 2):
assert_close(list(optimizer.optim.state.values())[0]['exp_avg'].half(), grad_compare.narrow(0, 0, 8).flatten())
if rank in (1, 3):
assert_close(list(optimizer.optim.state.values())[0]['exp_avg'].half(), grad_compare.narrow(0, 8, 8).flatten())
print(f'gradient on rank{rank} is correct')
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_auto_parallel_with_gemini():
world_size = 4
run_func = partial(check_auto_parallel_with_gemini, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_auto_parallel_with_gemini()
|
import torch
from torch.fx import GraphModule
from torchvision.models import resnet50
from colossalai.auto_parallel.tensor_shard.constants import BATCHNORM_MODULE_OP
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.solver import CostGraph, GraphAnalyser, Solver, StrategiesConstructor
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx.tracer.tracer import ColoTracer
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.testing.pytest_wrapper import run_on_environment_flag
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_cost_graph():
physical_mesh_id = torch.arange(0, 8)
mesh_shape = (2, 4)
# [[0, 1, 2, 3]
# [4, 5, 6, 7]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
shape_consistency_manager = ShapeConsistencyManager()
tracer = ColoTracer()
model = resnet50(num_classes=100000)
input_sample = {'x': torch.rand(128, 3, 224, 224).to('meta')}
graph = tracer.trace(root=model, meta_args=input_sample)
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %conv1 : [#users=1] = call_module[target=conv1](args = (%x,), kwargs = {})
# %bn1 : [#users=1] = call_module[target=bn1](args = (%conv1,), kwargs = {})
# %relu : [#users=1] = call_module[target=relu](args = (%bn1,), kwargs = {})
# %maxpool : [#users=2] = call_module[target=maxpool](args = (%relu,), kwargs = {})
# %layer1_0_conv1 : [#users=1] = call_module[target=layer1.0.conv1](args = (%maxpool,), kwargs = {})
# %layer1_0_bn1 : [#users=1] = call_module[target=layer1.0.bn1](args = (%layer1_0_conv1,), kwargs = {})
# %layer1_0_relu : [#users=1] = call_module[target=layer1.0.relu](args = (%layer1_0_bn1,), kwargs = {})
# %layer1_0_conv2 : [#users=1] = call_module[target=layer1.0.conv2](args = (%layer1_0_relu,), kwargs = {})
# %layer1_0_bn2 : [#users=1] = call_module[target=layer1.0.bn2](args = (%layer1_0_conv2,), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%layer1_0_bn2, %maxpool), kwargs = {})
# %layer1_0_relu_1 : [#users=2] = call_module[target=layer1.0.relu](args = (%add,), kwargs = {})
# %layer1_1_conv1 : [#users=1] = call_module[target=layer1.1.conv1](args = (%layer1_0_relu_1,), kwargs = {})
# %layer1_1_bn1 : [#users=1] = call_module[target=layer1.1.bn1](args = (%layer1_1_conv1,), kwargs = {})
# %layer1_1_relu : [#users=1] = call_module[target=layer1.1.relu](args = (%layer1_1_bn1,), kwargs = {})
# %layer1_1_conv2 : [#users=1] = call_module[target=layer1.1.conv2](args = (%layer1_1_relu,), kwargs = {})
# %layer1_1_bn2 : [#users=1] = call_module[target=layer1.1.bn2](args = (%layer1_1_conv2,), kwargs = {})
# %add_1 : [#users=1] = call_function[target=operator.add](args = (%layer1_1_bn2, %layer1_0_relu_1), kwargs = {})
# ...
# %avgpool : [#users=1] = call_module[target=avgpool](args = (%layer4_2_relu_1,), kwargs = {})
# %flatten : [#users=1] = call_function[target=torch.flatten](args = (%avgpool, 1), kwargs = {})
# %fc : [#users=1] = call_module[target=fc](args = (%flatten,), kwargs = {})
# return fc
gm = GraphModule(model, graph, model.__class__.__name__)
gm.recompile()
graph_analyser = GraphAnalyser(gm)
liveness_list = graph_analyser.liveness_analysis()
solver_options = SolverOptions()
strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
strategies_constructor.build_strategies_and_cost()
cost_graph = CostGraph(strategies_constructor.leaf_strategies)
cost_graph.simplify_graph()
solver = Solver(gm.graph, strategies_constructor, cost_graph, graph_analyser)
ret = solver.call_solver_serialized_args()
print(ret[0])
print(solver.last_s_val)
strategies_list = solver.last_s_val
computation_cost = 0
communication_cost = 0
communication_cost_bn = 0
memory_cost = 0
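# accumulate the compute/communication/memory costs of the strategy the solver picked for each node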
for index, node in enumerate(graph.nodes):
if node.op == 'call_module':
submod = node.graph.owning_module.get_submodule(node.target)
if type(submod) in BATCHNORM_MODULE_OP:
communication_cost_bn += node.strategies_vector[strategies_list[index]].communication_cost.total
print(node.name, node.strategies_vector[strategies_list[index]].name)
computation_cost += node.strategies_vector[strategies_list[index]].compute_cost.total
communication_cost += node.strategies_vector[strategies_list[index]].communication_cost.total
node_memory_cost = node.strategies_vector[strategies_list[index]].memory_cost.total
if isinstance(node_memory_cost, tuple):
node_memory_cost = node_memory_cost[0]
memory_cost += node_memory_cost.activation + node_memory_cost.parameter
print(f'computation cost is {computation_cost}')
print(f'communication cost is {communication_cost}')
print(f'memory cost is {memory_cost}')
print(f'bn communication cost is {communication_cost_bn}')
if __name__ == '__main__':
test_cost_graph()
|
import copy
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
class MLP(torch.nn.Module):
def __init__(self, in_features):
super().__init__()
self.linear_1 = torch.nn.Linear(in_features, 4 * in_features, bias=False)
self.linear_2 = torch.nn.Linear(4 * in_features, in_features, bias=False)
def forward(self, x):
x = self.linear_1(x)
x = self.linear_2(x)
return x
def check_compatibility_with_ddp(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = MLP(4).cuda()
input = torch.rand(4, 4).cuda()
output_compare = model(input)
loss_compare = output_compare.sum()
loss_compare.backward()
grad_compare = copy.deepcopy(model.linear_1.weight.grad)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
meta_args = {'x': torch.rand(4, 4).to('meta')}
gm, solution = initialize_model(model,
meta_args=meta_args,
device_mesh=device_mesh,
return_solution=True,
solver_preference='tp',
shard_option='shard_last_axis')
msg = '| TP strategy combination chosen by auto-parallel solver |'
msg_length = len(msg)
if rank == 0:
print('=' * msg_length)
print(msg)
print('=' * msg_length)
for strategy in solution:
print(strategy)
print('=' * msg_length)
dp_process_group = None
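# process_groups_dict[0] holds (ranks, handle) pairs along mesh dim 0; the group containing this rank serves as the DP group here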
for (ranks, process_group_handle) in device_mesh.process_groups_dict[0]:
if rank in ranks:
dp_process_group = process_group_handle
assert dp_process_group is not None
gm = DDP(gm, process_group=dp_process_group)
output = gm(input)
assert_close(output, output_compare)
print(f'output on rank{rank} is correct')
loss = output.sum()
loss.backward()
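# linear_1.weight is sharded along dim 0: ranks {0, 2} hold rows 0-7, ranks {1, 3} hold rows 8-15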
if rank in (0, 2):
assert_close(gm.module.module.linear_1.weight.grad, grad_compare.narrow(0, 0, 8))
if rank in (1, 3):
assert_close(gm.module.module.linear_1.weight.grad, grad_compare.narrow(0, 8, 8))
print(f'gradient on rank{rank} is correct')
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_compatibility_with_ddp():
world_size = 4
run_func = partial(check_compatibility_with_ddp, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_compatibility_with_ddp()
|
import copy
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
class ConvModel(nn.Module):
def __init__(self, c_in, c_out):
super().__init__()
self.conv = nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False)
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x)
return x
def check_apply(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
input = torch.rand(4, 4, 4, 4).cuda()
test_input = copy.deepcopy(input)
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %conv : [#users=1] = call_module[target=conv](args = (%x,), kwargs = {})
# %flatten : [#users=1] = call_function[target=torch.flatten](args = (%conv,), kwargs = {})
# return flatten
model = ConvModel(4, 4).cuda()
test_model = copy.deepcopy(model)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
meta_args = {'x': torch.rand(4, 4, 4, 4).to('meta')}
gm = initialize_model(model, meta_args, device_mesh)
output = gm(input)
origin_output = test_model(test_input)
assert output.equal(origin_output)
origin_loss = origin_output.sum()
loss = output.sum()
origin_loss.backward()
loss.backward()
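# conv.weight is sharded along the output-channel dim, one channel per rank, so each rank checks its own slice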
grad_0 = test_model.conv.weight.grad.narrow(0, 0, 1)
grad_1 = test_model.conv.weight.grad.narrow(0, 1, 1)
grad_2 = test_model.conv.weight.grad.narrow(0, 2, 1)
grad_3 = test_model.conv.weight.grad.narrow(0, 3, 1)
if rank == 0:
assert_close(gm.module.conv.weight.grad.data, grad_0.data)
elif rank == 1:
assert_close(gm.module.conv.weight.grad.data, grad_1.data)
elif rank == 2:
assert_close(gm.module.conv.weight.grad.data, grad_2.data)
elif rank == 3:
assert_close(gm.module.conv.weight.grad.data, grad_3.data)
else:
raise ValueError(f'rank {rank} does not exist.')
# skipped in CI because pulp is not installed there; run with the AUTO_PARALLEL flag to enable
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_apply():
world_size = 4
run_func = partial(check_apply, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_apply()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
class LinearModel(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
x = self.linear(x)
x = x * 2
return x
class ConvModel(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
bias=bias)
def forward(self, x):
x = self.conv(x)
x = x * 2
return x
def check_linear_module(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = LinearModel(4, 8).cuda()
input = torch.rand(4, 4).cuda()
output_compare = model(input)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
meta_args = {'x': torch.rand(4, 4).to('meta')}
gm = initialize_model(model, meta_args=meta_args, device_mesh=device_mesh)
output = gm(input)
assert_close(output, output_compare)
def check_conv_module(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = ConvModel(3, 6, 2).cuda()
input = torch.rand(4, 3, 64, 64).cuda()
output_compare = model(input)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
meta_args = {'x': torch.rand(4, 3, 64, 64).to('meta')}
gm = initialize_model(model, meta_args=meta_args, device_mesh=device_mesh)
output = gm(input)
assert_close(output, output_compare)
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_bias_addition_module():
world_size = 4
run_func_linear = partial(check_linear_module, world_size=world_size, port=free_port())
mp.spawn(run_func_linear, nprocs=world_size)
run_func_conv = partial(check_conv_module, world_size=world_size, port=free_port())
mp.spawn(run_func_conv, nprocs=world_size)
if __name__ == '__main__':
test_bias_addition_module()
|
from functools import partial
from typing import Optional, Tuple, Union
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from transformers.pytorch_utils import Conv1D
from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.tracer import ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
HIDDEN_SIZE = 16
class GPT2MLPWithCkpt(nn.Module):
def __init__(self, intermediate_size, hidden_size):
super().__init__()
embed_dim = hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = torch.nn.ReLU()
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = checkpoint(self.c_proj, hidden_states)
hidden_states = self.act(hidden_states)
return hidden_states
def check_act_ckpt(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = GPT2MLPWithCkpt(intermediate_size=4 * HIDDEN_SIZE, hidden_size=HIDDEN_SIZE)
input_sample = {
'hidden_states': torch.rand(1, 64, HIDDEN_SIZE).to('meta'),
}
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
gm = initialize_model(model, input_sample, device_mesh)
code = gm.module.graph.python_code('self').src
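# the recompiled source is expected to route collectives through the runtime apply pass and to wrap the checkpointed region in colossalai's activation checkpoint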
assert "runtime_comm_spec_apply_1 = colossalai_auto_parallel_passes_runtime_apply_pass_runtime_comm_spec_apply(linear_1, comm_actions_dict, 12, 'linear_1')" in code
assert "view_3 = colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, False, view_1, comm_actions_dict, use_reentrant=True)" in code
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_mlp_layer():
world_size = 4
run_func = partial(check_act_ckpt, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_mlp_layer()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import ShardingStrategy, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
if torch.__version__ >= '1.12.0':
from colossalai.auto_parallel.meta_profiler import MetaInfo, meta_register
class MyModule(nn.Module):
def __init__(self, in_features=64, out_features=128):
super().__init__()
self.fc_weight = nn.Parameter(torch.randn(out_features, in_features))
def forward(self, input):
return nn.functional.linear(input, self.fc_weight)
def _linear_module_mem_test(rank, world_size, port):
"""This function is for linear memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether linear module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.Linear(64, 128, bias=False)).cuda()
input = torch.rand(8, 8, 16, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# memory test
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=1,
strategy_number=13,
input_args=[input],
meta_arg_names=["input"])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_module_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_linear_module_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
def _linear_function_mem_test(rank, world_size, port):
"""This function is for linear memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether linear module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = MyModule().cuda()
input = torch.rand(8, 8, 16, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# memory test
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=2,
strategy_number=24,
input_args=[input],
meta_arg_names=["input"])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_function_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_linear_function_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
if __name__ == '__main__':
# test_linear_module_meta_concrete_info_match()
test_linear_function_meta_concrete_info_match()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.meta_profiler import meta_register
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy, print_results
def _ReLU_module_mem_test(rank, world_size, port):
"""This function is for ReLU memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.ReLU()).cuda()
input = torch.rand(4, 128, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 1
# total number of target node strategies
strategy_number = 1
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_ReLU_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_ReLU_module_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
@pytest.mark.skipif(torch.__version__ < '1.12.0', reason="need pytorch 1.12.0 or higher for aten level operations")
def test_softmax_meta_info():
meta_func = meta_register.get(torch.nn.functional.softmax)
# construct meta tensors
input_tensor = torch.rand(256, 1024, device="meta")
output_tensor = torch.rand(256, 1024, device="meta")
softmax_dim = 0
# construct operation data
input_data = OperationData(name='input', type=OperationDataType.ARG, data=input_tensor)
output_data = OperationData(name='output', type=OperationDataType.OUTPUT, data=output_tensor)
softmax_dim_data = OperationData(name='softmax_dim', type=OperationDataType.ARG, data=softmax_dim)
# construct args and kwargs
args = [input_data, softmax_dim_data, output_data]
kwargs = {'inplace': False}
# estimated results
compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
# actual results
input_real_tensor = torch.rand(256, 1024, device="cuda")
input_real_tensor.requires_grad = True
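# measure the allocated/peak memory deltas around fwd and bwd on a real tensor to compare against the meta estimates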
# fwd
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
output_real_tensor = torch.nn.functional.softmax(input_real_tensor, dim=softmax_dim)
fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
# bwd
upstream_grad = torch.rand_like(output_real_tensor)
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
torch.autograd.backward(output_real_tensor, upstream_grad)
bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
print_results([input_real_tensor], [output_real_tensor], compute_cost, memory_cost, fwd_allocated, fwd_peak,
bwd_allocated, bwd_peak)
if __name__ == '__main__':
# test_ReLU_meta_concrete_info_match()
test_softmax_meta_info()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
def _adaptiveavgpool_module_mem_test(rank, world_size, port):
"""This function is for AdaptiveAvgPool memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.AdaptiveAvgPool2d((16, 16))).cuda()
input = torch.rand(4, 128, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 1
# total number of target strategies
strategy_number = 1
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_adaptiveavgpool_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_adaptiveavgpool_module_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
def _maxpool_module_mem_test(rank, world_size, port):
"""This function is for MaxPool memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.MaxPool2d((16, 16))).cuda()
input = torch.rand(4, 128, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 1
# total number of target node strategies
strategy_number = 9
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_maxpool_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_maxpool_module_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
if __name__ == '__main__':
test_adaptiveavgpool_meta_concrete_info_match()
test_maxpool_meta_concrete_info_match()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
class ConvFunctionModule(nn.Module):
def __init__(self, in_channels=4, out_channels=64, kernel_size=3):
super().__init__()
self.conv_weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
def forward(self, input):
return nn.functional.conv2d(input, self.conv_weight)
def _conv_module_mem_test(rank, bias, world_size, port):
"""This function is for conv memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.Conv2d(4, 64, 3, padding=1, bias=bias)).cuda()
input = torch.rand(4, 4, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 1
# total number of target node strategies
strategy_number = 16
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_conv_meta_concrete_info_match(bias=False):
world_size = 4
run_func_module = partial(_conv_module_mem_test, bias=bias, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
def _conv_function_mem_test(rank, world_size, port):
"""This function is for conv function memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = ConvFunctionModule().cuda()
input = torch.rand(4, 4, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 2
# total number of target node strategies
strategy_number = 16
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_conv_function_concrete_info_match():
world_size = 4
run_func_module = partial(_conv_function_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
if __name__ == '__main__':
# test_conv_meta_concrete_info_match()
test_conv_function_concrete_info_match()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
class BinaryElementwiseOpModule(nn.Module):
def __init__(self, token=torch.add, shape=64) -> None:
super().__init__()
self.token = token
self.param = nn.Parameter(torch.rand(shape))
def forward(self, input):
# apply the configured binary op rather than hard-coding `+`
return self.token(input, self.param)
def _binary_elementwise_mem_test(rank, world_size, port):
"""This function is for binary elementwise ops memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = BinaryElementwiseOpModule(token=torch.add, shape=1024).cuda()
input = torch.rand(32, 1024).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 2
# total number of target node strategies
strategy_number = 9
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_binary_elementwise_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_binary_elementwise_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
if __name__ == '__main__':
test_binary_elementwise_meta_concrete_info_match()
|
import copy
from pprint import pprint
from typing import Dict, List, Optional
import torch
from torch.fx import GraphModule
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationDataType, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.solver import StrategiesConstructor
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx.tracer.tracer import ColoTracer
if torch.__version__ >= '1.12.0':
from colossalai.auto_parallel.meta_profiler import MetaInfo
def mem_test_for_node_strategy(rank: int,
model: torch.nn.Module,
device_mesh: DeviceMesh,
node_index: int,
strategy_number: int,
input_args: List[torch.Tensor],
meta_arg_names: List[str],
                               input_kwargs: Optional[Dict[str, torch.Tensor]] = None):
# use None as the default to avoid sharing a mutable default dict between calls
input_kwargs = input_kwargs if input_kwargs is not None else {}
for strategy_index in range(strategy_number):
# copy the model so we never run backward through the same graph more than once
model_to_shard, args_to_shard, kwargs_to_shard = copy.deepcopy(model), copy.deepcopy(input_args), copy.deepcopy(
input_kwargs)
tracer = ColoTracer()
input_sample = {}
for input_arg, meta_arg_name in zip(input_args, meta_arg_names):
input_sample[meta_arg_name] = torch.rand(input_arg.shape).to('meta')
for meta_kwarg_name, input_kwarg in input_kwargs.items():
input_sample[meta_kwarg_name] = torch.rand(input_kwarg.shape).to('meta')
graph = tracer.trace(root=model_to_shard, meta_args=input_sample)
gm = GraphModule(model_to_shard, graph, model_to_shard.__class__.__name__)
solver_options = SolverOptions()
strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
strategies_constructor.build_strategies_and_cost()
target_node = list(graph.nodes)[node_index]
# solution construction
# construct the strategy for the target node
solution_len = len(strategies_constructor.leaf_strategies)
solution = [0] * solution_len
solution[node_index] = strategy_index
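# every node defaults to strategy 0; only the target node runs the strategy under test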
# construct the strategy for the output node
placeholder_strategy = list(graph.nodes)[-1].strategies_vector[0]
output_key = next(key for key in target_node.strategies_vector[strategy_index].sharding_specs.keys()
if key.type == OperationDataType.OUTPUT)
placeholder_strategy.sharding_specs[output_key] = target_node.strategies_vector[strategy_index].sharding_specs[
output_key]
gm, sharding_spec_dict, origin_spec_dict, comm_actions_dict = runtime_preparation_pass(
gm, solution, device_mesh, strategies_constructor)
gm = runtime_apply_pass(gm)
gm.recompile()
gm: GraphModule
num_of_strategies = len(target_node.strategies_vector)
if rank == 0:
print("=======================")
print(f"#strategy_index: {strategy_index + 1}/{num_of_strategies}")
pprint(target_node.strategies_vector[strategy_index])
# warmup
with torch.no_grad():
output = gm(*args_to_shard,
sharding_spec_convert_dict=sharding_spec_dict,
origin_node_sharding_spec_dict=origin_spec_dict,
comm_actions_dict=comm_actions_dict,
**kwargs_to_shard)
del output
# forward memory compare
if rank == 0:
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
output = gm(*args_to_shard,
sharding_spec_convert_dict=sharding_spec_dict,
origin_node_sharding_spec_dict=origin_spec_dict,
comm_actions_dict=comm_actions_dict,
**kwargs_to_shard)
if rank == 0:
# print forward memory allocated and peak memory in KB
print(
    f"forward memory allocated: {(torch.cuda.memory_allocated() - mem_stamp0) / 1024} KB, peak memory: {(torch.cuda.max_memory_allocated() - mem_stamp0) / 1024} KB"
)
# backward memory compare
grad_tensors = torch.ones_like(output)
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
torch.autograd.backward(output, grad_tensors)
if rank == 0:
# print backward memory allocated and peak memory in KB
print(
    f"backward memory allocated: {(torch.cuda.memory_allocated() - mem_stamp0) / 1024} KB, peak memory: {(torch.cuda.max_memory_allocated() - mem_stamp0) / 1024} KB"
)
# estimated memory
if target_node.op == "call_module":
metainfo = MetaInfo(target_node.strategies_vector[strategy_index],
target_node.graph.owning_module.get_submodule(target_node.target))
else:
metainfo = MetaInfo(target_node.strategies_vector[strategy_index], target_node.target)
print("estimated memory:")
print(
f"forward activation: {metainfo.memory_cost.fwd.activation / 1024} kb, forward param: {metainfo.memory_cost.fwd.parameter / 1024} kb"
)
print(
f"forward temp: {metainfo.memory_cost.fwd.temp / 1024} kb, forward buffer: {metainfo.memory_cost.fwd.buffer / 1024} kb"
)
print(
f"backward activation: {metainfo.memory_cost.bwd.activation / 1024} kb, backward param: {metainfo.memory_cost.bwd.parameter / 1024} kb"
)
print(
f"backward temp: {metainfo.memory_cost.bwd.temp / 1024} kb, backward buffer: {metainfo.memory_cost.bwd.buffer / 1024} kb"
)
print("=======================")
def print_results(input: List[torch.Tensor], output: List[torch.Tensor], compute_cost: TrainCycleItem,
memory_cost: TrainCycleItem, fwd_allocated, fwd_peak, bwd_allocated, bwd_peak):
"""Print the results of the meta information test.
Args:
input (List[torch.Tensor]): input tensors
output (List[torch.Tensor]): output tensors
compute_cost (TrainCycleItem): compute cost estimated by meta_func
memory_cost (TrainCycleItem): memory cost estimated by meta_func
fwd_allocated: real forward memory allocated
fwd_peak: real forward peak memory stats
bwd_allocated: real backward memory allocated
bwd_peak: real backward peak memory stats
"""
print("=====================")
print(f"input shapes: {[tensor.shape for tensor in input]}")
print(f"output shapes: {[tensor.shape for tensor in output]}")
# estimated results
print("Estimated Results")
# compute cost
print("compute_cost:")
print(f" fwd: {compute_cost.fwd}")
print(f" bwd: {compute_cost.bwd}")
# memory cost
print("memory_cost:")
# fwd
print(f" fwd activation: {memory_cost.fwd.activation / 1024} KB")
print(f" fwd buffer: {memory_cost.fwd.buffer / 1024} KB")
print(f" fwd temp: {memory_cost.fwd.temp / 1024} KB")
print(f" fwd parameter: {memory_cost.fwd.parameter / 1024} KB")
# bwd
print(f" bwd activation: {memory_cost.bwd.activation / 1024} KB")
print(f" bwd buffer: {memory_cost.bwd.buffer / 1024} KB")
print(f" bwd temp: {memory_cost.bwd.temp / 1024} KB")
print(f" bwd parameter: {memory_cost.bwd.parameter / 1024} KB")
# actual results
print("Actual Results")
print("memory_cost:")
# fwd
print(f" fwd allocated: {fwd_allocated / 1024} KB")
print(f" fwd peak: {fwd_peak / 1024} KB")
# bwd
print(f" bwd allocated: {bwd_allocated / 1024} KB")
print(f" bwd peak: {bwd_peak / 1024} KB")
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
MemoryCost,
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
TrainCycleItem,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy, print_results
if torch.__version__ >= '1.12.0':
from colossalai.auto_parallel.meta_profiler import MetaInfo, meta_register
def _batchnorm_module_mem_test(rank, world_size, port):
"""This function is for batchnorm memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.BatchNorm2d(128)).cuda()
input = torch.rand(4, 128, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 1
# total number of target node strategies
strategy_number = 9
mem_test_for_node_strategy(rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=['input'])
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_batchnorm_meta_concrete_info_match():
world_size = 4
run_func_module = partial(_batchnorm_module_mem_test, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
@pytest.mark.skipif(torch.__version__ < '1.12.0', reason='need pytorch 1.12.0 or higher for aten level operations')
@parameterize('tensor_shape', [
[256, 1024],
[1024, 256],
])
def test_layernorm_meta_info(tensor_shape):
meta_func = meta_register.get(torch.nn.LayerNorm)
# construct input
input_tensor = torch.rand(*tensor_shape, device="meta")
output_tensor = torch.rand(*tensor_shape, device="meta")
weight_tensor = torch.rand(tensor_shape[1], device="meta")
bias_tensor = torch.rand(tensor_shape[1], device="meta")
# construct operation data
input_data = OperationData(name="input", type=OperationDataType.ARG, data=input_tensor)
output_data = OperationData(name="output", type=OperationDataType.OUTPUT, data=output_tensor)
weight_data = OperationData(name="weight", type=OperationDataType.PARAM, data=weight_tensor)
bias_data = OperationData(name="bias", type=OperationDataType.PARAM, data=bias_tensor)
# construct args and kwargs
args = [input_data, output_data, weight_data, bias_data]
kwargs = {'inplace': False}
# estimated results
compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
# actual results
input_real_tensor = torch.rand(*tensor_shape, device="cuda:0")
input_real_tensor.requires_grad = True
ln_module = torch.nn.LayerNorm(tensor_shape[1]).cuda()
# fwd
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
output_real_tensor = ln_module(input_real_tensor)
fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
# bwd
upstream_grad = torch.rand_like(output_real_tensor)
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
torch.autograd.backward(output_real_tensor, upstream_grad)
bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
compute_cost: TrainCycleItem
memory_cost: TrainCycleItem
print_results([input_real_tensor], [output_real_tensor], compute_cost, memory_cost, fwd_allocated, fwd_peak,
bwd_allocated, bwd_peak)
if __name__ == '__main__':
test_batchnorm_meta_concrete_info_match()
test_layernorm_meta_info()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
MemoryCost,
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
TrainCycleItem,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import print_results
if torch.__version__ >= '1.12.0':
from colossalai.auto_parallel.meta_profiler import MetaInfo, meta_register
@pytest.mark.skipif(torch.__version__ < '1.12.0', reason="need pytorch 1.12.0 or higher for aten level operations")
@parameterize(
'tensor_shapes',
[
[[128], [128]], # dot product
[[64, 128], [128]], # mat-vec
[[128], [128, 64]], # vec-mat
[[64, 64, 128], [128]], # batched mat-vec
[[128], [64, 128, 64]], # vec-batched mat
[[64, 128], [128, 192]], # mat-mat
[[64, 64, 128], [128, 192]], # batched mat-mat
[[64, 128], [64, 128, 192]], # mat-batched mat
[[64, 64, 128], [64, 128, 192]], # batched mat-batched mat (matched batch dims)
[[64, 1, 64, 128], [64, 128, 192]], # batched mat-batched mat (unmatched batch dims)
])
def test_matmul_function_meta_info(tensor_shapes):
meta_func = meta_register.get(torch.matmul)
# construct meta tensors
input_tensor = torch.rand(*tensor_shapes[0], device="meta")
other_tensor = torch.rand(*tensor_shapes[1], device="meta")
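# matmul on meta tensors performs shape inference only, yielding the broadcasted output shape without allocating data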
output_tensor = torch.matmul(input_tensor, other_tensor)
# construct operation data
input_data = OperationData(
name="input",
data=input_tensor,
type=OperationDataType.ARG,
logical_shape=input_tensor.shape,
)
other_data = OperationData(
name="other",
data=other_tensor,
type=OperationDataType.ARG,
logical_shape=other_tensor.shape,
)
output_data = OperationData(
name="output",
data=output_tensor,
type=OperationDataType.OUTPUT,
logical_shape=output_tensor.shape,
)
# construct args and kwargs
args = [input_data, other_data, output_data]
kwargs = {'inplace': False}
# estimated results
compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
# actual results
input_real_tensor = torch.rand(*tensor_shapes[0], device="cuda:0")
other_real_tensor = torch.rand(*tensor_shapes[1], device="cuda:0")
input_real_tensor.requires_grad = True
other_real_tensor.requires_grad = True
# fwd
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
output_real_tensor = torch.matmul(input_real_tensor, other_real_tensor)
fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
# bwd
upstream_grad = torch.rand_like(output_real_tensor)
torch.cuda.reset_peak_memory_stats()
mem_stamp0 = torch.cuda.memory_allocated()
torch.autograd.backward(output_real_tensor, upstream_grad)
bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
compute_cost: TrainCycleItem
memory_cost: TrainCycleItem
print_results([input_real_tensor, other_real_tensor], [output_real_tensor], compute_cost, memory_cost,
fwd_allocated, fwd_peak, bwd_allocated, bwd_peak)
if __name__ == '__main__':
test_matmul_function_meta_info()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.getattr_handler import GetattrHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
class GetattrModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(4, 16, 3, padding=1, bias=False)
def forward(self, input):
weight = self.conv.weight
return weight
def test_getattr_handler():
model = GetattrModel()
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=0] = placeholder[target=input]
# %conv_weight : [#users=1] = get_attr[target=conv.weight]
# return conv_weight
graph = tracer.trace(model, meta_args={'input': torch.rand(4, 4, 64, 64).to('meta')})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
getattr_node = list(graph.nodes)[1]
getattr_strategies_vector = StrategiesVector(getattr_node)
# build handler
getattr_handler = GetattrHandler(node=getattr_node,
device_mesh=device_mesh,
strategies_vector=getattr_strategies_vector)
getattr_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = getattr_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['output'].name == "conv_weight"
assert mapping['output'].data.shape == torch.Size((16, 4, 3, 3))
assert mapping['output'].type == OperationDataType.OUTPUT
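# conv.weight has shape (16, 4, 3, 3), so the strategies shard mesh axes S0/S1 over its four dims (R = replicated)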
strategy_name_list = [val.name for val in getattr_handler.strategies_vector]
assert 'get_attr [S0, S1, R, R]' in strategy_name_list
assert 'get_attr [S1, S0, R, R]' in strategy_name_list
assert 'get_attr [S01, R, R, R]' in strategy_name_list
assert 'get_attr [R, S01, R, R]' in strategy_name_list
assert 'get_attr [S0, R, R, R]' in strategy_name_list
assert 'get_attr [R, S0, R, R]' in strategy_name_list
assert 'get_attr [S1, R, R, R]' in strategy_name_list
assert 'get_attr [R, S1, R, R]' in strategy_name_list
assert 'get_attr [R, R, R, R]' in strategy_name_list
if __name__ == '__main__':
test_getattr_handler()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import SplitHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class ConvSplitModel(nn.Module):
def __init__(self, split_size, split_dim):
super().__init__()
self.split_size = split_size
self.split_dim = split_dim
def forward(self, input, other):
conv_node = nn.functional.conv2d(input, other, bias=None)
split_node = conv_node.split(self.split_size, dim=self.split_dim)
return split_node
class LinearSplitModel(nn.Module):
def __init__(self, split_size, split_dim):
super().__init__()
self.split_size = split_size
self.split_dim = split_dim
def forward(self, input, other):
linear_node = nn.functional.linear(input, other, bias=None)
split_node = linear_node.split(self.split_size, dim=self.split_dim)
return split_node
def check_split_handler(rank, split_size, split_dim, model_cls, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = model_cls(split_size=split_size, split_dim=split_dim).cuda()
if model_cls.__name__ == 'ConvSplitModel':
input = torch.rand(8, 8, 66, 66).to('cuda')
other = torch.rand(16, 8, 3, 3).to('cuda')
# index of conv node in computation graph
node_index = 2
# total number of conv strategies
strategy_number = 16
if model_cls.__name__ == 'LinearSplitModel':
input = torch.rand(8, 16, 64, 32).to('cuda')
other = torch.rand(64, 32).to('cuda')
# index of linear node in computation graph
node_index = 2
# total number of linear strategies
strategy_number = 23
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input, other],
meta_arg_names=['input', 'other'],
node_type='following')
tracer = ColoTracer()
if model_cls.__name__ == 'ConvSplitModel':
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
# %split : [#users=1] = call_method[target=split](args = (%conv2d,), kwargs = {})
# return split
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 8, 66, 66).to('meta'),
"other": torch.rand(16, 8, 3, 3).to('meta'),
})
if model_cls.__name__ == 'LinearSplitModel':
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
# %split : [#users=1] = call_method[target=split](args = (%linear,), kwargs = {})
# return split
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 16, 64, 32).to('meta'),
"other": torch.rand(64, 32).to('meta'),
})
gm = ColoGraphModule(model, graph)
previous_mod_node = list(graph.nodes)[2]
split_node = list(graph.nodes)[3]
split_strategies_vector = StrategiesVector(split_node)
previous_strategies_vector = StrategiesVector(previous_mod_node)
# build handler
if model_cls.__name__ == 'ConvSplitModel':
conv_handler = ConvFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
conv_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
if model_cls.__name__ == 'LinearSplitModel':
assert len(previous_strategies_vector) == 0
linear_handler = LinearFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
linear_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
split_handler = SplitHandler(node=split_node, device_mesh=device_mesh, strategies_vector=split_strategies_vector)
split_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = split_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
if model_cls.__name__ == 'ConvSplitModel':
assert mapping['input'].name == "conv2d"
else:
assert mapping['input'].name == "linear"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([8, 16, 64, 64])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([8, 16, 64, 64])
assert mapping['output'].name == "split"
split_items = torch.empty([8, 16, 64, 64]).split(split_size, split_dim)
assert mapping['output'].logical_shape == tuple([item.shape for item in split_items])
assert mapping['output'].type == OperationDataType.OUTPUT
    # split handler is a following strategy handler, so its number of strategies equals the predecessor node's.
assert len(split_strategies_vector) == len(previous_strategies_vector)
strategy_name_list = [strategy.name for strategy in split_strategies_vector]
if model_cls.__name__ == 'ConvSplitModel':
if split_dim == 0:
assert '[R, S1, R, R]_0' in strategy_name_list
assert '[R, S0, R, R]_1' in strategy_name_list
assert '[R, R, R, R]_2' in strategy_name_list
assert '[R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, R]_4' in strategy_name_list
assert '[R, R, R, R]_5' in strategy_name_list
assert '[R, S1, R, R]_6' in strategy_name_list
assert '[R, S0, R, R]_7' in strategy_name_list
assert '[R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R]_9' in strategy_name_list
assert '[R, S0, R, R]_10' in strategy_name_list
assert '[R, S1, R, R]_11' in strategy_name_list
assert '[R, R, R, R]_12' in strategy_name_list
assert '[R, R, R, R]_13' in strategy_name_list
assert '[R, R, R, R]_14' in strategy_name_list
assert '[R, S01, R, R]_15' in strategy_name_list
if split_dim == 1:
assert '[S0, R, R, R]_0' in strategy_name_list
assert '[S1, R, R, R]_1' in strategy_name_list
assert '[S0, R, R, R]_2' in strategy_name_list
assert '[S1, R, R, R]_3' in strategy_name_list
assert '[S0, R, R, R]_4' in strategy_name_list
assert '[S1, R, R, R]_5' in strategy_name_list
assert '[R, R, R, R]_6' in strategy_name_list
assert '[R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R]_9' in strategy_name_list
assert '[R, R, R, R]_10' in strategy_name_list
assert '[R, R, R, R]_11' in strategy_name_list
assert '[R, R, R, R]_12' in strategy_name_list
assert '[S01, R, R, R]_13' in strategy_name_list
assert '[R, R, R, R]_14' in strategy_name_list
assert '[R, R, R, R]_15' in strategy_name_list
if model_cls.__name__ == 'LinearSplitModel':
if split_dim == 0:
assert '[R, R, R, S1]_11' in strategy_name_list
assert '[R, S0, R, S1]_12' in strategy_name_list
assert '[R, R, S0, S1]_13' in strategy_name_list
assert '[R, R, R, S0]_14' in strategy_name_list
assert '[R, S1, R, S0]_15' in strategy_name_list
assert '[R, R, S1, S0]_16' in strategy_name_list
assert '[R, R, R, R]_17' in strategy_name_list
assert '[R, S0, R, R]_18' in strategy_name_list
assert '[R, R, S0, R]_19' in strategy_name_list
assert '[R, R, R, R]_20' in strategy_name_list
assert '[R, S1, R, R]_21' in strategy_name_list
assert '[R, R, S1, R]_22' in strategy_name_list
assert '[R, R, R, S1]_10' in strategy_name_list
assert '[R, R, R, S0]_9' in strategy_name_list
assert '[R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0]_6' in strategy_name_list
assert '[R, R, R, S1]_5' in strategy_name_list
assert '[R, R, R, R]_0' in strategy_name_list
assert '[R, S01, R, R]_1' in strategy_name_list
assert '[R, R, S01, R]_2' in strategy_name_list
assert '[R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01]_4' in strategy_name_list
if split_dim == 1:
assert '[S0, R, R, S1]_11' in strategy_name_list
assert '[R, R, R, S1]_12' in strategy_name_list
assert '[R, R, S0, S1]_13' in strategy_name_list
assert '[S1, R, R, S0]_14' in strategy_name_list
assert '[R, R, R, S0]_15' in strategy_name_list
assert '[R, R, S1, S0]_16' in strategy_name_list
assert '[S0, R, R, R]_17' in strategy_name_list
assert '[R, R, R, R]_18' in strategy_name_list
assert '[R, R, S0, R]_19' in strategy_name_list
assert '[S1, R, R, R]_20' in strategy_name_list
assert '[R, R, R, R]_21' in strategy_name_list
assert '[R, R, S1, R]_22' in strategy_name_list
assert '[R, R, R, S1]_10' in strategy_name_list
assert '[R, R, R, S0]_9' in strategy_name_list
assert '[R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0]_6' in strategy_name_list
assert '[R, R, R, S1]_5' in strategy_name_list
assert '[S01, R, R, R]_0' in strategy_name_list
assert '[R, R, R, R]_1' in strategy_name_list
assert '[R, R, S01, R]_2' in strategy_name_list
assert '[R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01]_4' in strategy_name_list
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize('split_size', [2])
@parameterize('split_dim', [0, 1, 2])
@parameterize('model_cls', [ConvSplitModel, LinearSplitModel])
def test_split_handler(split_size, split_dim, model_cls):
world_size = 4
run_func = partial(check_split_handler,
split_size=split_size,
split_dim=split_dim,
model_cls=model_cls,
world_size=world_size,
port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_split_handler()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import DefaultReshapeHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing.pytest_wrapper import run_on_environment_flag
class ReshapeModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, other):
conv_node = nn.functional.conv2d(input, other)
reshape_node = conv_node.view(2, -1)
return reshape_node
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_reshape_handler():
model = ReshapeModel()
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
# %view : [#users=1] = call_method[target=view](args = (%conv2d, 2, -1), kwargs = {})
# return view
graph = tracer.trace(model,
meta_args={
"input": torch.rand(4, 4, 64, 64).to('meta'),
"other": torch.rand(4, 16, 3, 3).to('meta'),
})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
conv_mod_node = list(graph.nodes)[2]
reshape_node = list(graph.nodes)[3]
reshape_strategies_vector = StrategiesVector(reshape_node)
conv_strategies_vector = StrategiesVector(conv_mod_node)
# build handler
conv_handler = ConvFunctionHandler(node=conv_mod_node,
device_mesh=device_mesh,
strategies_vector=conv_strategies_vector)
conv_handler.register_strategy(compute_resharding_cost=False)
setattr(conv_mod_node, 'strategies_vector', conv_strategies_vector)
reshape_handler = DefaultReshapeHandler(node=reshape_node,
device_mesh=device_mesh,
strategies_vector=reshape_strategies_vector)
reshape_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = reshape_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['input'].name == "conv2d"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([4, 4, 62, 62])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([4, 4, 62, 62])
assert mapping['output'].name == "view"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size([2, 30752])
assert mapping['output'].type == OperationDataType.OUTPUT
# reshape handler is a following strategy handler, so the number of strategies is equal to the predecessor node.
assert len(reshape_strategies_vector) == len(conv_strategies_vector)
if __name__ == '__main__':
test_reshape_handler()
|
import pytest
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.matmul_handler import (
MatMulHandler,
MatMulType,
_get_bmm_logical_shape,
get_matmul_type,
)
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing.utils import parameterize
class MatMulModule(nn.Module):
def forward(self, x1, x2):
return torch.matmul(x1, x2)
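# For reference: torch.matmul dispatches on operand ranks. 1D x 1D is a dot
# product (scalar output), 2D x 1D a matrix-vector product, 2D x 2D a
# matrix-matrix product, and anything with extra leading dims a batched matmul
# with broadcasting; get_matmul_type below is expected to mirror this dispatch.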
@pytest.mark.skipif(torch.__version__ < '1.12.0', reason="need pytorch 1.12.0 or higher for aten level operations")
@parameterize(
'tensor_shapes',
[
[[8], [8]], # dot product
[[4, 8], [8]], # mat-vec product
[[4, 8], [8, 16]], # mat-mat product
        [[8], [8, 16]],    # vec-mat product
[[8], [4, 8, 16]], # batched mat-mat product with padding + broadcasting
[[4, 8, 16], [16]], # batched mat-mat product with padding + broadcasting
[[4, 8, 16], [16, 32]], # batched mat-mat product with broadcasting
[[4, 8, 16], [1, 16, 32]], # batched mat-mat product with broadcasting
[[8, 16], [2, 4, 16, 32]], # batched mat-mat product with broadcasting
[[4, 8, 16], [2, 4, 16, 32]], # batched mat-mat product with broadcasting
[[1, 8, 16], [2, 4, 16, 32]], # batched mat-mat product with broadcasting
[[1, 4, 8, 16], [2, 4, 16, 32]], # batched mat-mat product with broadcasting
[[2, 1, 8, 16], [2, 4, 16, 32]], # batched mat-mat product with broadcasting
[[2, 4, 8, 16], [2, 4, 16, 32]], # batched mat-mat product without broadcasting
])
def test_matmul_node_handler(tensor_shapes):
input_shape, other_shape = tensor_shapes
# get output shape
x1 = torch.rand(*input_shape)
x2 = torch.rand(*other_shape)
output_shape = list(torch.matmul(x1, x2).shape)
# get matmul type
matmul_type = get_matmul_type(x1.dim(), x2.dim())
model = MatMulModule()
tracer = ColoTracer()
graph = tracer.trace(model, meta_args={"x1": x1.to('meta'), 'x2': x2.to('meta')})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
mod_node = list(graph.nodes)[2]
strategies_vector = StrategiesVector(mod_node)
# build handler
handler = MatMulHandler(node=mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
logical_input_shape = input_shape
logical_other_shape = other_shape
logical_output_shape = output_shape
if matmul_type == MatMulType.MM and len(input_shape) == 1:
logical_input_shape = [1] + input_shape
elif matmul_type == MatMulType.BMM:
logical_input_shape, logical_other_shape, logical_output_shape = _get_bmm_logical_shape(
input_shape, other_shape, handler.transforms)
else:
logical_input_shape = input_shape
# check input operation data
assert mapping['input'].name == "x1"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size(input_shape)
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size(logical_input_shape)
# check other operation data
assert mapping['other'].name == "x2"
assert mapping['other'].data.is_meta
assert mapping['other'].data.shape == torch.Size(other_shape)
assert mapping['other'].type == OperationDataType.ARG
assert mapping['other'].logical_shape == torch.Size(logical_other_shape)
# check output
assert mapping['output'].name == "matmul"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size(output_shape)
assert mapping['output'].type == OperationDataType.OUTPUT
assert mapping['output'].logical_shape == torch.Size(logical_output_shape)
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
# ensure there is no duplicate strategy
if matmul_type != MatMulType.BMM:
assert len(set(strategy_name_list)) == len(strategy_name_list), strategy_name_list
for strategy in strategies_vector:
strategy: ShardingStrategy
input_sharding_spec = strategy.get_sharding_spec_by_name('x1')
other_sharding_spec = strategy.get_sharding_spec_by_name('x2')
output_sharding_spec = strategy.get_sharding_spec_by_name('matmul')
if matmul_type == MatMulType.DOT:
            # dot product will produce a scalar
# results should fulfill:
# 1. the input and other operands have the same sharding spec
# 2. the output has no sharding
assert input_sharding_spec.sharding_sequence == other_sharding_spec.sharding_sequence
assert len(output_sharding_spec.sharding_sequence) == 0
elif matmul_type == MatMulType.MV:
# matrix-vector product should fulfill
# 1. the last dim of the input and other operands should have the same sharding
# 2. the first dim of the input and other should have the same sharding
# 3. the output should have only 1 dim
assert input_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[-1]
assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
assert len(output_sharding_spec.sharding_sequence) == 1
elif matmul_type == MatMulType.MM:
            # matrix-matrix multiplication should fulfill
# 1. if input is a 2D tensor, the 1st dim of input and output should have the same sharding
# 2. the input's last dim and the first dim of the other should have the same sharding
# 3. the last dim of the output and other should have the same sharding
# 4. the input and output should have the same number of dims
if len(input_shape) == 2:
assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
assert input_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[0]
assert output_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[-1]
assert len(input_sharding_spec.sharding_sequence) == len(output_sharding_spec.sharding_sequence)
elif matmul_type == MatMulType.BMM:
            # bmm should fulfill
            # 1. if the other tensor is not a 1D tensor, the last dim of other and output should have the same sharding
            # 2. if the input has more than 1 dim, the second-to-last dim of input and output should have the same sharding
            # 3. if the other has more than 2 dims, the second-to-last dim of other and the last dim of input should have the same sharding
if len(other_shape) > 1:
assert other_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
if len(input_shape) > 1:
assert input_sharding_spec.sharding_sequence[-2] == output_sharding_spec.sharding_sequence[-2]
if len(other_shape) > 2:
assert other_sharding_spec.sharding_sequence[-2] == input_sharding_spec.sharding_sequence[-1]
if __name__ == '__main__':
test_matmul_node_handler()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.output_handler import OutputHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
class OutputModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x * 2
return x, y
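# For reference: output_option controls the layout of graph outputs.
# 'distributed' lets outputs stay sharded across devices, while 'replicated'
# requires a full copy on every device; the asserted "Distributed Output" /
# "Replica Output" strategy names below reflect this choice.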
@parameterize('output_option', ['distributed', 'replicated'])
@rerun_if_address_is_in_use()
def test_output_handler(output_option):
model = OutputModel()
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=2] = placeholder[target=x]
# %mul : [#users=1] = call_function[target=operator.mul](args = (%x, 2), kwargs = {})
# return (x, mul)
graph = tracer.trace(model, meta_args={
"x": torch.rand(4, 4, 64, 64).to('meta'),
})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
output_node = list(graph.nodes)[2]
output_strategies_vector = StrategiesVector(output_node)
# build handler
    output_handler = OutputHandler(node=output_node,
                                   device_mesh=device_mesh,
                                   strategies_vector=output_strategies_vector,
                                   output_option=output_option)
    output_handler.register_strategy(compute_resharding_cost=False)
    # check operation data mapping
    mapping = output_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['output'].name == "output"
assert mapping['output'].type == OperationDataType.OUTPUT
    strategy_name_list = [val.name for val in output_handler.strategies_vector]
if output_option == 'distributed':
assert "Distributed Output" in strategy_name_list
else:
assert "Replica Output" in strategy_name_list
if __name__ == '__main__':
test_output_handler()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.softmax_handler import SoftmaxHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class LinearSplitModel(nn.Module):
def __init__(self, softmax_dim):
super().__init__()
self.softmax_dim = softmax_dim
def forward(self, input, other):
linear_node = F.linear(input, other, bias=None)
softmax_node = F.softmax(linear_node, self.softmax_dim)
return softmax_node
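# Note on the expected strategies below: softmax normalizes along
# self.softmax_dim, so every asserted strategy keeps that dim replicated
# (no spec in the softmax_dim == 0 block shards dim 0, and none in the
# softmax_dim == 1 block shards dim 1), while all other dims pass the
# predecessor's sharding through unchanged.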
def check_split_handler(rank, softmax_dim, model_cls, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = model_cls(softmax_dim=softmax_dim).cuda()
input = torch.rand(8, 16, 64, 32).to('cuda')
other = torch.rand(64, 32).to('cuda')
# index of linear node in computation graph
node_index = 2
# total number of linear strategies
strategy_number = 23
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input, other],
meta_arg_names=['input', 'other'],
node_type='following')
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
    # %softmax : [#users=1] = call_function[target=torch.nn.functional.softmax](args = (%linear, softmax_dim), kwargs = {})
    # return softmax
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 16, 64, 32).to('meta'),
"other": torch.rand(64, 32).to('meta'),
})
gm = ColoGraphModule(model, graph)
previous_mod_node = list(graph.nodes)[2]
split_node = list(graph.nodes)[3]
split_strategies_vector = StrategiesVector(split_node)
previous_strategies_vector = StrategiesVector(previous_mod_node)
# build handler
assert len(previous_strategies_vector) == 0
linear_handler = LinearFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
linear_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
softmax_handler = SoftmaxHandler(node=split_node,
device_mesh=device_mesh,
strategies_vector=split_strategies_vector)
softmax_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = softmax_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['input'].name == "linear"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([8, 16, 64, 64])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([8, 16, 64, 64])
assert mapping['softmax_dim'].name == "softmax_dim"
assert mapping['softmax_dim'].data == softmax_dim
assert mapping['softmax_dim'].type == OperationDataType.ARG
assert mapping['output'].name == "softmax"
assert mapping['output'].data.shape == torch.Size([8, 16, 64, 64])
assert mapping['output'].logical_shape == torch.Size([8, 16, 64, 64])
assert mapping['output'].type == OperationDataType.OUTPUT
    # softmax handler is a following strategy handler, so its number of strategies equals the predecessor node's.
assert len(split_strategies_vector) == len(previous_strategies_vector)
strategy_name_list = [strategy.name for strategy in split_strategies_vector]
if softmax_dim == 0:
assert '[R, R, R, S1] -> [R, R, R, S1]_11' in strategy_name_list
assert '[R, S0, R, S1] -> [R, S0, R, S1]_12' in strategy_name_list
assert '[R, R, S0, S1] -> [R, R, S0, S1]_13' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_14' in strategy_name_list
assert '[R, S1, R, S0] -> [R, S1, R, S0]_15' in strategy_name_list
assert '[R, R, S1, S0] -> [R, R, S1, S0]_16' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_17' in strategy_name_list
assert '[R, S0, R, R] -> [R, S0, R, R]_18' in strategy_name_list
assert '[R, R, S0, R] -> [R, R, S0, R]_19' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_20' in strategy_name_list
assert '[R, S1, R, R] -> [R, S1, R, R]_21' in strategy_name_list
assert '[R, R, S1, R] -> [R, R, S1, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_5' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_0' in strategy_name_list
assert '[R, S01, R, R] -> [R, S01, R, R]_1' in strategy_name_list
assert '[R, R, S01, R] -> [R, R, S01, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01]_4' in strategy_name_list
if softmax_dim == 1:
assert '[S0, R, R, S1] -> [S0, R, R, S1]_11' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_12' in strategy_name_list
assert '[R, R, S0, S1] -> [R, R, S0, S1]_13' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, R, R, S0]_14' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_15' in strategy_name_list
assert '[R, R, S1, S0] -> [R, R, S1, S0]_16' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_17' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_18' in strategy_name_list
assert '[R, R, S0, R] -> [R, R, S0, R]_19' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_20' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_21' in strategy_name_list
assert '[R, R, S1, R] -> [R, R, S1, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_5' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R]_0' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_1' in strategy_name_list
assert '[R, R, S01, R] -> [R, R, S01, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01]_4' in strategy_name_list
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize('softmax_dim', [0, 1, 2, 3])
@parameterize('model_cls', [LinearSplitModel])
def test_split_handler(softmax_dim, model_cls):
world_size = 4
run_func = partial(check_split_handler,
softmax_dim=softmax_dim,
model_cls=model_cls,
world_size=world_size,
port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_split_handler()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.placeholder_handler import PlaceholderHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
class PlaceholderModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input
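# For reference: placeholder_option controls how graph inputs are laid out.
# 'distributed' shards the input across the flattened device mesh (the
# [S01, R, R, R] spec asserted below), while 'replicated' keeps a full copy on
# every device ([R, R, R, R]).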
@parameterize('placeholder_option', ['distributed', 'replicated'])
@rerun_if_address_is_in_use()
def test_placeholder_handler(placeholder_option):
model = PlaceholderModel()
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# return input_1
graph = tracer.trace(model, meta_args={
"input": torch.rand(4, 4, 64, 64).to('meta'),
})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
placeholder_node = list(graph.nodes)[0]
placeholder_strategies_vector = StrategiesVector(placeholder_node)
# build handler
placeholder_handler = PlaceholderHandler(node=placeholder_node,
device_mesh=device_mesh,
strategies_vector=placeholder_strategies_vector,
placeholder_option=placeholder_option)
placeholder_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = placeholder_handler.get_operation_data_mapping()
strategy = placeholder_strategies_vector[0]
strategy_sharding_spec = strategy.get_sharding_spec_by_name(mapping['output'].name)
if placeholder_option == 'distributed':
assert str(strategy_sharding_spec.sharding_sequence) == '[S01, R, R, R]'
else:
assert str(strategy_sharding_spec.sharding_sequence) == '[R, R, R, R]'
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['output'].name == "input_1"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size((4, 4, 64, 64))
assert mapping['output'].type == OperationDataType.OUTPUT
strategy_name_list = [val.name for val in placeholder_handler.strategies_vector]
if placeholder_option == 'replicated':
assert "Replica Placeholder" in strategy_name_list
else:
assert "Distributed Placeholder" in strategy_name_list
if __name__ == '__main__':
test_placeholder_handler()
|
from functools import partial
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.options import ShardOption
from colossalai.auto_parallel.tensor_shard.sharding_strategy import StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing import parameterize
from colossalai.testing.pytest_wrapper import run_on_environment_flag
class LinearModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, others, bias=None):
x = nn.functional.linear(input, others, bias=bias)
return x
def check_shard_option(shard_option):
model = LinearModel().cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
tracer = ColoTracer()
graph = tracer.trace(model,
meta_args={
"input": torch.rand(4, 4, 4, 16).to('meta'),
'others': torch.rand(32, 16).to('meta')
})
gm = ColoGraphModule(model, graph)
linear_func_node = list(graph.nodes)[2]
strategies_vector = StrategiesVector(linear_func_node)
# build handler
handler = LinearFunctionHandler(node=linear_func_node,
device_mesh=device_mesh,
strategies_vector=strategies_vector,
shard_option=shard_option)
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
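    # Strategy names read as "output = input x other": e.g. 'RS1 = RR x RS1'
    # keeps the input replicated, shards the weight's output dimension along
    # mesh axis 1, and hence shards the last dim of the output the same way.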
if shard_option == ShardOption.SHARD_LAST_AXIS:
# RR = RS x SR
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS1 = RR x RS1' in strategy_name_list
return
# SS = SR x RS
assert 'S1S0 = S1R x RS0_0' in strategy_name_list
assert 'S0S1 = S0R x RS1_1' in strategy_name_list
assert 'S0S1 = S0R x RS1_2' in strategy_name_list
assert 'S0S1 = S0R x RS1_0' in strategy_name_list
assert 'S1S0 = S1R x RS0_1' in strategy_name_list
assert 'S1S0 = S1R x RS0_2' in strategy_name_list
# SR = SS x SR
assert 'S0R = S0S1 x S1R_1' in strategy_name_list
assert 'S0R = S0S1 x S1R_2' in strategy_name_list
assert 'S1R = S1S0 x S0R_0' in strategy_name_list
assert 'S0R = S0S1 x S1R_0' in strategy_name_list
assert 'S1R = S1S0 x S0R_1' in strategy_name_list
assert 'S1R = S1S0 x S0R_2' in strategy_name_list
# RS = RS x SS
assert 'RS0 = RS1 x S1S0' in strategy_name_list
assert 'RS1 = RS0 x S0S1' in strategy_name_list
# S01R = S01R x RR
assert 'S01R = S01R x RR_0' in strategy_name_list
assert 'S01R = S01R x RR_1' in strategy_name_list
assert 'S01R = S01R x RR_2' in strategy_name_list
# RR = RS01 x S01R
assert 'RR = RS01 x S01R' in strategy_name_list
# RS01 = RR x RS01
assert 'RS01 = RR x RS01' in strategy_name_list
if shard_option == ShardOption.SHARD:
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
if shard_option == ShardOption.STANDARD:
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
# RR = RR x RR
assert 'RR = RR x RR' in strategy_name_list
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_shard_option():
# for shard_option in [ShardOption.STANDARD, ShardOption.SHARD, ShardOption.FULL_SHARD, ShardOption.SHARD_LAST_AXIS]:
for shard_option in [ShardOption.SHARD_LAST_AXIS]:
check_shard_option(shard_option)
if __name__ == '__main__':
test_shard_option()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import PermuteHandler, TransposeHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class ConvReshapeModel(nn.Module):
def __init__(self, reshape_dims, call_function):
super().__init__()
self.reshape_dims = reshape_dims
self.call_function = call_function
def forward(self, input, other):
conv_node = nn.functional.conv2d(input, other, bias=None)
        # dispatch to torch.permute or torch.transpose depending on call_function
if self.call_function == torch.permute:
permute_node = self.call_function(conv_node, self.reshape_dims)
else:
permute_node = self.call_function(conv_node, *self.reshape_dims)
return permute_node
class LinearReshapeModel(nn.Module):
def __init__(self, reshape_dims, call_function):
super().__init__()
self.reshape_dims = reshape_dims
self.call_function = call_function
def forward(self, input, other):
linear_node = nn.functional.linear(input, other, bias=None)
        # dispatch to torch.permute or torch.transpose depending on call_function
if self.call_function == torch.permute:
permute_node = self.call_function(linear_node, self.reshape_dims)
else:
permute_node = self.call_function(linear_node, *self.reshape_dims)
return permute_node
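# For reference: torch.permute takes the full target ordering of all dims,
# while torch.transpose swaps exactly two dims. That is why reshape_dims is
# passed as a (permute_dims, transpose_dims) pair and reduced to one of the
# two in check_view_handler below.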
def check_view_handler(rank, call_function, reshape_dims, model_cls, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
if call_function == torch.permute:
reshape_dims = reshape_dims[0]
elif call_function == torch.transpose:
reshape_dims = reshape_dims[1]
model = model_cls(reshape_dims, call_function).cuda()
if model_cls.__name__ == 'ConvReshapeModel':
input = torch.rand(8, 8, 66, 66).to('cuda')
other = torch.rand(16, 8, 3, 3).to('cuda')
# index of conv node in computation graph
node_index = 2
# total number of conv strategies
strategy_number = 16
if model_cls.__name__ == 'LinearReshapeModel':
input = torch.rand(8, 16, 64, 32).to('cuda')
other = torch.rand(64, 32).to('cuda')
# index of linear node in computation graph
node_index = 2
# total number of linear strategies
strategy_number = 23
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input, other],
meta_arg_names=['input', 'other'],
node_type='following')
tracer = ColoTracer()
if model_cls.__name__ == 'ConvReshapeModel':
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {bias: None})
# %permute : [#users=1] = call_function[target=torch.permute](args = (%conv2d, (0, 2, 1, 3)), kwargs = {})
# return permute
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 8, 66, 66).to('meta'),
"other": torch.rand(16, 8, 3, 3).to('meta'),
})
if model_cls.__name__ == 'LinearReshapeModel':
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
        # %permute : [#users=1] = call_function[target=torch.permute](args = (%linear, (0, 2, 1, 3)), kwargs = {})
        # (or %transpose = call_function[target=torch.transpose] when call_function is torch.transpose)
        # return permute
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 16, 64, 32).to('meta'),
"other": torch.rand(64, 32).to('meta'),
})
gm = ColoGraphModule(model, graph)
previous_mod_node = list(graph.nodes)[2]
reshape_node = list(graph.nodes)[3]
view_strategies_vector = StrategiesVector(reshape_node)
previous_strategies_vector = StrategiesVector(previous_mod_node)
# build handler
if model_cls.__name__ == 'ConvReshapeModel':
conv_handler = ConvFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
conv_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
if model_cls.__name__ == 'LinearReshapeModel':
assert len(previous_strategies_vector) == 0
linear_handler = LinearFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
linear_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
if call_function == torch.permute:
reshape_handler = PermuteHandler(node=reshape_node,
device_mesh=device_mesh,
strategies_vector=view_strategies_vector)
else:
reshape_handler = TransposeHandler(node=reshape_node,
device_mesh=device_mesh,
strategies_vector=view_strategies_vector)
reshape_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = reshape_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
if model_cls.__name__ == 'ConvReshapeModel':
assert mapping['input'].name == "conv2d"
else:
assert mapping['input'].name == "linear"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([8, 16, 64, 64])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([8, 16, 64, 64])
if call_function == torch.permute:
assert mapping['output'].name == "permute"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.permute(torch.rand(8, 16, 64, 64), reshape_dims).shape
assert mapping['output'].type == OperationDataType.OUTPUT
else:
assert mapping['output'].name == "transpose"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.transpose(torch.rand(8, 16, 64, 64), *reshape_dims).shape
assert mapping['output'].type == OperationDataType.OUTPUT
    # permute/transpose handler is a following strategy handler, so its number of strategies equals the predecessor node's.
assert len(view_strategies_vector) == len(previous_strategies_vector)
strategy_name_list = [strategy.name for strategy in view_strategies_vector]
if rank == 0:
for name in strategy_name_list:
print(name)
if model_cls.__name__ == 'ConvReshapeModel':
if reshape_dims in ((0, 2, 1, 3), (1, 2)):
assert '[S0, S1, R, R] -> [S0, R, S1, R]_0' in strategy_name_list
assert '[S1, S0, R, R] -> [S1, R, S0, R]_1' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_2' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_3' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_4' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_5' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, S1, R]_6' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, S0, R]_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_9' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, S0, R]_10' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, S1, R]_11' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_12' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_14' in strategy_name_list
assert '[R, S01, R, R] -> [R, R, S01, R]_15' in strategy_name_list
if reshape_dims == (2, 0, 1, 3):
assert '[S0, S1, R, R] -> [R, S0, S1, R]_0' in strategy_name_list
assert '[S1, S0, R, R] -> [R, S1, S0, R]_1' in strategy_name_list
assert '[S0, R, R, R] -> [R, S0, R, R]_2' in strategy_name_list
assert '[S1, R, R, R] -> [R, S1, R, R]_3' in strategy_name_list
assert '[S0, R, R, R] -> [R, S0, R, R]_4' in strategy_name_list
assert '[S1, R, R, R] -> [R, S1, R, R]_5' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, S1, R]_6' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, S0, R]_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_9' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, S0, R]_10' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, S1, R]_11' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_12' in strategy_name_list
assert '[S01, R, R, R] -> [R, S01, R, R]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_14' in strategy_name_list
assert '[R, S01, R, R] -> [R, R, S01, R]_15' in strategy_name_list
if reshape_dims == (1, 3):
assert '[S0, S1, R, R] -> [S0, R, R, S1]_0' in strategy_name_list
assert '[S1, S0, R, R] -> [S1, R, R, S0]_1' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_2' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_3' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_4' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_5' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, R, S1]_6' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, R, S0]_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_9' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, R, S0]_10' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, R, S1]_11' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_12' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_14' in strategy_name_list
assert '[R, S01, R, R] -> [R, R, R, S01]_15' in strategy_name_list
if model_cls.__name__ == 'LinearReshapeModel':
        if reshape_dims in ((0, 2, 1, 3), (1, 2)):
assert '[S0, R, R, S1] -> [S0, R, R, S1]_11' in strategy_name_list
assert '[R, S0, R, S1] -> [R, R, S0, S1]_12' in strategy_name_list
assert '[R, R, S0, S1] -> [R, S0, R, S1]_13' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, R, R, S0]_14' in strategy_name_list
assert '[R, S1, R, S0] -> [R, R, S1, S0]_15' in strategy_name_list
assert '[R, R, S1, S0] -> [R, S1, R, S0]_16' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_17' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, S0, R]_18' in strategy_name_list
assert '[R, R, S0, R] -> [R, S0, R, R]_19' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_20' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, S1, R]_21' in strategy_name_list
assert '[R, R, S1, R] -> [R, S1, R, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_5' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R]_0' in strategy_name_list
assert '[R, S01, R, R] -> [R, R, S01, R]_1' in strategy_name_list
assert '[R, R, S01, R] -> [R, S01, R, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01]_4' in strategy_name_list
if reshape_dims == (2, 0, 1, 3):
assert '[S0, R, R, S1] -> [R, S0, R, S1]_11' in strategy_name_list
assert '[R, S0, R, S1] -> [R, R, S0, S1]_12' in strategy_name_list
assert '[R, R, S0, S1] -> [S0, R, R, S1]_13' in strategy_name_list
assert '[S1, R, R, S0] -> [R, S1, R, S0]_14' in strategy_name_list
assert '[R, S1, R, S0] -> [R, R, S1, S0]_15' in strategy_name_list
assert '[R, R, S1, S0] -> [S1, R, R, S0]_16' in strategy_name_list
assert '[S0, R, R, R] -> [R, S0, R, R]_17' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, S0, R]_18' in strategy_name_list
assert '[R, R, S0, R] -> [S0, R, R, R]_19' in strategy_name_list
assert '[S1, R, R, R] -> [R, S1, R, R]_20' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, S1, R]_21' in strategy_name_list
assert '[R, R, S1, R] -> [S1, R, R, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_5' in strategy_name_list
assert '[S01, R, R, R] -> [R, S01, R, R]_0' in strategy_name_list
assert '[R, S01, R, R] -> [R, R, S01, R]_1' in strategy_name_list
assert '[R, R, S01, R] -> [S01, R, R, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01]_4' in strategy_name_list
if reshape_dims == (1, 3):
assert '[S0, R, R, S1] -> [S0, S1, R, R]_11' in strategy_name_list
assert '[R, S0, R, S1] -> [R, S1, R, S0]_12' in strategy_name_list
assert '[R, R, S0, S1] -> [R, S1, S0, R]_13' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, S0, R, R]_14' in strategy_name_list
assert '[R, S1, R, S0] -> [R, S0, R, S1]_15' in strategy_name_list
assert '[R, R, S1, S0] -> [R, S0, S1, R]_16' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_17' in strategy_name_list
assert '[R, S0, R, R] -> [R, R, R, S0]_18' in strategy_name_list
assert '[R, R, S0, R] -> [R, R, S0, R]_19' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_20' in strategy_name_list
assert '[R, S1, R, R] -> [R, R, R, S1]_21' in strategy_name_list
assert '[R, R, S1, R] -> [R, R, S1, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, S1, R, R]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, S0, R, R]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, S0, R, R]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, S1, R, R]_5' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R]_0' in strategy_name_list
assert '[R, S01, R, R] -> [R, R, R, S01]_1' in strategy_name_list
assert '[R, R, S01, R] -> [R, R, S01, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, S01, R, R]_4' in strategy_name_list
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize('call_function', [torch.permute, torch.transpose])
@parameterize('reshape_dims', [((0, 2, 1, 3), (1, 2)), ((2, 0, 1, 3), (1, 3))])
@parameterize('model_cls', [ConvReshapeModel, LinearReshapeModel])
def test_view_handler(call_function, reshape_dims, model_cls):
world_size = 4
run_func = partial(check_view_handler,
call_function=call_function,
reshape_dims=reshape_dims,
model_cls=model_cls,
world_size=world_size,
port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_view_handler()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class AddmmModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, m1, m2):
x = torch.addmm(input, m1, m2, beta=3, alpha=2)
return x
class AddmmModel_with_param(nn.Module):
def __init__(self, weight_shape, bias_shape):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(weight_shape))
self.bias = torch.nn.Parameter(torch.rand(bias_shape))
def forward(self, m1):
x = torch.addmm(self.bias, m1, self.weight, beta=3, alpha=2)
return x
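# For reference: torch.addmm(input, m1, m2, beta=3, alpha=2) computes
# 3 * input + 2 * (m1 @ m2), e.g.
#   out = torch.addmm(bias, m1, m2, beta=3, alpha=2)
#   assert torch.allclose(out, 3 * bias + 2 * (m1 @ m2))
# ColoTracer decomposes the call into transpose/linear/mul/add nodes (see the
# traced graph below), so the handler under test attaches to the linear node.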
def check_addmm_function_handler(rank, input_shape, model_cls, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
if model_cls == AddmmModel:
model = AddmmModel().cuda()
else:
model = AddmmModel_with_param(weight_shape=(8, 16), bias_shape=input_shape).cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
if model_cls == AddmmModel:
input = torch.rand(input_shape).cuda()
m1 = torch.rand(4, 8).cuda()
m2 = torch.rand(8, 16).cuda()
# construct input args
input_args = [input, m1, m2]
# construct meta arg names
meta_arg_names = ['input', 'm1', 'm2']
meta_args_for_tracer = {}
for meta_arg, input_arg in zip(meta_arg_names, input_args):
meta_args_for_tracer[meta_arg] = input_arg.to('meta')
        # the index of the linear node (decomposed from addmm) in the computation graph
node_index = 4
# strategy number of linear node
strategy_number = 14
else:
m1 = torch.rand(4, 8).cuda()
# construct input args
input_args = [m1]
# construct meta arg names
meta_arg_names = ['m1']
        meta_args_for_tracer = {}
        for meta_arg, input_arg in zip(meta_arg_names, input_args):
            meta_args_for_tracer[meta_arg] = input_arg.to('meta')
        # the index of the linear node (decomposed from addmm) in the computation graph
        node_index = 4
# strategy number of linear node
strategy_number = 14
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=input_args,
meta_arg_names=meta_arg_names,
node_type='bias_module')
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %m1 : torch.Tensor [#users=1] = placeholder[target=m1]
# %m2 : torch.Tensor [#users=1] = placeholder[target=m2]
# %transpose : [#users=1] = call_function[target=torch.transpose](args = (%m2, 0, 1), kwargs = {})
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%m1, %transpose), kwargs = {})
# %mul : [#users=1] = call_function[target=operator.mul](args = (%input_1, 3), kwargs = {})
# %mul_1 : [#users=1] = call_function[target=operator.mul](args = (2, %linear), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%mul_1, %mul), kwargs = {})
# return add
graph = tracer.trace(model, meta_args=meta_args_for_tracer)
gm = ColoGraphModule(model, graph)
    # traced node order (AddmmModel): [input_1, m1, m2, transpose, linear, mul, mul_1, add, output]
node_list = list(graph.nodes)
linear_node = node_list[4]
strategies_vector = StrategiesVector(linear_node)
# build handler
handler = LinearFunctionHandler(node=linear_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
# check operation data mapping
mapping = handler.get_operation_data_mapping()
assert mapping['input'].name == "m1"
assert mapping['input'].data.shape == torch.Size([4, 8])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([4, 8])
assert mapping['other'].name == "transpose"
assert mapping['other'].data.shape == torch.Size([16, 8])
if model_cls == AddmmModel:
assert mapping['other'].type == OperationDataType.ARG
else:
assert mapping['other'].type == OperationDataType.PARAM
assert mapping['other'].logical_shape == torch.Size([8, 16])
assert mapping['output'].name == "linear"
assert mapping['output'].data.shape == torch.Size([4, 16])
assert mapping['output'].type == OperationDataType.OUTPUT
# SS = SR x RS
assert 'S0S1 = S0R x RS1_0' in strategy_name_list
assert 'S1S0 = S1R x RS0_0' in strategy_name_list
# SR = SS x SR
assert 'S0R = S0S1 x S1R_0' in strategy_name_list
assert 'S1R = S1S0 x S0R_0' in strategy_name_list
# RS = RS x SS
assert 'RS0 = RS1 x S1S0' in strategy_name_list
assert 'RS1 = RS0 x S0S1' in strategy_name_list
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
# S01R = S01R x RR
assert 'S01R = S01R x RR_0' in strategy_name_list
# RR = RS01 x S01R
assert 'RR = RS01 x S01R' in strategy_name_list
# RS01 = RR x RS01
assert 'RS01 = RR x RS01' in strategy_name_list
# RR = RR x RR
assert 'RR = RR x RR' in strategy_name_list
for strategy in strategies_vector:
strategy: ShardingStrategy
input_sharding_spec = strategy.get_sharding_spec_by_name('m1')
weight_sharding_spec = strategy.get_sharding_spec_by_name('transpose')
output_sharding_spec = strategy.get_sharding_spec_by_name('linear')
# make sure the sharding matches across different operation data
assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[1]
assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[1]
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@parameterize('input_shape', [(16,), (4, 16)])
@parameterize('model_cls', [AddmmModel, AddmmModel_with_param])
@rerun_if_address_is_in_use()
def test_addmm_handler(input_shape, model_cls):
world_size = 4
run_func_function = partial(check_addmm_function_handler,
input_shape=input_shape,
model_cls=model_cls,
world_size=world_size,
port=free_port())
mp.spawn(run_func_function, nprocs=world_size)
if __name__ == '__main__':
test_addmm_handler()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.where_handler import WhereHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
class WhereModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, condition, x, y):
output = torch.where(condition, x, y)
return output
def test_where_handler():
    model = WhereModel()
tracer = ColoTracer()
# graph():
# %condition : torch.Tensor [#users=1] = placeholder[target=condition]
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %y : torch.Tensor [#users=1] = placeholder[target=y]
# %where : [#users=1] = call_function[target=torch.where](args = (%condition, %x, %y), kwargs = {})
# return where
graph = tracer.trace(model,
meta_args={
"condition": torch.rand(4, 4, 64, 64).to('meta'),
"x": torch.rand(4, 1, 64, 64).to('meta'),
"y": torch.rand(1, 4, 64, 64).to('meta')
})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
where_node = list(graph.nodes)[3]
strategies_vector = StrategiesVector(where_node)
# build handler
handler = WhereHandler(node=where_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping, _ = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
assert mapping['condition'].name == "condition"
assert mapping['condition'].data.is_meta
assert mapping['condition'].data.shape == torch.Size([4, 4, 64, 64])
assert mapping['condition'].type == OperationDataType.ARG
assert mapping['condition'].logical_shape == torch.Size([4, 4, 64, 64])
assert mapping['x'].name == "x"
assert mapping['x'].data.is_meta
assert mapping['x'].data.shape == torch.Size([4, 1, 64, 64])
assert mapping['x'].type == OperationDataType.ARG
assert mapping['x'].logical_shape == torch.Size([4, 4, 64, 64])
assert mapping['y'].name == "y"
assert mapping['y'].data.is_meta
assert mapping['y'].data.shape == torch.Size([1, 4, 64, 64])
assert mapping['y'].type == OperationDataType.ARG
assert mapping['y'].logical_shape == torch.Size([4, 4, 64, 64])
assert mapping['output'].name == "where"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size([4, 4, 64, 64])
assert mapping['output'].type == OperationDataType.OUTPUT
handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
    # expected strategy count: 4*3 + 4*3/2*2 + 1 = 25
assert len(strategy_name_list) == 25
if __name__ == '__main__':
test_where_handler()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.tensor_constructor_handler import TensorConstructorHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing.pytest_wrapper import run_on_environment_flag
class TensorConstructorModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
arange_node = torch.arange(x.size()[0])
x = x + arange_node
return x
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_tensor_constructor_handler():
model = TensorConstructorModel()
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=2] = placeholder[target=x]
# %size : [#users=1] = call_method[target=size](args = (%x,), kwargs = {})
# %getitem : [#users=1] = call_function[target=operator.getitem](args = (%size, 0), kwargs = {})
# %arange : [#users=1] = call_function[target=torch.arange](args = (%getitem,), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%x, %arange), kwargs = {})
# return add
graph = tracer.trace(model, meta_args={
"x": torch.rand(10).to('meta'),
})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
arange_node = list(graph.nodes)[3]
strategies_vector = StrategiesVector(arange_node)
# build handler
handler = TensorConstructorHandler(node=arange_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
assert mapping['output'].name == "arange"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size([10])
assert mapping['output'].type == OperationDataType.OUTPUT
handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
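    # a tensor constructor has no tensor inputs whose sharding could be followed, so the handler
    # is expected to offer only the fully replicated construction strategy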
assert 'Replica Tensor Constructor' in strategy_name_list
if __name__ == '__main__':
    test_tensor_constructor_handler()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler, LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
WEIGHT_SHAPE = (32, 16)
class LinearModule(torch.nn.Module):
def __init__(self, weight_shape):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(*weight_shape))
self.bias = torch.nn.Parameter(torch.rand(weight_shape[0]))
def forward(self, x):
x = F.linear(x, self.weight, bias=self.bias)
return x
def check_linear_module_handler(rank, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = LinearModule(weight_shape=WEIGHT_SHAPE).cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
input = torch.rand(4, 4, 4, 16).cuda()
# the index of linear node in computation graph
node_index = 3
# strategy number of linear node
strategy_number = 24
# construct input args
input_args = [input]
# construct meta arg names
meta_arg_names = ['x']
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=input_args,
meta_arg_names=meta_arg_names,
node_type='bias_module')
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %weight : [#users=1] = get_attr[target=weight]
# %bias : [#users=1] = get_attr[target=bias]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%x, %weight), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%linear, %bias), kwargs = {})
# return add
graph = tracer.trace(model, meta_args={"x": torch.rand(4, 4, 4, 16).to('meta')})
gm = ColoGraphModule(model, graph)
linear_mod_node = list(graph.nodes)[3]
strategies_vector = StrategiesVector(linear_mod_node)
# build handler
handler = LinearFunctionHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
assert mapping['input'].name == "x"
assert mapping['input'].data.shape == torch.Size([4, 4, 4, 16])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([64, 16])
assert mapping['other'].name == "weight"
assert mapping['other'].data.shape == torch.Size([32, 16])
assert mapping['other'].type == OperationDataType.PARAM
assert mapping['other'].logical_shape == torch.Size([16, 32])
assert 'bias' not in mapping
assert mapping['output'].name == "linear"
assert mapping['output'].data.shape == torch.Size([4, 4, 4, 32])
assert mapping['output'].type == OperationDataType.OUTPUT
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
# SS = SR x RS
assert 'S0S1 = S0R x RS1_0' in strategy_name_list
assert 'S0S1 = S0R x RS1_1' in strategy_name_list
assert 'S0S1 = S0R x RS1_2' in strategy_name_list
assert 'S1S0 = S1R x RS0_0' in strategy_name_list
assert 'S1S0 = S1R x RS0_1' in strategy_name_list
assert 'S1S0 = S1R x RS0_2' in strategy_name_list
# SR = SS x SR
assert 'S0R = S0S1 x S1R_0' in strategy_name_list
assert 'S0R = S0S1 x S1R_1' in strategy_name_list
assert 'S0R = S0S1 x S1R_2' in strategy_name_list
assert 'S1R = S1S0 x S0R_0' in strategy_name_list
assert 'S1R = S1S0 x S0R_1' in strategy_name_list
assert 'S1R = S1S0 x S0R_2' in strategy_name_list
# RS = RS x SS
assert 'RS0 = RS1 x S1S0' in strategy_name_list
assert 'RS1 = RS0 x S0S1' in strategy_name_list
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
# S01R = S01R x RR
assert 'S01R = S01R x RR_0' in strategy_name_list
assert 'S01R = S01R x RR_1' in strategy_name_list
assert 'S01R = S01R x RR_2' in strategy_name_list
# RR = RS01 x S01R
assert 'RR = RS01 x S01R' in strategy_name_list
# RS01 = RR x RS01
assert 'RS01 = RR x RS01' in strategy_name_list
# RR = RR x RR
assert 'RR = RR x RR' in strategy_name_list
for strategy in strategies_vector:
strategy: ShardingStrategy
input_sharding_spec = strategy.get_sharding_spec_by_name('x')
weight_sharding_spec = strategy.get_sharding_spec_by_name('weight')
output_sharding_spec = strategy.get_sharding_spec_by_name('linear')
# make sure the sharding matches across different operation data
assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_handler():
world_size = 4
run_func_module = partial(check_linear_module_handler, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
if __name__ == '__main__':
test_linear_handler()
|
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.unary_elementwise_handler import UnaryElementwiseHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.fx.tracer.meta_patch.patched_module import linear
from colossalai.testing.pytest_wrapper import run_on_environment_flag
class ReLuModel(nn.Module):
def __init__(self):
super().__init__()
self.act = torch.nn.ReLU()
def forward(self, input, other):
conv_node = nn.functional.conv2d(input, other)
relu_node = self.act(conv_node)
return relu_node
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_elementwise_handler():
model = ReLuModel()
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
# %act : [#users=1] = call_module[target=act](args = (%conv2d,), kwargs = {})
# return act
graph = tracer.trace(model,
meta_args={
"input": torch.rand(4, 4, 64, 64).to('meta'),
"other": torch.rand(4, 16, 3, 3).to('meta'),
})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
conv_mod_node = list(graph.nodes)[2]
relu_mod_node = list(graph.nodes)[3]
relu_strategies_vector = StrategiesVector(relu_mod_node)
conv_strategies_vector = StrategiesVector(conv_mod_node)
# build handler
conv_handler = ConvFunctionHandler(node=conv_mod_node,
device_mesh=device_mesh,
strategies_vector=conv_strategies_vector)
conv_handler.register_strategy(compute_resharding_cost=False)
setattr(conv_mod_node, 'strategies_vector', conv_strategies_vector)
relu_handler = UnaryElementwiseHandler(node=relu_mod_node,
device_mesh=device_mesh,
strategies_vector=relu_strategies_vector)
relu_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = relu_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['input'].name == "conv2d"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([4, 4, 62, 62])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([4, 4, 62, 62])
assert mapping['output'].name == "act"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size([4, 4, 62, 62])
assert mapping['output'].type == OperationDataType.OUTPUT
    # relu is a following strategy handler, so its number of strategies equals that of the predecessor node.
assert len(relu_strategies_vector) == len(conv_strategies_vector)
if __name__ == '__main__':
test_elementwise_handler()
|
import copy
from typing import Any, Dict, List
import torch
from torch.fx import GraphModule
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.solver import StrategiesConstructor
from colossalai.auto_parallel.tensor_shard.solver.cost_graph import CostGraph
from colossalai.auto_parallel.tensor_shard.solver.graph_analysis import GraphAnalyser
from colossalai.auto_parallel.tensor_shard.solver.solver import Solver
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx.tracer.tracer import ColoTracer
from colossalai.tensor.shape_consistency import to_global
from colossalai.testing.comparison import assert_close
def _build_model_to_compare(model: torch.nn.Module, input_args: List[torch.Tensor],
                            input_kwargs: Dict[str, torch.Tensor], grad_dict: Dict[Any, torch.Tensor]):
model_to_compare = copy.deepcopy(model)
args_to_compare = []
kwargs_to_compare = {}
for arg_index, input_tensor in enumerate(input_args):
def wrapper(param, index):
def hook_fn(grad):
grad_dict[index] = grad
param.register_hook(hook_fn)
arg_to_compare = copy.deepcopy(input_tensor)
# only Tensors of floating point and complex dtype can require gradients
if arg_to_compare.dtype != torch.int64:
arg_to_compare.requires_grad = True
wrapper(arg_to_compare, arg_index)
args_to_compare.append(arg_to_compare)
for name, input_kwarg in input_kwargs.items():
def wrapper(param, name):
def hook_fn(grad):
grad_dict[name] = grad
param.register_hook(hook_fn)
kwarg_to_compare = copy.deepcopy(input_kwarg)
# only Tensors of floating point and complex dtype can require gradients
if kwarg_to_compare.dtype != torch.int64:
kwarg_to_compare.requires_grad = True
wrapper(kwarg_to_compare, name)
kwargs_to_compare[name] = kwarg_to_compare
return model_to_compare, args_to_compare, kwargs_to_compare
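# A minimal sketch of the gradient-hook pattern used in _build_model_to_compare
# (illustrative only, not part of the test suite): register_hook stashes the
# incoming gradient so it can be compared after backward.
#
#   t = torch.rand(2, 2, requires_grad=True)
#   grads = {}
#   t.register_hook(lambda g: grads.setdefault('t', g))
#   t.sum().backward()    # grads['t'] now holds dL/dt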
def numerical_test_for_node_strategy(model: torch.nn.Module,
device_mesh: DeviceMesh,
node_index: int,
strategy_number: int,
input_args: List[torch.Tensor],
meta_arg_names: List[str],
input_kwargs: Dict[str, torch.Tensor] = {},
node_type: str = 'normal'):
for strategy_index in range(strategy_number):
print(f'#strategy_index: {strategy_index}')
        # copy the model so that backward is not run more than once on the same graph
grad_to_compare_dict = {}
grad_to_shard_dict = {}
model_to_compare, args_to_compare, kwargs_to_compare = _build_model_to_compare(
model, input_args, input_kwargs, grad_to_compare_dict)
model_to_shard, args_to_shard, kwargs_to_shard = _build_model_to_compare(model, input_args, input_kwargs,
grad_to_shard_dict)
tracer = ColoTracer()
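        # build meta-tensor sample inputs so tracing allocates no real device memory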
input_sample = {}
for input_arg, meta_arg_name in zip(input_args, meta_arg_names):
input_sample[meta_arg_name] = torch.rand(input_arg.shape).to('meta')
for meta_kwarg_name, input_kwarg in input_kwargs.items():
input_sample[meta_kwarg_name] = torch.rand(input_kwarg.shape).to('meta')
graph = tracer.trace(root=model_to_shard, meta_args=input_sample)
gm = GraphModule(model_to_shard, graph, model_to_shard.__class__.__name__)
solver_options = SolverOptions()
strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
strategies_constructor.build_strategies_and_cost()
        target_node = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies][node_index]
if node_type == 'normal':
solution_len = len(strategies_constructor.leaf_strategies)
solution = [0] * solution_len
solution[node_index] = strategy_index
elif node_type == 'following':
solution_len = len(strategies_constructor.leaf_strategies)
solution = [0] * solution_len
solution[node_index] = strategy_index
solution[node_index + 1] = strategy_index
else:
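            # for other node types (e.g. 'bias_module'), prune the strategies vector to the single
            # strategy under test so the solver has no choice but to select it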
node_vector = strategies_constructor.leaf_strategies[node_index]
strategy_to_keep = node_vector[strategy_index]
            node_vector[:] = [strategy_to_keep]    # prune in place; rebinding the local name would have no effect
# solution construction
cost_graph = CostGraph(strategies_constructor.leaf_strategies)
cost_graph.simplify_graph()
graph_analyser = GraphAnalyser(gm)
solver = Solver(gm.graph, strategies_constructor, cost_graph, graph_analyser, verbose=False)
ret = solver.call_solver_serialized_args()
solution = list(ret[0])
gm, sharding_spec_dict, origin_spec_dict, comm_actions_dict = runtime_preparation_pass(
gm, solution, device_mesh, strategies_constructor)
gm = runtime_apply_pass(gm)
gm.recompile()
# forward result compare
output = gm(*args_to_shard,
sharding_spec_convert_dict=sharding_spec_dict,
origin_node_sharding_spec_dict=origin_spec_dict,
comm_actions_dict=comm_actions_dict,
**kwargs_to_shard)
output_to_compare = model_to_compare(*args_to_compare, **kwargs_to_compare)
assert_close_helper(output, output_to_compare, strategy_index=strategy_index, type='forward output')
# backward result compare
if isinstance(output, (tuple, list)):
loss = output[0].sum()
loss_to_compare = output_to_compare[0].sum()
else:
loss = output.sum()
loss_to_compare = output_to_compare.sum()
loss_to_compare.backward()
loss.backward()
for key in grad_to_shard_dict.keys():
grad_to_shard = grad_to_shard_dict[key]
grad_to_compare = grad_to_compare_dict[key]
assert_close_helper(grad_to_shard, grad_to_compare, strategy_index=strategy_index, type='input grad')
# extract the strategy used in this iter
strategy_in_use = target_node.strategies_vector[strategy_index]
param_to_shard_dict = dict(gm.named_parameters())
param_to_compare_dict = dict(model_to_compare.named_parameters())
for name in param_to_shard_dict.keys():
param_name = name.split('.')[-1]
if node_type == 'normal':
param_sharding_spec = strategy_in_use.get_sharding_spec_by_name(param_name)
else:
if 'weight' in name:
param_sharding_spec = None
for node in list(graph.nodes):
if 'weight' in node.name:
param_sharding_spec = node.sharding_spec
elif 'bias' in name:
param_sharding_spec = None
for node in list(graph.nodes):
if 'bias' in node.name:
param_sharding_spec = node.sharding_spec
assert param_sharding_spec is not None
grad_sharded = param_to_shard_dict[name].grad
grad_to_compare = param_to_compare_dict[name].grad
global_grad = to_global(grad_sharded, param_sharding_spec)
assert_close_helper(global_grad, grad_to_compare, strategy_index=strategy_index, type='param grad')
def assert_close_helper(first: torch.Tensor,
second: torch.Tensor,
rtol: float = 1e-2,
atol: float = 1e-2,
strategy_index: int = -1,
type: str = 'not defined'):
"""
This method is used to check whether the average difference between two tensors is as close as expected.
"""
try:
if isinstance(first, (tuple, list)):
for first_element, second_element in zip(first, second):
assert_close(first_element, second_element, rtol=rtol, atol=atol)
else:
assert_close(first, second, rtol=rtol, atol=atol)
    except AssertionError:
        print(f'strategy index {strategy_index} encountered an assert_close error on {type}')
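# A hypothetical invocation of numerical_test_for_node_strategy (model, shapes and
# the strategy count below are illustrative; see the handler tests in this
# directory for real usages):
#
#   model = MyModel().cuda()
#   device_mesh = DeviceMesh(torch.arange(4), (2, 2), init_process_group=True)
#   numerical_test_for_node_strategy(model=model, device_mesh=device_mesh,
#                                    node_index=2, strategy_number=24,
#                                    input_args=[x], meta_arg_names=['x'])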
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import ViewHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class ConvViewModel(nn.Module):
def __init__(self, tgt_shape):
super().__init__()
self.tgt_shape = tgt_shape
def forward(self, input, other):
conv_node = nn.functional.conv2d(input, other, bias=None)
reshape_node = conv_node.view(*self.tgt_shape)
return reshape_node
class LinearViewModel(nn.Module):
def __init__(self, tgt_shape):
super().__init__()
self.tgt_shape = tgt_shape
def forward(self, input, other):
linear_node = nn.functional.linear(input, other, bias=None)
reshape_node = linear_node.view(*self.tgt_shape)
return reshape_node
def check_view_handler(rank, tgt_shape, model_cls, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = model_cls(tgt_shape).cuda()
if model_cls.__name__ == 'ConvViewModel':
input = torch.rand(8, 8, 66, 66).to('cuda')
other = torch.rand(16, 8, 3, 3).to('cuda')
# index of conv node in computation graph
node_index = 2
# total number of conv strategies
strategy_number = 16
if model_cls.__name__ == 'LinearViewModel':
input = torch.rand(8, 16, 64, 32).to('cuda')
other = torch.rand(64, 32).to('cuda')
# index of linear node in computation graph
node_index = 2
# total number of linear strategies
strategy_number = 23
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input, other],
meta_arg_names=['input', 'other'],
node_type='following')
tracer = ColoTracer()
if model_cls.__name__ == 'ConvViewModel':
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
# %view : [#users=1] = call_method[target=view](args = (%conv2d, 2, -1), kwargs = {})
# return view
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 8, 66, 66).to('meta'),
"other": torch.rand(16, 8, 3, 3).to('meta'),
})
if model_cls.__name__ == 'LinearViewModel':
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
# %view : [#users=1] = call_method[target=view](args = (%linear, 32, 4, 32, 32, 4), kwargs = {})
# return view
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 16, 64, 32).to('meta'),
"other": torch.rand(64, 32).to('meta'),
})
gm = ColoGraphModule(model, graph)
previous_mod_node = list(graph.nodes)[2]
view_node = list(graph.nodes)[3]
view_strategies_vector = StrategiesVector(view_node)
previous_strategies_vector = StrategiesVector(previous_mod_node)
# build handler
if model_cls.__name__ == 'ConvViewModel':
conv_handler = ConvFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
conv_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
if model_cls.__name__ == 'LinearViewModel':
assert len(previous_strategies_vector) == 0
linear_handler = LinearFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
linear_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
view_handler = ViewHandler(node=view_node, device_mesh=device_mesh, strategies_vector=view_strategies_vector)
view_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = view_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
if model_cls.__name__ == 'ConvViewModel':
assert mapping['input'].name == "conv2d"
else:
assert mapping['input'].name == "linear"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([8, 16, 64, 64])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([8, 16, 64, 64])
assert mapping['output'].name == "view"
assert mapping['output'].data.is_meta
assert mapping['output'].data.shape == torch.Size(tgt_shape)
assert mapping['output'].type == OperationDataType.OUTPUT
    # the reshape handler is a following strategy handler, so its number of strategies equals that of the predecessor node.
assert len(view_strategies_vector) == len(previous_strategies_vector)
strategy_name_list = [strategy.name for strategy in view_strategies_vector]
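    # 'FULLY REPLICATED' entries mark input shardings that (presumably) cannot be propagated
    # through this view because the sharded dimension does not map cleanly onto the target shape,
    # so the output falls back to full replication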
if model_cls.__name__ == 'ConvViewModel':
if tgt_shape == (32, 4, 64, 16, 4):
assert '[S0, S1, R, R] -> FULLY REPLICATED_0' in strategy_name_list
assert '[S1, S0, R, R] -> FULLY REPLICATED_1' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R, R]_2' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R, R]_3' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R, R]_4' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R, R]_5' in strategy_name_list
assert '[R, S1, R, R] -> FULLY REPLICATED_6' in strategy_name_list
assert '[R, S0, R, R] -> FULLY REPLICATED_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_9' in strategy_name_list
assert '[R, S0, R, R] -> FULLY REPLICATED_10' in strategy_name_list
assert '[R, S1, R, R] -> FULLY REPLICATED_11' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_12' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R, R]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_14' in strategy_name_list
assert '[R, S01, R, R] -> FULLY REPLICATED_15' in strategy_name_list
if tgt_shape == (8, 4, 4, 64, 16, 4):
assert '[S0, S1, R, R] -> [S0, S1, R, R, R, R]_0' in strategy_name_list
assert '[S1, S0, R, R] -> [S1, S0, R, R, R, R]_1' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R, R, R]_2' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R, R, R]_3' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R, R, R]_4' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R, R, R]_5' in strategy_name_list
assert '[R, S1, R, R] -> [R, S1, R, R, R, R]_6' in strategy_name_list
assert '[R, S0, R, R] -> [R, S0, R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_9' in strategy_name_list
assert '[R, S0, R, R] -> [R, S0, R, R, R, R]_10' in strategy_name_list
assert '[R, S1, R, R] -> [R, S1, R, R, R, R]_11' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_12' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R, R, R]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_14' in strategy_name_list
assert '[R, S01, R, R] -> [R, S01, R, R, R, R]_15' in strategy_name_list
if model_cls.__name__ == 'LinearViewModel':
if tgt_shape == (32, 4, 64, 16, 4):
assert '[S0, R, R, S1] -> [S0, R, R, S1, R]_11' in strategy_name_list
assert '[R, S0, R, S1] -> FULLY REPLICATED_12' in strategy_name_list
assert '[R, R, S0, S1] -> [R, R, S0, S1, R]_13' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, R, R, S0, R]_14' in strategy_name_list
assert '[R, S1, R, S0] -> FULLY REPLICATED_15' in strategy_name_list
assert '[R, R, S1, S0] -> [R, R, S1, S0, R]_16' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R, R]_17' in strategy_name_list
assert '[R, S0, R, R] -> FULLY REPLICATED_18' in strategy_name_list
assert '[R, R, S0, R] -> [R, R, S0, R, R]_19' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R, R]_20' in strategy_name_list
assert '[R, S1, R, R] -> FULLY REPLICATED_21' in strategy_name_list
assert '[R, R, S1, R] -> [R, R, S1, R, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1, R]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0, R]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0, R]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1, R]_5' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R, R]_0' in strategy_name_list
assert '[R, S01, R, R] -> FULLY REPLICATED_1' in strategy_name_list
assert '[R, R, S01, R] -> [R, R, S01, R, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01, R]_4' in strategy_name_list
if tgt_shape == (8, 4, 4, 64, 16, 4):
assert '[S0, R, R, S1] -> [S0, R, R, R, S1, R]_11' in strategy_name_list
assert '[R, S0, R, S1] -> [R, S0, R, R, S1, R]_12' in strategy_name_list
assert '[R, R, S0, S1] -> [R, R, R, S0, S1, R]_13' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, R, R, R, S0, R]_14' in strategy_name_list
assert '[R, S1, R, S0] -> [R, S1, R, R, S0, R]_15' in strategy_name_list
assert '[R, R, S1, S0] -> [R, R, R, S1, S0, R]_16' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R, R, R]_17' in strategy_name_list
assert '[R, S0, R, R] -> [R, S0, R, R, R, R]_18' in strategy_name_list
assert '[R, R, S0, R] -> [R, R, R, S0, R, R]_19' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R, R, R]_20' in strategy_name_list
assert '[R, S1, R, R] -> [R, S1, R, R, R, R]_21' in strategy_name_list
assert '[R, R, S1, R] -> [R, R, R, S1, R, R]_22' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, R, S1, R]_10' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, R, S0, R]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_7' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, R, S0, R]_6' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, R, S1, R]_5' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R, R, R]_0' in strategy_name_list
assert '[R, S01, R, R] -> [R, S01, R, R, R, R]_1' in strategy_name_list
assert '[R, R, S01, R] -> [R, R, R, S01, R, R]_2' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R, R, R]_3' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, R, S01, R]_4' in strategy_name_list
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize('tgt_shape', [(32, 4, 64, 16, 4), (8, 4, 4, 64, 16, 4)])
@parameterize('model_cls', [ConvViewModel, LinearViewModel])
def test_view_handler(tgt_shape, model_cls):
world_size = 4
run_func = partial(check_view_handler,
tgt_shape=tgt_shape,
model_cls=model_cls,
world_size=world_size,
port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_view_handler()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler, LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
def check_linear_module_handler(rank, bias, input_shape, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = nn.Sequential(nn.Linear(16, 32, bias=bias)).cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
input = torch.rand(input_shape).cuda()
# the index of linear node in computation graph
node_index = 1
# strategy number of linear node
if input_shape == (1, 4, 4, 16):
strategy_number = 19
else:
strategy_number = 24
# construct input args
input_args = [input]
# construct meta arg names
meta_arg_names = ['input']
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=input_args,
meta_arg_names=meta_arg_names)
tracer = ColoTracer()
graph = tracer.trace(model, meta_args={"input": torch.rand(input_shape).to('meta')})
gm = ColoGraphModule(model, graph)
linear_mod_node = list(graph.nodes)[1]
strategies_vector = StrategiesVector(linear_mod_node)
# build handler
handler = LinearModuleHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
assert mapping['input'].name == "input_1"
assert mapping['input'].data.shape == torch.Size(input_shape)
assert mapping['input'].type == OperationDataType.ARG
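    # the handler treats linear inputs as 2D, folding all leading dimensions into one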
input_logical_shape = mapping['input'].data.view(-1, 16).shape
assert mapping['input'].logical_shape == input_logical_shape
assert mapping['other'].name == "weight"
assert mapping['other'].data.shape == torch.Size([32, 16])
assert mapping['other'].type == OperationDataType.PARAM
assert mapping['other'].logical_shape == torch.Size([16, 32])
if bias:
assert mapping['bias'].name == "bias"
assert mapping['bias'].data.shape == torch.Size([32])
assert mapping['bias'].type == OperationDataType.PARAM
assert mapping['bias'].logical_shape == torch.Size([32])
assert mapping['output'].name == "_0"
output_shape = input_shape[:-1] + (32,)
assert mapping['output'].data.shape == torch.Size(output_shape)
assert mapping['output'].type == OperationDataType.OUTPUT
output_logical_shape = mapping['output'].data.view(-1, 32).shape
assert mapping['output'].logical_shape == torch.Size(output_logical_shape)
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
    # the first dimension cannot be sharded when the input shape is (1, 4, 4, 16)
if input_shape != (1, 4, 4, 16):
assert 'S1S0 = S1R x RS0_0' in strategy_name_list
assert 'S0S1 = S0R x RS1_0' in strategy_name_list
assert 'S1R = S1S0 x S0R_0' in strategy_name_list
assert 'S0R = S0S1 x S1R_0' in strategy_name_list
assert 'S01R = S01R x RR_0' in strategy_name_list
# SS = SR x RS
assert 'S0S1 = S0R x RS1_1' in strategy_name_list
assert 'S0S1 = S0R x RS1_2' in strategy_name_list
assert 'S1S0 = S1R x RS0_1' in strategy_name_list
assert 'S1S0 = S1R x RS0_2' in strategy_name_list
# SR = SS x SR
assert 'S0R = S0S1 x S1R_1' in strategy_name_list
assert 'S0R = S0S1 x S1R_2' in strategy_name_list
assert 'S1R = S1S0 x S0R_1' in strategy_name_list
assert 'S1R = S1S0 x S0R_2' in strategy_name_list
# RS = RS x SS
assert 'RS0 = RS1 x S1S0' in strategy_name_list
assert 'RS1 = RS0 x S0S1' in strategy_name_list
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
# S01R = S01R x RR
assert 'S01R = S01R x RR_1' in strategy_name_list
assert 'S01R = S01R x RR_2' in strategy_name_list
# RR = RS01 x S01R
assert 'RR = RS01 x S01R' in strategy_name_list
# RS01 = RR x RS01
assert 'RS01 = RR x RS01' in strategy_name_list
# RR = RR x RR
assert 'RR = RR x RR' in strategy_name_list
for strategy in strategies_vector:
strategy: ShardingStrategy
input_sharding_spec = strategy.get_sharding_spec_by_name('input_1')
weight_sharding_spec = strategy.get_sharding_spec_by_name('weight')
output_sharding_spec = strategy.get_sharding_spec_by_name('_0')
if bias:
bias_sharding_spec = strategy.get_sharding_spec_by_name('bias')
# make sure the sharding matches across different operation data
assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
if bias:
assert bias_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
class LinearModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, others, bias=None):
x = nn.functional.linear(input, others, bias=bias)
return x
def check_linear_function_handler(rank, bias, input_shape, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = LinearModel().cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
input = torch.rand(input_shape).cuda()
other = torch.rand(32, 16).cuda()
# the index of linear node in computation graph
node_index = 2
# strategy number of linear node
if input_shape == (1, 4, 4, 16):
strategy_number = 19
else:
strategy_number = 24
# construct input args
input_args = [input, other]
# construct meta arg names
meta_arg_names = ['input', 'others']
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=input_args,
meta_arg_names=meta_arg_names)
tracer = ColoTracer()
graph = tracer.trace(model,
meta_args={
"input": torch.rand(input_shape).to('meta'),
'others': torch.rand(32, 16).to('meta')
})
gm = ColoGraphModule(model, graph)
if bias:
linear_func_node = list(graph.nodes)[3]
else:
linear_func_node = list(graph.nodes)[2]
strategies_vector = StrategiesVector(linear_func_node)
# build handler
handler = LinearFunctionHandler(node=linear_func_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
mapping = handler.get_operation_data_mapping()
assert mapping['input'].name == "input_1"
assert mapping['input'].data.shape == torch.Size(input_shape)
assert mapping['input'].type == OperationDataType.ARG
input_logical_shape = mapping['input'].data.view(-1, 16).shape
assert mapping['input'].logical_shape == torch.Size(input_logical_shape)
assert mapping['other'].name == "others"
assert mapping['other'].data.shape == torch.Size([32, 16])
assert mapping['other'].type == OperationDataType.ARG
assert mapping['other'].logical_shape == torch.Size([16, 32])
if bias:
assert mapping['bias'].name == "bias"
assert mapping['bias'].data.shape == torch.Size([32])
assert mapping['bias'].type == OperationDataType.ARG
        assert mapping['bias'].logical_shape == torch.Size([32])
assert mapping['output'].name == "linear"
output_shape = input_shape[:-1] + (32,)
assert mapping['output'].data.shape == torch.Size(output_shape)
assert mapping['output'].type == OperationDataType.OUTPUT
output_logical_shape = mapping['output'].data.view(-1, 32).shape
assert mapping['output'].logical_shape == torch.Size(output_logical_shape)
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
    # the first dimension cannot be sharded when the input shape is (1, 4, 4, 16)
if input_shape != (1, 4, 4, 16):
assert 'S1S0 = S1R x RS0_0' in strategy_name_list
assert 'S0S1 = S0R x RS1_0' in strategy_name_list
assert 'S1R = S1S0 x S0R_0' in strategy_name_list
assert 'S0R = S0S1 x S1R_0' in strategy_name_list
assert 'S01R = S01R x RR_0' in strategy_name_list
# SS = SR x RS
assert 'S0S1 = S0R x RS1_1' in strategy_name_list
assert 'S0S1 = S0R x RS1_2' in strategy_name_list
assert 'S1S0 = S1R x RS0_1' in strategy_name_list
assert 'S1S0 = S1R x RS0_2' in strategy_name_list
# SR = SS x SR
assert 'S0R = S0S1 x S1R_1' in strategy_name_list
assert 'S0R = S0S1 x S1R_2' in strategy_name_list
assert 'S1R = S1S0 x S0R_1' in strategy_name_list
assert 'S1R = S1S0 x S0R_2' in strategy_name_list
# RS = RS x SS
assert 'RS0 = RS1 x S1S0' in strategy_name_list
assert 'RS1 = RS0 x S0S1' in strategy_name_list
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
# S01R = S01R x RR
assert 'S01R = S01R x RR_1' in strategy_name_list
assert 'S01R = S01R x RR_2' in strategy_name_list
# RR = RS01 x S01R
assert 'RR = RS01 x S01R' in strategy_name_list
# RS01 = RR x RS01
assert 'RS01 = RR x RS01' in strategy_name_list
# RR = RR x RR
assert 'RR = RR x RR' in strategy_name_list
for strategy in strategies_vector:
strategy: ShardingStrategy
input_sharding_spec = strategy.get_sharding_spec_by_name('input_1')
weight_sharding_spec = strategy.get_sharding_spec_by_name('others')
output_sharding_spec = strategy.get_sharding_spec_by_name('linear')
if bias:
bias_sharding_spec = strategy.get_sharding_spec_by_name('bias')
# make sure the sharding matches across different operation data
assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
if bias:
assert bias_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name='AUTO_PARALLEL')
@parameterize('input_shape', [(1, 4, 4, 16), (4, 4, 4, 16)])
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_handler(input_shape, bias=False):
world_size = 4
run_func_module = partial(check_linear_module_handler,
bias=bias,
input_shape=input_shape,
world_size=world_size,
port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
run_func_function = partial(check_linear_function_handler,
bias=bias,
input_shape=input_shape,
world_size=world_size,
port=free_port())
mp.spawn(run_func_function, nprocs=world_size)
if __name__ == '__main__':
test_linear_handler()
|
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.sum_handler import SumHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class LinearSumModel(nn.Module):
def __init__(self, sum_dims, keepdim):
super().__init__()
self.sum_dims = sum_dims
self.keepdim = keepdim
def forward(self, input, other):
linear_node = nn.functional.linear(input, other, bias=None)
if self.sum_dims is not None:
sum_node = torch.sum(linear_node, self.sum_dims, keepdim=self.keepdim)
else:
sum_node = torch.sum(linear_node, keepdim=self.keepdim)
return sum_node
def check_sum_handler(rank, sum_dims, keepdim, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = LinearSumModel(sum_dims=sum_dims, keepdim=keepdim).cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
input = torch.rand(8, 16, 64, 32).to('cuda')
other = torch.rand(64, 32).to('cuda')
# index of linear node in computation graph
node_index = 2
# total number of linear strategies
strategy_number = 24
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input, other],
meta_arg_names=['input', 'other'],
node_type='following')
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %other : torch.Tensor [#users=1] = placeholder[target=other]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
# %sum_1 : [#users=1] = call_function[target=torch.sum](args = (%linear,), kwargs = {})
# return sum_1
graph = tracer.trace(model,
meta_args={
"input": torch.rand(8, 16, 64, 32).to('meta'),
"other": torch.rand(64, 32).to('meta'),
})
gm = ColoGraphModule(model, graph)
previous_mod_node = list(graph.nodes)[2]
sum_node = list(graph.nodes)[3]
sum_strategies_vector = StrategiesVector(sum_node)
previous_strategies_vector = StrategiesVector(previous_mod_node)
# build handler
assert len(previous_strategies_vector) == 0
linear_handler = LinearFunctionHandler(node=previous_mod_node,
device_mesh=device_mesh,
strategies_vector=previous_strategies_vector)
linear_handler.register_strategy(compute_resharding_cost=False)
setattr(previous_mod_node, 'strategies_vector', previous_strategies_vector)
sum_handler = SumHandler(node=sum_node, device_mesh=device_mesh, strategies_vector=sum_strategies_vector)
sum_handler.register_strategy(compute_resharding_cost=False)
    # the sum handler is a following strategy handler, so its number of strategies equals that of the predecessor node.
assert len(sum_strategies_vector) == len(previous_strategies_vector)
strategy_name_list = [strategy.name for strategy in sum_strategies_vector]
# check operation data mapping
mapping = sum_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['input'].name == "linear"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([8, 16, 64, 64])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([8, 16, 64, 64])
assert mapping['output'].name == "sum_1"
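    # derive the expected reduced shape by applying the same sum to a scratch tensor of the input shape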
sum_node_shape = torch.empty([8, 16, 64, 64]).sum(sum_dims, keepdim=keepdim).shape
assert mapping['output'].logical_shape == sum_node_shape
assert mapping['output'].type == OperationDataType.OUTPUT
# check strategy name
    if sum_dims == (0, 2) and not keepdim:
assert '[R, R, R, S1] -> [R, S1]_0' in strategy_name_list
assert '[R, S0, R, S1] -> [S0, S1]_1' in strategy_name_list
assert '[R, R, R, S1] -> [R, S1]_2' in strategy_name_list
assert '[R, R, R, S0] -> [R, S0]_3' in strategy_name_list
assert '[R, S1, R, S0] -> [S1, S0]_4' in strategy_name_list
assert '[R, R, R, S0] -> [R, S0]_5' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_6' in strategy_name_list
assert '[R, S0, R, R] -> [S0, R]_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_9' in strategy_name_list
assert '[R, S1, R, R] -> [S1, R]_10' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_11' in strategy_name_list
assert '[R, R, R, S1] -> [R, S1]_12' in strategy_name_list
assert '[R, R, R, S0] -> [R, S0]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_14' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_15' in strategy_name_list
assert '[R, R, R, S0] -> [R, S0]_16' in strategy_name_list
assert '[R, R, R, S1] -> [R, S1]_17' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_18' in strategy_name_list
assert '[R, S01, R, R] -> [S01, R]_19' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_20' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_21' in strategy_name_list
assert '[R, R, R, S01] -> [R, S01]_22' in strategy_name_list
assert '[R, R, R, R] -> [R, R]_23' in strategy_name_list
    if sum_dims == (0, 2) and keepdim:
assert '[R, R, R, S1] -> [R, R, R, S1]_0' in strategy_name_list
assert '[R, S0, R, S1] -> [R, S0, R, S1]_1' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_2' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_3' in strategy_name_list
assert '[R, S1, R, S0] -> [R, S1, R, S0]_4' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_5' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_6' in strategy_name_list
assert '[R, S0, R, R] -> [R, S0, R, R]_7' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_8' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_9' in strategy_name_list
assert '[R, S1, R, R] -> [R, S1, R, R]_10' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_11' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_12' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_14' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_15' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_16' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_17' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_18' in strategy_name_list
assert '[R, S01, R, R] -> [R, S01, R, R]_19' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_20' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_21' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01]_22' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_23' in strategy_name_list
    if sum_dims == 1 and not keepdim:
assert '[S0, R, R, S1] -> [S0, R, S1]_0' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, S1]_1' in strategy_name_list
assert '[R, R, S0, S1] -> [R, S0, S1]_2' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, R, S0]_3' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, S0]_4' in strategy_name_list
assert '[R, R, S1, S0] -> [R, S1, S0]_5' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R]_6' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_7' in strategy_name_list
assert '[R, R, S0, R] -> [R, S0, R]_8' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_10' in strategy_name_list
assert '[R, R, S1, R] -> [R, S1, R]_11' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, S1]_12' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, S0]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_14' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_15' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, S0]_16' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, S1]_17' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R]_18' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_19' in strategy_name_list
assert '[R, R, S01, R] -> [R, S01, R]_20' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_21' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, S01]_22' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R]_23' in strategy_name_list
    if sum_dims == 1 and keepdim:
assert '[S0, R, R, S1] -> [S0, R, R, S1]_0' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_1' in strategy_name_list
assert '[R, R, S0, S1] -> [R, R, S0, S1]_2' in strategy_name_list
assert '[S1, R, R, S0] -> [S1, R, R, S0]_3' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_4' in strategy_name_list
assert '[R, R, S1, S0] -> [R, R, S1, S0]_5' in strategy_name_list
assert '[S0, R, R, R] -> [S0, R, R, R]_6' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_7' in strategy_name_list
assert '[R, R, S0, R] -> [R, R, S0, R]_8' in strategy_name_list
assert '[S1, R, R, R] -> [S1, R, R, R]_9' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_10' in strategy_name_list
assert '[R, R, S1, R] -> [R, R, S1, R]_11' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_12' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_13' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_14' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_15' in strategy_name_list
assert '[R, R, R, S0] -> [R, R, R, S0]_16' in strategy_name_list
assert '[R, R, R, S1] -> [R, R, R, S1]_17' in strategy_name_list
assert '[S01, R, R, R] -> [S01, R, R, R]_18' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_19' in strategy_name_list
assert '[R, R, S01, R] -> [R, R, S01, R]_20' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_21' in strategy_name_list
assert '[R, R, R, S01] -> [R, R, R, S01]_22' in strategy_name_list
assert '[R, R, R, R] -> [R, R, R, R]_23' in strategy_name_list
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize('sum_dims', [(0, 2), 1])
@parameterize('keepdim', [False, True])
def test_sum_handler(sum_dims, keepdim):
world_size = 4
run_func = partial(check_sum_handler, sum_dims=sum_dims, keepdim=keepdim, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_sum_handler()
|
import pytest
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler.normal_pooling_handler import NormPoolingHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.fx.tracer.meta_patch.patched_module import linear
from colossalai.testing.pytest_wrapper import run_on_environment_flag
@run_on_environment_flag(name='AUTO_PARALLEL')
def test_norm_pool_handler():
model = nn.Sequential(nn.MaxPool2d(4, padding=1).to('meta'))
tracer = ColoTracer()
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %_0 : [#users=1] = call_module[target=0](args = (%input_1,), kwargs = {})
# return _0
graph = tracer.trace(model, meta_args={"input": torch.rand(4, 4, 64, 64).to('meta')})
gm = ColoGraphModule(model, graph)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
conv_mod_node = list(graph.nodes)[1]
strategies_vector = StrategiesVector(conv_mod_node)
# build handler
handler = NormPoolingHandler(node=conv_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping['input'].name == "input_1"
assert mapping['input'].data.is_meta
assert mapping['input'].data.shape == torch.Size([4, 4, 64, 64])
assert mapping['input'].type == OperationDataType.ARG
assert mapping['input'].logical_shape == torch.Size([4, 4, 64, 64])
assert mapping['output'].name == "_0"
assert mapping['output'].data.is_meta
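# MaxPool2d(4, padding=1) uses stride = kernel = 4 by default, so each spatial
# dim becomes floor((64 + 2*1 - 4) / 4) + 1 = 16, hence the 16 x 16 output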
assert mapping['output'].data.shape == torch.Size([4, 4, 16, 16])
assert mapping['output'].type == OperationDataType.OUTPUT
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
assert len(strategy_name_list) == 9
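# 9 strategies: likely the 3 x 3 combinations of {S0, S1, R} over the batch
# and channel dims, with spatial dims kept replicated for pooling -- that
# breakdown is an assumption; only the count is asserted here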
if __name__ == '__main__':
test_norm_pool_handler()
|
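# Tests LinearFunctionHandler on a traced LinearModule: the operation-data
# mapping, the 24 generated sharding strategies, and their mutual consistency.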
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler, LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, parameterize, rerun_if_address_is_in_use
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.utils import free_port
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class LinearModule(torch.nn.Module):
def __init__(self, in_features, out_features, bias):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features, bias=bias)
def forward(self, x):
x = self.linear(x)
return x
def check_linear_module_handler(rank, bias, world_size, port):
disable_existing_loggers()
launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
model = LinearModule(16, 32, bias=bias).cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
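# init_process_group=True builds the per-mesh-axis process groups, which the
# numerical check below presumably needs for real (nccl) communication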
input = torch.rand(4, 4, 4, 16).cuda()
# the index of linear node in computation graph
node_index = 3
# strategy number of linear node
strategy_number = 24
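# 24 = the number of distinct strategy names asserted against
# strategy_name_list below (6 + 6 + 2 + 2 + 2 + 3 + 1 + 1 + 1)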
# construct input args
input_args = [input]
# construct meta arg names
meta_arg_names = ['x']
numerical_test_for_node_strategy(model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=input_args,
meta_arg_names=meta_arg_names,
node_type='bias_module')
tracer = ColoTracer()
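# trace on meta tensors so graph construction allocates no real device memory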
graph = tracer.trace(model, meta_args={"x": torch.rand(4, 4, 4, 16).to('meta')})
gm = ColoGraphModule(model, graph)
linear_mod_node = list(graph.nodes)[3]
strategies_vector = StrategiesVector(linear_mod_node)
# build handler
handler = LinearFunctionHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
assert mapping['input'].name == "x"
assert mapping['input'].data.shape == torch.Size([4, 4, 4, 16])
assert mapping['input'].type == OperationDataType.ARG
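# the leading dims are flattened for the matmul view: 4 * 4 * 4 = 64 rows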
assert mapping['input'].logical_shape == torch.Size([64, 16])
assert mapping['other'].name == "linear_weight"
assert mapping['other'].data.shape == torch.Size([32, 16])
assert mapping['other'].type == OperationDataType.PARAM
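# nn.Linear stores its weight as [out_features, in_features] = [32, 16]; the
# handler reasons about the transposed logical shape [in, out] = [16, 32]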
assert mapping['other'].logical_shape == torch.Size([16, 32])
assert 'bias' not in mapping
assert mapping['output'].name == "linear"
assert mapping['output'].data.shape == torch.Size([4, 4, 4, 32])
assert mapping['output'].type == OperationDataType.OUTPUT
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
# SS = SR x RS
assert 'S0S1 = S0R x RS1_0' in strategy_name_list
assert 'S0S1 = S0R x RS1_1' in strategy_name_list
assert 'S0S1 = S0R x RS1_2' in strategy_name_list
assert 'S1S0 = S1R x RS0_0' in strategy_name_list
assert 'S1S0 = S1R x RS0_1' in strategy_name_list
assert 'S1S0 = S1R x RS0_2' in strategy_name_list
# SR = SS x SR
assert 'S0R = S0S1 x S1R_0' in strategy_name_list
assert 'S0R = S0S1 x S1R_1' in strategy_name_list
assert 'S0R = S0S1 x S1R_2' in strategy_name_list
assert 'S1R = S1S0 x S0R_0' in strategy_name_list
assert 'S1R = S1S0 x S0R_1' in strategy_name_list
assert 'S1R = S1S0 x S0R_2' in strategy_name_list
# RS = RS x SS
assert 'RS0 = RS1 x S1S0' in strategy_name_list
assert 'RS1 = RS0 x S0S1' in strategy_name_list
# RR = RS x SR
assert 'RR = RS0 x S0R' in strategy_name_list
assert 'RR = RS1 x S1R' in strategy_name_list
# RS= RR x RS
assert 'RS0 = RR x RS0' in strategy_name_list
assert 'RS1 = RR x RS1' in strategy_name_list
# S01R = S01R x RR
assert 'S01R = S01R x RR_0' in strategy_name_list
assert 'S01R = S01R x RR_1' in strategy_name_list
assert 'S01R = S01R x RR_2' in strategy_name_list
# RR = RS01 x S01R
assert 'RR = RS01 x S01R' in strategy_name_list
# RS01 = RR x RS01
assert 'RS01 = RR x RS01' in strategy_name_list
# RR = RR x RR
assert 'RR = RR x RR' in strategy_name_list
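# every strategy must describe a consistent matmul: the input's leading dims
# match the output's, the contracted dim is sharded the same way on input and
# weight, and the weight's out dim matches the output's last dim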
for strategy in strategies_vector:
strategy: ShardingStrategy
input_sharding_spec = strategy.get_sharding_spec_by_name('x')
weight_sharding_spec = strategy.get_sharding_spec_by_name('linear_weight')
output_sharding_spec = strategy.get_sharding_spec_by_name('linear')
# make sure the sharding matches across different operation data
assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name='AUTO_PARALLEL')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_handler(bias=True):
world_size = 4
run_func_module = partial(check_linear_module_handler, bias=bias, world_size=world_size, port=free_port())
mp.spawn(run_func_module, nprocs=world_size)
if __name__ == '__main__':
test_linear_handler()
|