Add files using upload-large-folder tool
Browse files- 100000/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_16_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_18_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_24_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_28_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_30_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_31_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
- 100000/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
- 100000/ema.pt.zst +3 -0
- 100000/mp_rank_00_model_states.pt +3 -0
- 100000/rng-0.ckpt +3 -0
- 100000/rng-1.ckpt +3 -0
- 100000/rng-10.ckpt +3 -0
- 100000/rng-11.ckpt +3 -0
- 100000/rng-12.ckpt +3 -0
- 100000/rng-13.ckpt +3 -0
- 100000/rng-14.ckpt +3 -0
- 100000/rng-15.ckpt +3 -0
- 100000/rng-16.ckpt +3 -0
- 100000/rng-17.ckpt +3 -0
- 100000/rng-18.ckpt +3 -0
- 100000/rng-19.ckpt +3 -0
- 100000/rng-2.ckpt +3 -0
- 100000/rng-20.ckpt +3 -0
- 100000/rng-21.ckpt +3 -0
- 100000/rng-22.ckpt +3 -0
- 100000/rng-23.ckpt +3 -0
- 100000/rng-24.ckpt +3 -0
- 100000/rng-25.ckpt +3 -0
- 100000/rng-26.ckpt +3 -0
- 100000/rng-27.ckpt +3 -0
- 100000/rng-28.ckpt +3 -0
- 100000/rng-29.ckpt +3 -0
- 100000/rng-3.ckpt +3 -0
- 100000/rng-30.ckpt +3 -0
- 100000/rng-31.ckpt +3 -0
- 100000/rng-4.ckpt +3 -0
- 100000/rng-5.ckpt +3 -0
- 100000/rng-6.ckpt +3 -0
- 100000/rng-7.ckpt +3 -0
- 100000/rng-8.ckpt +3 -0
- 100000/rng-9.ckpt +3 -0
- config.json +107 -0
- flops.txt +934 -0
- latest +1 -0
- zero_to_fp32.py +604 -0
100000/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0448b045eeb01eb5d8095a02b7ca3559fb819c665b7323d61dd5b01abfd6dd71
|
| 3 |
+
size 1142984120
|
100000/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6e877618f703593edb2bd9c745f0ee04ba56d72d295c00c047cfb6da8dad2bd0
|
| 3 |
+
size 1142983864
|
100000/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:54de03921dd7d50417fe5a367a9032436e298bb69b776ca69975ec507bc298ff
|
| 3 |
+
size 1142984184
|
100000/bf16_zero_pp_rank_16_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:87ed0de1c57b5ddfeac7957f355dd6a3c5dadca28b701e6b54a9f75abd6727a9
|
| 3 |
+
size 1142984184
|
100000/bf16_zero_pp_rank_18_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9766779240f93b336c61f04581f9f9657421d8f85e4de7dd022f228c7869e9c9
|
| 3 |
+
size 1142984120
|
100000/bf16_zero_pp_rank_24_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8751e7d180488ca2792cace02abf2cfc86102701bfc7a91a490109c411cd7acf
|
| 3 |
+
size 1142984120
|
100000/bf16_zero_pp_rank_28_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb3e43118710bd2d69be3ae6f0d1a55dac83e85f9739a7031ce8e66731532c2a
|
| 3 |
+
size 1142984184
|
100000/bf16_zero_pp_rank_30_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:65d51a7101a21df0b7e45fc47f7295d7fee2dc557f0a1a2bcfa4a434644ed74b
|
| 3 |
+
size 1142983992
|
100000/bf16_zero_pp_rank_31_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0a2ddc85848c70339ec2a4dfc4b3105346bdeca728e57926fbb013b0419bbf5c
|
| 3 |
+
size 1142984888
|
100000/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c41dcea6410489c4332fb165a2e60c81d499e157061943bd166ac4f847ac4ef
|
| 3 |
+
size 1142984176
|
100000/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c0e74d3ab249cbf04cd8112905dd75075c6823af387c73b2a2cb710a0c60279f
|
| 3 |
+
size 1142984112
|
100000/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1d85898462626094918384286d6a016d4758c35a66c16ab907acbebbc0a1ff1c
|
| 3 |
+
size 1142984176
|
100000/ema.pt.zst
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:682ea8fca1c0e28bec06db6446d8cfee44d7a05d5de6cee4d75bb135bc71b592
|
| 3 |
+
size 11301373123
|
100000/mp_rank_00_model_states.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9698b230989bb87517fb25dc6431a021dcad380023399b54dc414baeed469515
|
| 3 |
+
size 18338325156
|
100000/rng-0.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8bb5b4727faf939c2791701613158408e130f58cad1382c138761ad169e98acc
|
| 3 |
+
size 14906
|
100000/rng-1.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:916850147c8cb799a58cb714b2a6dbc74f80316bd9e06dfc8c404bc62a2d01a7
|
| 3 |
+
size 14906
|
100000/rng-10.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:64ef9a5f41060dcc0bc5811fd3d3dd895023ae5bfc7889cff5ead763623251ab
|
| 3 |
+
size 14915
|
100000/rng-11.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:21e4cb5e622d4cf9a7210cf825526406ebbebf95982a2e897fbc4e2c35eee52e
|
| 3 |
+
size 14915
|
100000/rng-12.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:212b3009effa283891fa77dd98dabc78d0a30e536035ba686c49d558ab7fb809
|
| 3 |
+
size 14915
|
100000/rng-13.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cf5cc6a4c24f492e903357192866578a7727b3ef122829f6b087382e56b31219
|
| 3 |
+
size 14915
|
100000/rng-14.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:19071d6571957f5994add0eae66ca0adba5c0b914c74969f95c4f650f382d9c7
|
| 3 |
+
size 14915
|
100000/rng-15.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:94b688b2ab22e5022af4fcc4bbe20ca2296906de249b108a06abe09feed0a84a
|
| 3 |
+
size 14915
|
100000/rng-16.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f2444d3346cdc9e2e17a33c4fbbba8acc61d926542e2770c164a071a58c91ea0
|
| 3 |
+
size 14915
|
100000/rng-17.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a94bce3f38427488db4125505c2ddbba197d4fb71877f0ef108beb397c96fdd5
|
| 3 |
+
size 14915
|
100000/rng-18.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1e2b823888affc27a052073dd61ca8763970b080079b8d5ab0cc60419f653a8b
|
| 3 |
+
size 14915
|
100000/rng-19.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:978ad2f51b5e37a9647da00f4d8b2f4a56a167f9258baa6c44f062e741b2fe89
|
| 3 |
+
size 14915
|
100000/rng-2.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4b570083d8aab5cc274e4eeebd285f9ea4ac9f47bcba008c1cd9d0fc66bb318b
|
| 3 |
+
size 14906
|
100000/rng-20.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:885394dbcd4f27920d425576cc32401e15507cfa31259c496ea57645e1ebe9d9
|
| 3 |
+
size 14915
|
100000/rng-21.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb0e47e1ae16b85cd4de5e20381a37706a7e895a0b506a515e9cfa3eb1c55b5f
|
| 3 |
+
size 14915
|
100000/rng-22.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3abcc8137c26e19828a8389381972250fbbd119dc646785a06c8f59c2ba3b4fd
|
| 3 |
+
size 14915
|
100000/rng-23.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7d471922bc8207f2cac22549e282e7104b99b80ac0c5607d8693a719861b4dd4
|
| 3 |
+
size 14915
|
100000/rng-24.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2e2ac11942ecdf990c54bcd488b3e6a07624a39f091d7e4e5ba4defa44be880
|
| 3 |
+
size 14915
|
100000/rng-25.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:82ec4bd84d0764b0062c84c7ba04dd9307fb76a026b755952379f696f61c2aea
|
| 3 |
+
size 14915
|
100000/rng-26.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3c488b8418ed9511d6a9a6696323b8e1a92fe55635ec99e3f75b4fd6139d56df
|
| 3 |
+
size 14915
|
100000/rng-27.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b417db8a88bfebdd0f40af3b2b098cee8658f313b68d41d4b17cec90a64b2572
|
| 3 |
+
size 14915
|
100000/rng-28.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:16178c33b8bfed85576174321d0d8f4f15f48514295c58f00e9cc35d1a43ae01
|
| 3 |
+
size 14915
|
100000/rng-29.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2a35c2efce55dbee5a072f6913b42d2c9d680482b359b1d8d3985aa8935dac4d
|
| 3 |
+
size 14915
|
100000/rng-3.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:35d092f725aed26e952cf049fe53f7e28e094362a966ed1a92659798bd37f7db
|
| 3 |
+
size 14906
|
100000/rng-30.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ec623f54939453ff59cf1c6a3094bd9cc926e3b69decefc0169f508079d01051
|
| 3 |
+
size 14915
|
100000/rng-31.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:402a431a8b78c557bf3169570493ad78d0bae37ce0c4eab1e9d7bbafcba06c92
|
| 3 |
+
size 14915
|
100000/rng-4.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:236ddb166fbade54d5609adeccc6f57ab75b8acc3d04da10c4c0ee964942820e
|
| 3 |
+
size 14906
|
100000/rng-5.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:054e766c0d083d3f1af7d4fdd8cffc3a4e365cf8ff23da3ea761584b112a48f2
|
| 3 |
+
size 14906
|
100000/rng-6.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2ab96b7ee0eef3768d5bc14e3f8ba28861f67aa30be5b301354c7bfaac049dd2
|
| 3 |
+
size 14906
|
100000/rng-7.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:961901a7e00841a38e8d397016a1457d77a47576caed01cd9d3ab4b2cfbf663a
|
| 3 |
+
size 14906
|
100000/rng-8.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5fdf07a858c1592b7fad503b7a80964e94ea15194a27dfeffee76a5a512dd7d5
|
| 3 |
+
size 14906
|
100000/rng-9.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:95cd4916247cb62f865b8205343c53ce65b86e7593db000d70e4e961e08fb86e
|
| 3 |
+
size 14906
|
config.json
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"attention": "self",
|
| 3 |
+
"base_config": {
|
| 4 |
+
"_name_or_path": "google/gemma-2b",
|
| 5 |
+
"add_cross_attention": false,
|
| 6 |
+
"architectures": [
|
| 7 |
+
"GemmaForCausalLM"
|
| 8 |
+
],
|
| 9 |
+
"attention_bias": false,
|
| 10 |
+
"attention_dropout": 0.0,
|
| 11 |
+
"bad_words_ids": null,
|
| 12 |
+
"begin_suppress_tokens": null,
|
| 13 |
+
"bos_token_id": 2,
|
| 14 |
+
"chunk_size_feed_forward": 0,
|
| 15 |
+
"cross_attention_hidden_size": null,
|
| 16 |
+
"decoder_start_token_id": null,
|
| 17 |
+
"diversity_penalty": 0.0,
|
| 18 |
+
"do_sample": false,
|
| 19 |
+
"early_stopping": false,
|
| 20 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 21 |
+
"eos_token_id": 1,
|
| 22 |
+
"exponential_decay_length_penalty": null,
|
| 23 |
+
"finetuning_task": null,
|
| 24 |
+
"forced_bos_token_id": null,
|
| 25 |
+
"forced_eos_token_id": null,
|
| 26 |
+
"head_dim": 128,
|
| 27 |
+
"hidden_act": "gelu",
|
| 28 |
+
"hidden_activation": "gelu_pytorch_tanh",
|
| 29 |
+
"hidden_size": 2048,
|
| 30 |
+
"id2label": {
|
| 31 |
+
"0": "LABEL_0",
|
| 32 |
+
"1": "LABEL_1"
|
| 33 |
+
},
|
| 34 |
+
"initializer_range": 0.02,
|
| 35 |
+
"intermediate_size": 8192,
|
| 36 |
+
"is_decoder": false,
|
| 37 |
+
"is_encoder_decoder": false,
|
| 38 |
+
"label2id": {
|
| 39 |
+
"LABEL_0": 0,
|
| 40 |
+
"LABEL_1": 1
|
| 41 |
+
},
|
| 42 |
+
"length_penalty": 1.0,
|
| 43 |
+
"max_length": 20,
|
| 44 |
+
"max_position_embeddings": 8192,
|
| 45 |
+
"min_length": 0,
|
| 46 |
+
"model_type": "gemma",
|
| 47 |
+
"no_repeat_ngram_size": 0,
|
| 48 |
+
"num_attention_heads": 32,
|
| 49 |
+
"num_beam_groups": 1,
|
| 50 |
+
"num_beams": 1,
|
| 51 |
+
"num_hidden_layers": 18,
|
| 52 |
+
"num_key_value_heads": 8,
|
| 53 |
+
"num_return_sequences": 1,
|
| 54 |
+
"output_attentions": false,
|
| 55 |
+
"output_hidden_states": false,
|
| 56 |
+
"output_scores": false,
|
| 57 |
+
"pad_token_id": 0,
|
| 58 |
+
"prefix": null,
|
| 59 |
+
"problem_type": null,
|
| 60 |
+
"pruned_heads": {},
|
| 61 |
+
"remove_invalid_values": false,
|
| 62 |
+
"repetition_penalty": 1.0,
|
| 63 |
+
"return_dict": true,
|
| 64 |
+
"return_dict_in_generate": false,
|
| 65 |
+
"rms_norm_eps": 1e-06,
|
| 66 |
+
"rope_scaling": null,
|
| 67 |
+
"rope_theta": 10000.0,
|
| 68 |
+
"sep_token_id": null,
|
| 69 |
+
"suppress_tokens": null,
|
| 70 |
+
"task_specific_params": null,
|
| 71 |
+
"temperature": 1.0,
|
| 72 |
+
"tf_legacy_loss": false,
|
| 73 |
+
"tie_encoder_decoder": false,
|
| 74 |
+
"tie_word_embeddings": true,
|
| 75 |
+
"tokenizer_class": null,
|
| 76 |
+
"top_k": 50,
|
| 77 |
+
"top_p": 1.0,
|
| 78 |
+
"torch_dtype": "bfloat16",
|
| 79 |
+
"torchscript": false,
|
| 80 |
+
"typical_p": 1.0,
|
| 81 |
+
"use_bfloat16": false,
|
| 82 |
+
"use_cache": true,
|
| 83 |
+
"vocab_size": 256000
|
| 84 |
+
},
|
| 85 |
+
"dit_hidden_size": 2048,
|
| 86 |
+
"dit_num_hidden_layers": 30,
|
| 87 |
+
"in_channels": 16,
|
| 88 |
+
"initial_layers": 0,
|
| 89 |
+
"model_type": "DiT",
|
| 90 |
+
"out_channels": 16,
|
| 91 |
+
"patch_size": 2,
|
| 92 |
+
"pos_embed": "ape",
|
| 93 |
+
"pos_embed_max_size": 64,
|
| 94 |
+
"qk_norm": true,
|
| 95 |
+
"repa_enable": true,
|
| 96 |
+
"repa_enc_depth": 8,
|
| 97 |
+
"repa_projector_dim": 2048,
|
| 98 |
+
"repa_z_dim": 768,
|
| 99 |
+
"sample_size": 32,
|
| 100 |
+
"sandwich_norm": false,
|
| 101 |
+
"shared_attention_layers": "all",
|
| 102 |
+
"text_hidden_size": 2048,
|
| 103 |
+
"text_hidden_states_index": -1,
|
| 104 |
+
"text_modulation_embeds_dim": null,
|
| 105 |
+
"timestep_conditioning": "adaln-zero",
|
| 106 |
+
"transformers_version": "4.43.3"
|
| 107 |
+
}
|
flops.txt
ADDED
|
@@ -0,0 +1,934 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
-------------------------- DeepSpeed Flops Profiler --------------------------
|
| 3 |
+
Profile Summary at step 2:
|
| 4 |
+
Notations:
|
| 5 |
+
data parallel size (dp_size), model parallel size(mp_size),
|
| 6 |
+
number of parameters (params), number of multiply-accumulate operations(MACs),
|
| 7 |
+
number of floating-point operations (flops), floating-point operations per second (FLOPS),
|
| 8 |
+
fwd latency (forward propagation latency), bwd latency (backward propagation latency),
|
| 9 |
+
step (weights update latency), iter latency (sum of fwd, bwd and step latency)
|
| 10 |
+
|
| 11 |
+
world size: 32
|
| 12 |
+
data parallel size: 32
|
| 13 |
+
model parallel size: 1
|
| 14 |
+
batch size per GPU: 16
|
| 15 |
+
params per GPU: 3.05 B
|
| 16 |
+
params of model = params per GPU * mp_size: 3.05 B
|
| 17 |
+
fwd MACs per GPU: 9.86 TMACs
|
| 18 |
+
fwd flops per GPU: 19.73 T
|
| 19 |
+
fwd flops of model = fwd flops per GPU * mp_size: 19.73 T
|
| 20 |
+
fwd latency: 158.91 ms
|
| 21 |
+
fwd FLOPS per GPU = fwd flops per GPU / fwd latency: 124.15 TFLOPS
|
| 22 |
+
bwd latency: 559.86 ms
|
| 23 |
+
bwd FLOPS per GPU = 2 * fwd flops per GPU / bwd latency: 70.48 TFLOPS
|
| 24 |
+
fwd+bwd FLOPS per GPU = 3 * fwd flops per GPU / (fwd+bwd latency): 82.34 TFLOPS
|
| 25 |
+
step latency: 167.69 ms
|
| 26 |
+
iter latency: 886.46 ms
|
| 27 |
+
FLOPS per GPU = 3 * fwd flops per GPU / iter latency: 66.77 TFLOPS
|
| 28 |
+
samples/second: 577.58
|
| 29 |
+
|
| 30 |
+
----------------------------- Aggregated Profile per GPU -----------------------------
|
| 31 |
+
Top 1 modules in terms of params, MACs or fwd latency at different model depths:
|
| 32 |
+
depth 0:
|
| 33 |
+
params - {'DiT': '3.05 B'}
|
| 34 |
+
MACs - {'DiT': '9.86 TMACs'}
|
| 35 |
+
fwd latency - {'DiT': '158.73 ms'}
|
| 36 |
+
depth 1:
|
| 37 |
+
params - {'ModuleList': '3.02 B'}
|
| 38 |
+
MACs - {'ModuleList': '9.8 TMACs'}
|
| 39 |
+
fwd latency - {'ModuleList': '154.18 ms'}
|
| 40 |
+
depth 2:
|
| 41 |
+
params - {'DiTLayer': '3.02 B'}
|
| 42 |
+
MACs - {'DiTLayer': '9.8 TMACs'}
|
| 43 |
+
fwd latency - {'DiTLayer': '154.18 ms'}
|
| 44 |
+
depth 3:
|
| 45 |
+
params - {'GemmaMLP': '1.51 B'}
|
| 46 |
+
MACs - {'GemmaMLP': '6.18 TMACs'}
|
| 47 |
+
fwd latency - {'DiTSelfAttention': '81.24 ms'}
|
| 48 |
+
|
| 49 |
+
------------------------------ Detailed Profile per GPU ------------------------------
|
| 50 |
+
Each module profile is listed after its name in the following order:
|
| 51 |
+
params, percentage of total params, MACs, percentage of total MACs, fwd latency, percentage of total fwd latency, fwd FLOPS
|
| 52 |
+
|
| 53 |
+
Note: 1. A module can have torch.nn.module or torch.nn.functional to compute logits (e.g. CrossEntropyLoss). They are not counted as submodules, thus not to be printed out. However they make up the difference between a parent's MACs (or latency) and the sum of its submodules'.
|
| 54 |
+
2. Number of floating-point operations is a theoretical estimation, thus FLOPS computed using that could be larger than the maximum system throughput.
|
| 55 |
+
3. The fwd latency listed in the top module's profile is directly captured at the module forward function in PyTorch, thus it's less than the fwd latency shown above which is captured in DeepSpeed.
|
| 56 |
+
|
| 57 |
+
DiT(
|
| 58 |
+
3.05 B = 100% Params, 9.86 TMACs = 100% MACs, 158.73 ms = 100% latency, 124.29 TFLOPS
|
| 59 |
+
(layers): ModuleList(
|
| 60 |
+
(0): DiTLayer(
|
| 61 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.24 ms = 3.3% latency, 124.79 TFLOPS
|
| 62 |
+
(input_layernorm): AdaLayerNormZero(
|
| 63 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 628.95 us = 0.4% latency, 1.28 TFLOPS
|
| 64 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 36.24 us = 0.02% latency, 904.2 MFLOPS)
|
| 65 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 184.54 us = 0.12% latency, 4.36 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 66 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.46 us = 0.15% latency, 0 FLOPS)
|
| 67 |
+
)
|
| 68 |
+
(self_attn): DiTSelfAttention(
|
| 69 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.76 ms = 1.74% latency, 87.27 TFLOPS
|
| 70 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.77 us = 0.29% latency, 0 FLOPS)
|
| 71 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 238.42 us = 0.15% latency, 0 FLOPS)
|
| 72 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 276.8 us = 0.17% latency, 248.26 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 73 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 160.93 us = 0.1% latency, 106.75 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 74 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.87 us = 0.1% latency, 113.12 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 75 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.09 us = 0.09% latency, 125.32 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 76 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.85 us = 0.09% latency, 125.54 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 77 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 251.05 us = 0.16% latency, 273.72 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 78 |
+
)
|
| 79 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.18 us = 0.15% latency, 0 FLOPS)
|
| 80 |
+
(mlp): GemmaMLP(
|
| 81 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.77% latency, 336.48 TFLOPS
|
| 82 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 343.8 us = 0.22% latency, 399.76 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 83 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 322.1 us = 0.2% latency, 426.69 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 84 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.36 us = 0.19% latency, 456.06 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 85 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 86.55 us = 0.05% latency, 387.71 GFLOPS)
|
| 86 |
+
)
|
| 87 |
+
)
|
| 88 |
+
(1): DiTLayer(
|
| 89 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.12 ms = 3.23% latency, 127.56 TFLOPS
|
| 90 |
+
(input_layernorm): AdaLayerNormZero(
|
| 91 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 585.56 us = 0.37% latency, 1.38 TFLOPS
|
| 92 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.38 us = 0.02% latency, 981.71 MFLOPS)
|
| 93 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 159.98 us = 0.1% latency, 5.03 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 94 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.56 us = 0.15% latency, 0 FLOPS)
|
| 95 |
+
)
|
| 96 |
+
(self_attn): DiTSelfAttention(
|
| 97 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.71% latency, 88.76 TFLOPS
|
| 98 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.77 us = 0.29% latency, 0 FLOPS)
|
| 99 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 248.19 us = 0.16% latency, 0 FLOPS)
|
| 100 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 248.67 us = 0.16% latency, 276.35 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 101 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 156.64 us = 0.1% latency, 109.68 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 102 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.73 us = 0.09% latency, 114.74 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 103 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.47 us = 0.09% latency, 123.18 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 104 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.61 us = 0.09% latency, 125.75 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 105 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 242.95 us = 0.15% latency, 282.86 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 106 |
+
)
|
| 107 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.94 us = 0.15% latency, 0 FLOPS)
|
| 108 |
+
(mlp): GemmaMLP(
|
| 109 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 341.33 TFLOPS
|
| 110 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 334.02 us = 0.21% latency, 411.46 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 111 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 319.96 us = 0.2% latency, 429.55 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 112 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.36 us = 0.19% latency, 456.06 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 113 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.68 us = 0.05% latency, 400.96 GFLOPS)
|
| 114 |
+
)
|
| 115 |
+
)
|
| 116 |
+
(2): DiTLayer(
|
| 117 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.12 ms = 3.23% latency, 127.59 TFLOPS
|
| 118 |
+
(input_layernorm): AdaLayerNormZero(
|
| 119 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 585.08 us = 0.37% latency, 1.38 TFLOPS
|
| 120 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.33 us = 0.02% latency, 954.44 MFLOPS)
|
| 121 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 160.69 us = 0.1% latency, 5.01 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 122 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 123 |
+
)
|
| 124 |
+
(self_attn): DiTSelfAttention(
|
| 125 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.71% latency, 88.83 TFLOPS
|
| 126 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.06 us = 0.29% latency, 0 FLOPS)
|
| 127 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 128 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 254.15 us = 0.16% latency, 270.38 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 129 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 154.26 us = 0.1% latency, 111.37 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 130 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 159.03 us = 0.1% latency, 108.03 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 131 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.47 us = 0.09% latency, 123.18 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 132 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.38 us = 0.09% latency, 125.97 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 133 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 238.9 us = 0.15% latency, 287.66 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 134 |
+
)
|
| 135 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.03 us = 0.15% latency, 0 FLOPS)
|
| 136 |
+
(mlp): GemmaMLP(
|
| 137 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 340.46 TFLOPS
|
| 138 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 333.55 us = 0.21% latency, 412.05 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 139 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 322.82 us = 0.2% latency, 425.75 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 140 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 299.93 us = 0.19% latency, 458.24 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 141 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 85.59 us = 0.05% latency, 392.03 GFLOPS)
|
| 142 |
+
)
|
| 143 |
+
)
|
| 144 |
+
(3): DiTLayer(
|
| 145 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.11 ms = 3.22% latency, 127.97 TFLOPS
|
| 146 |
+
(input_layernorm): AdaLayerNormZero(
|
| 147 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 602.25 us = 0.38% latency, 1.34 TFLOPS
|
| 148 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.14 us = 0.02% latency, 988.77 MFLOPS)
|
| 149 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 164.27 us = 0.1% latency, 4.9 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 150 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.04 us = 0.15% latency, 0 FLOPS)
|
| 151 |
+
)
|
| 152 |
+
(self_attn): DiTSelfAttention(
|
| 153 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.68 ms = 1.69% latency, 89.59 TFLOPS
|
| 154 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.29 us = 0.29% latency, 0 FLOPS)
|
| 155 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 234.84 us = 0.15% latency, 0 FLOPS)
|
| 156 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 247.48 us = 0.16% latency, 277.68 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 157 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 154.26 us = 0.1% latency, 111.37 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 158 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.16 us = 0.1% latency, 113.66 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 159 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.24 us = 0.09% latency, 123.39 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 160 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.85 us = 0.09% latency, 125.54 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 161 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 233.41 us = 0.15% latency, 294.41 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 162 |
+
)
|
| 163 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 164 |
+
(mlp): GemmaMLP(
|
| 165 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.2 ms = 0.76% latency, 342.82 TFLOPS
|
| 166 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 331.88 us = 0.21% latency, 414.12 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 167 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 317.81 us = 0.2% latency, 432.45 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 168 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 300.41 us = 0.19% latency, 457.51 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 169 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.49 us = 0.05% latency, 406.76 GFLOPS)
|
| 170 |
+
)
|
| 171 |
+
)
|
| 172 |
+
(4): DiTLayer(
|
| 173 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.09 ms = 3.21% latency, 128.43 TFLOPS
|
| 174 |
+
(input_layernorm): AdaLayerNormZero(
|
| 175 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 581.98 us = 0.37% latency, 1.38 TFLOPS
|
| 176 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.14 us = 0.02% latency, 988.77 MFLOPS)
|
| 177 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 159.98 us = 0.1% latency, 5.03 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 178 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.04 us = 0.15% latency, 0 FLOPS)
|
| 179 |
+
)
|
| 180 |
+
(self_attn): DiTSelfAttention(
|
| 181 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.69% latency, 89.53 TFLOPS
|
| 182 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.53 us = 0.29% latency, 0 FLOPS)
|
| 183 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 234.13 us = 0.15% latency, 0 FLOPS)
|
| 184 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 247.24 us = 0.16% latency, 277.95 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 185 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 157.36 us = 0.1% latency, 109.18 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 186 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.63 us = 0.1% latency, 113.3 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 187 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.47 us = 0.09% latency, 123.18 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 188 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.57 us = 0.09% latency, 124.88 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 189 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 237.46 us = 0.15% latency, 289.39 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 190 |
+
)
|
| 191 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.61 us = 0.15% latency, 0 FLOPS)
|
| 192 |
+
(mlp): GemmaMLP(
|
| 193 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.2 ms = 0.76% latency, 343.16 TFLOPS
|
| 194 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 329.73 us = 0.21% latency, 416.82 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 195 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 317.81 us = 0.2% latency, 432.45 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 196 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 302.55 us = 0.19% latency, 454.26 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 197 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.25 us = 0.05% latency, 407.93 GFLOPS)
|
| 198 |
+
)
|
| 199 |
+
)
|
| 200 |
+
(5): DiTLayer(
|
| 201 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.09 ms = 3.21% latency, 128.42 TFLOPS
|
| 202 |
+
(input_layernorm): AdaLayerNormZero(
|
| 203 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 581.5 us = 0.37% latency, 1.38 TFLOPS
|
| 204 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.33 us = 0.02% latency, 954.44 MFLOPS)
|
| 205 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 158.55 us = 0.1% latency, 5.08 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 206 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.56 us = 0.15% latency, 0 FLOPS)
|
| 207 |
+
)
|
| 208 |
+
(self_attn): DiTSelfAttention(
|
| 209 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.69% latency, 89.58 TFLOPS
|
| 210 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.58 us = 0.29% latency, 0 FLOPS)
|
| 211 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 234.84 us = 0.15% latency, 0 FLOPS)
|
| 212 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 242.23 us = 0.15% latency, 283.69 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 213 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 156.4 us = 0.1% latency, 109.84 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 214 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.16 us = 0.1% latency, 113.66 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 215 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.62 us = 0.09% latency, 121.31 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 216 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.14 us = 0.09% latency, 126.2 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 217 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 239.85 us = 0.15% latency, 286.51 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 218 |
+
)
|
| 219 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.37 us = 0.15% latency, 0 FLOPS)
|
| 220 |
+
(mlp): GemmaMLP(
|
| 221 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.2 ms = 0.76% latency, 342.68 TFLOPS
|
| 222 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 335.93 us = 0.21% latency, 409.13 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 223 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 320.2 us = 0.2% latency, 429.23 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 224 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 297.78 us = 0.19% latency, 461.54 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 225 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.06 us = 0.05% latency, 413.93 GFLOPS)
|
| 226 |
+
)
|
| 227 |
+
)
|
| 228 |
+
(6): DiTLayer(
|
| 229 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.16 ms = 3.25% latency, 126.68 TFLOPS
|
| 230 |
+
(input_layernorm): AdaLayerNormZero(
|
| 231 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 582.22 us = 0.37% latency, 1.38 TFLOPS
|
| 232 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.38 us = 0.02% latency, 981.71 MFLOPS)
|
| 233 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 159.03 us = 0.1% latency, 5.06 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 234 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.09 us = 0.15% latency, 0 FLOPS)
|
| 235 |
+
)
|
| 236 |
+
(self_attn): DiTSelfAttention(
|
| 237 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.72 ms = 1.71% latency, 88.51 TFLOPS
|
| 238 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.53 us = 0.29% latency, 0 FLOPS)
|
| 239 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.56 us = 0.15% latency, 0 FLOPS)
|
| 240 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 249.86 us = 0.16% latency, 275.03 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 241 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 159.98 us = 0.1% latency, 107.39 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 242 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 152.11 us = 0.1% latency, 112.94 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 243 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 143.05 us = 0.09% latency, 120.1 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 244 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.24 us = 0.09% latency, 123.39 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 245 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 236.99 us = 0.15% latency, 289.97 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 246 |
+
)
|
| 247 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 248 |
+
(mlp): GemmaMLP(
|
| 249 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.78% latency, 334.53 TFLOPS
|
| 250 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 336.65 us = 0.21% latency, 408.26 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 251 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 330.69 us = 0.21% latency, 415.62 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 252 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 304.7 us = 0.19% latency, 451.06 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 253 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 85.59 us = 0.05% latency, 392.03 GFLOPS)
|
| 254 |
+
)
|
| 255 |
+
)
|
| 256 |
+
(7): DiTLayer(
|
| 257 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.19 ms = 3.27% latency, 126.06 TFLOPS
|
| 258 |
+
(input_layernorm): AdaLayerNormZero(
|
| 259 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 600.58 us = 0.38% latency, 1.34 TFLOPS
|
| 260 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.81 us = 0.02% latency, 941.36 MFLOPS)
|
| 261 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 169.99 us = 0.11% latency, 4.74 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 262 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.04 us = 0.15% latency, 0 FLOPS)
|
| 263 |
+
)
|
| 264 |
+
(self_attn): DiTSelfAttention(
|
| 265 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.72 ms = 1.71% latency, 88.39 TFLOPS
|
| 266 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.01 us = 0.29% latency, 0 FLOPS)
|
| 267 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 268 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 257.25 us = 0.16% latency, 267.13 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 269 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 156.16 us = 0.1% latency, 110.01 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 270 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 150.68 us = 0.09% latency, 114.02 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 271 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.95 us = 0.09% latency, 122.76 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 272 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 140.91 us = 0.09% latency, 121.92 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 273 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 247.72 us = 0.16% latency, 277.41 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 274 |
+
)
|
| 275 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.61 us = 0.15% latency, 0 FLOPS)
|
| 276 |
+
(mlp): GemmaMLP(
|
| 277 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.78% latency, 334.72 TFLOPS
|
| 278 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 340.7 us = 0.21% latency, 403.4 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 279 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 321.63 us = 0.2% latency, 427.32 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 280 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 311.14 us = 0.2% latency, 441.73 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 281 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.45 us = 0.05% latency, 402.11 GFLOPS)
|
| 282 |
+
)
|
| 283 |
+
)
|
| 284 |
+
(8): DiTLayer(
|
| 285 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.15 ms = 3.24% latency, 127.04 TFLOPS
|
| 286 |
+
(input_layernorm): AdaLayerNormZero(
|
| 287 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 592.95 us = 0.37% latency, 1.36 TFLOPS
|
| 288 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.09 us = 0.02% latency, 961.11 MFLOPS)
|
| 289 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 154.02 us = 0.1% latency, 5.23 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 290 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.04 us = 0.15% latency, 0 FLOPS)
|
| 291 |
+
)
|
| 292 |
+
(self_attn): DiTSelfAttention(
|
| 293 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.72 ms = 1.71% latency, 88.56 TFLOPS
|
| 294 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.53 us = 0.29% latency, 0 FLOPS)
|
| 295 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 296 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 249.62 us = 0.16% latency, 275.29 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 297 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 153.06 us = 0.1% latency, 112.24 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 298 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.73 us = 0.09% latency, 114.74 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 299 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.86 us = 0.09% latency, 121.11 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 300 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 148.53 us = 0.09% latency, 115.66 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 301 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 244.62 us = 0.15% latency, 280.93 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 302 |
+
)
|
| 303 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.18 us = 0.15% latency, 0 FLOPS)
|
| 304 |
+
(mlp): GemmaMLP(
|
| 305 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 339.19 TFLOPS
|
| 306 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 334.98 us = 0.21% latency, 410.29 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 307 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 318.29 us = 0.2% latency, 431.81 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 308 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 306.13 us = 0.19% latency, 448.96 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 309 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.21 us = 0.05% latency, 403.26 GFLOPS)
|
| 310 |
+
)
|
| 311 |
+
)
|
| 312 |
+
(9): DiTLayer(
|
| 313 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.17 ms = 3.26% latency, 126.46 TFLOPS
|
| 314 |
+
(input_layernorm): AdaLayerNormZero(
|
| 315 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 590.8 us = 0.37% latency, 1.36 TFLOPS
|
| 316 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.86 us = 0.02% latency, 967.88 MFLOPS)
|
| 317 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 161.17 us = 0.1% latency, 5 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 318 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.76 us = 0.15% latency, 0 FLOPS)
|
| 319 |
+
)
|
| 320 |
+
(self_attn): DiTSelfAttention(
|
| 321 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.72 ms = 1.71% latency, 88.44 TFLOPS
|
| 322 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.01 us = 0.29% latency, 0 FLOPS)
|
| 323 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 324 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 255.11 us = 0.16% latency, 269.37 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 325 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 161.17 us = 0.1% latency, 106.59 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 326 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 148.53 us = 0.09% latency, 115.66 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 327 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 140.67 us = 0.09% latency, 122.13 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 328 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.57 us = 0.09% latency, 124.88 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 329 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 255.58 us = 0.16% latency, 268.87 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 330 |
+
)
|
| 331 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.75 us = 0.15% latency, 0 FLOPS)
|
| 332 |
+
(mlp): GemmaMLP(
|
| 333 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.78% latency, 334.14 TFLOPS
|
| 334 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 337.84 us = 0.21% latency, 406.82 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 335 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 338.55 us = 0.21% latency, 405.96 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 336 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 300.17 us = 0.19% latency, 457.87 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 337 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.45 us = 0.05% latency, 402.11 GFLOPS)
|
| 338 |
+
)
|
| 339 |
+
)
|
| 340 |
+
(10): DiTLayer(
|
| 341 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.11 ms = 3.22% latency, 127.93 TFLOPS
|
| 342 |
+
(input_layernorm): AdaLayerNormZero(
|
| 343 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 581.98 us = 0.37% latency, 1.38 TFLOPS
|
| 344 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.86 us = 0.02% latency, 967.88 MFLOPS)
|
| 345 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 161.89 us = 0.1% latency, 4.97 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 346 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 347 |
+
)
|
| 348 |
+
(self_attn): DiTSelfAttention(
|
| 349 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.7% latency, 89.31 TFLOPS
|
| 350 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.58 us = 0.29% latency, 0 FLOPS)
|
| 351 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 237.94 us = 0.15% latency, 0 FLOPS)
|
| 352 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 244.62 us = 0.15% latency, 280.93 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 353 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 157.12 us = 0.1% latency, 109.34 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 354 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 150.68 us = 0.09% latency, 114.02 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 355 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.33 us = 0.09% latency, 125.1 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 356 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 135.9 us = 0.09% latency, 126.42 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 357 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 247.48 us = 0.16% latency, 277.68 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 358 |
+
)
|
| 359 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.94 us = 0.15% latency, 0 FLOPS)
|
| 360 |
+
(mlp): GemmaMLP(
|
| 361 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 340.39 TFLOPS
|
| 362 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 336.17 us = 0.21% latency, 408.84 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 363 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 319.48 us = 0.2% latency, 430.19 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 364 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.12 us = 0.19% latency, 456.42 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 365 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.06 us = 0.05% latency, 413.93 GFLOPS)
|
| 366 |
+
)
|
| 367 |
+
)
|
| 368 |
+
(11): DiTLayer(
|
| 369 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.07 ms = 3.19% latency, 128.95 TFLOPS
|
| 370 |
+
(input_layernorm): AdaLayerNormZero(
|
| 371 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 577.21 us = 0.36% latency, 1.4 TFLOPS
|
| 372 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 32.66 us = 0.02% latency, 1 GFLOPS)
|
| 373 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 159.03 us = 0.1% latency, 5.06 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 374 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.94 us = 0.15% latency, 0 FLOPS)
|
| 375 |
+
)
|
| 376 |
+
(self_attn): DiTSelfAttention(
|
| 377 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.67 ms = 1.68% latency, 90.06 TFLOPS
|
| 378 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.1 us = 0.29% latency, 0 FLOPS)
|
| 379 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 234.6 us = 0.15% latency, 0 FLOPS)
|
| 380 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 244.14 us = 0.15% latency, 281.47 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 381 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 152.59 us = 0.1% latency, 112.59 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 382 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.97 us = 0.09% latency, 114.56 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 383 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.14 us = 0.09% latency, 121.72 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 384 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 134.71 us = 0.08% latency, 127.54 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 385 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 235.32 us = 0.15% latency, 292.03 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 386 |
+
)
|
| 387 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.99 us = 0.15% latency, 0 FLOPS)
|
| 388 |
+
(mlp): GemmaMLP(
|
| 389 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.2 ms = 0.76% latency, 342.48 TFLOPS
|
| 390 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 334.98 us = 0.21% latency, 410.29 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 391 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 317.57 us = 0.2% latency, 432.78 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 392 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 300.17 us = 0.19% latency, 457.87 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 393 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 80.82 us = 0.05% latency, 415.15 GFLOPS)
|
| 394 |
+
)
|
| 395 |
+
)
|
| 396 |
+
(12): DiTLayer(
|
| 397 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.11 ms = 3.22% latency, 128.03 TFLOPS
|
| 398 |
+
(input_layernorm): AdaLayerNormZero(
|
| 399 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 587.22 us = 0.37% latency, 1.37 TFLOPS
|
| 400 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.33 us = 0.02% latency, 954.44 MFLOPS)
|
| 401 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 163.32 us = 0.1% latency, 4.93 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 402 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.42 us = 0.15% latency, 0 FLOPS)
|
| 403 |
+
)
|
| 404 |
+
(self_attn): DiTSelfAttention(
|
| 405 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.7 ms = 1.7% latency, 89.2 TFLOPS
|
| 406 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.29 us = 0.29% latency, 0 FLOPS)
|
| 407 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 233.41 us = 0.15% latency, 0 FLOPS)
|
| 408 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 245.33 us = 0.15% latency, 280.11 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 409 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 159.26 us = 0.1% latency, 107.87 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 410 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.16 us = 0.1% latency, 113.66 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 411 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 140.91 us = 0.09% latency, 121.92 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 412 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.38 us = 0.09% latency, 125.97 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 413 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 235.32 us = 0.15% latency, 292.03 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 414 |
+
)
|
| 415 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.09 us = 0.15% latency, 0 FLOPS)
|
| 416 |
+
(mlp): GemmaMLP(
|
| 417 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.2 ms = 0.76% latency, 343.16 TFLOPS
|
| 418 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 332.12 us = 0.21% latency, 413.83 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 419 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 317.57 us = 0.2% latency, 432.78 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 420 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 300.41 us = 0.19% latency, 457.51 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 421 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.54 us = 0.05% latency, 411.51 GFLOPS)
|
| 422 |
+
)
|
| 423 |
+
)
|
| 424 |
+
(13): DiTLayer(
|
| 425 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.11 ms = 3.22% latency, 127.94 TFLOPS
|
| 426 |
+
(input_layernorm): AdaLayerNormZero(
|
| 427 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 582.7 us = 0.37% latency, 1.38 TFLOPS
|
| 428 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 32.19 us = 0.02% latency, 1.02 GFLOPS)
|
| 429 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 158.79 us = 0.1% latency, 5.07 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 430 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.56 us = 0.15% latency, 0 FLOPS)
|
| 431 |
+
)
|
| 432 |
+
(self_attn): DiTSelfAttention(
|
| 433 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.68 ms = 1.69% latency, 89.82 TFLOPS
|
| 434 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.53 us = 0.29% latency, 0 FLOPS)
|
| 435 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 232.93 us = 0.15% latency, 0 FLOPS)
|
| 436 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 248.43 us = 0.16% latency, 276.61 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 437 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 154.73 us = 0.1% latency, 111.03 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 438 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 150.68 us = 0.09% latency, 114.02 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 439 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 138.04 us = 0.09% latency, 124.45 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 440 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 134.94 us = 0.09% latency, 127.31 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 441 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 234.84 us = 0.15% latency, 292.62 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 442 |
+
)
|
| 443 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.99 us = 0.15% latency, 0 FLOPS)
|
| 444 |
+
(mlp): GemmaMLP(
|
| 445 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.78% latency, 334.98 TFLOPS
|
| 446 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 349.76 us = 0.22% latency, 392.95 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 447 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 322.82 us = 0.2% latency, 425.75 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 448 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 302.79 us = 0.19% latency, 453.91 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 449 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.73 us = 0.05% latency, 405.58 GFLOPS)
|
| 450 |
+
)
|
| 451 |
+
)
|
| 452 |
+
(14): DiTLayer(
|
| 453 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.11 ms = 3.22% latency, 127.87 TFLOPS
|
| 454 |
+
(input_layernorm): AdaLayerNormZero(
|
| 455 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 584.13 us = 0.37% latency, 1.38 TFLOPS
|
| 456 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.38 us = 0.02% latency, 981.71 MFLOPS)
|
| 457 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 160.69 us = 0.1% latency, 5.01 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 458 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.09 us = 0.15% latency, 0 FLOPS)
|
| 459 |
+
)
|
| 460 |
+
(self_attn): DiTSelfAttention(
|
| 461 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.7% latency, 89.25 TFLOPS
|
| 462 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.34 us = 0.29% latency, 0 FLOPS)
|
| 463 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 244.14 us = 0.15% latency, 0 FLOPS)
|
| 464 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 244.62 us = 0.15% latency, 280.93 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 465 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 154.97 us = 0.1% latency, 110.86 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 466 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 148.53 us = 0.09% latency, 115.66 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 467 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 138.04 us = 0.09% latency, 124.45 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 468 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 135.9 us = 0.09% latency, 126.42 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 469 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 241.52 us = 0.15% latency, 284.53 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 470 |
+
)
|
| 471 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.33 us = 0.15% latency, 0 FLOPS)
|
| 472 |
+
(mlp): GemmaMLP(
|
| 473 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 340.99 TFLOPS
|
| 474 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 337.12 us = 0.21% latency, 407.68 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 475 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 319.72 us = 0.2% latency, 429.87 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 476 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 299.45 us = 0.19% latency, 458.97 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 477 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.3 us = 0.05% latency, 412.72 GFLOPS)
|
| 478 |
+
)
|
| 479 |
+
)
|
| 480 |
+
(15): DiTLayer(
|
| 481 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.11 ms = 3.22% latency, 127.93 TFLOPS
|
| 482 |
+
(input_layernorm): AdaLayerNormZero(
|
| 483 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 578.88 us = 0.36% latency, 1.39 TFLOPS
|
| 484 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 32.66 us = 0.02% latency, 1 GFLOPS)
|
| 485 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 158.55 us = 0.1% latency, 5.08 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 486 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.33 us = 0.15% latency, 0 FLOPS)
|
| 487 |
+
)
|
| 488 |
+
(self_attn): DiTSelfAttention(
|
| 489 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.7% latency, 88.87 TFLOPS
|
| 490 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.25 us = 0.29% latency, 0 FLOPS)
|
| 491 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 231.74 us = 0.15% latency, 0 FLOPS)
|
| 492 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 246.29 us = 0.16% latency, 279.02 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 493 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 155.45 us = 0.1% latency, 110.52 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 494 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 150.68 us = 0.09% latency, 114.02 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 495 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.86 us = 0.09% latency, 121.11 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 496 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 150.44 us = 0.09% latency, 114.2 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 497 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 239.37 us = 0.15% latency, 287.08 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 498 |
+
)
|
| 499 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.46 us = 0.15% latency, 0 FLOPS)
|
| 500 |
+
(mlp): GemmaMLP(
|
| 501 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 342.07 TFLOPS
|
| 502 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 335.22 us = 0.21% latency, 410 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 503 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 319 us = 0.2% latency, 430.84 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 504 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 298.74 us = 0.19% latency, 460.06 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 505 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.3 us = 0.05% latency, 412.72 GFLOPS)
|
| 506 |
+
)
|
| 507 |
+
)
|
| 508 |
+
(16): DiTLayer(
|
| 509 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.1 ms = 3.22% latency, 128.09 TFLOPS
|
| 510 |
+
(input_layernorm): AdaLayerNormZero(
|
| 511 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 578.64 us = 0.36% latency, 1.39 TFLOPS
|
| 512 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.14 us = 0.02% latency, 988.77 MFLOPS)
|
| 513 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 159.26 us = 0.1% latency, 5.06 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 514 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.9 us = 0.15% latency, 0 FLOPS)
|
| 515 |
+
)
|
| 516 |
+
(self_attn): DiTSelfAttention(
|
| 517 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.69% latency, 89.54 TFLOPS
|
| 518 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.06 us = 0.29% latency, 0 FLOPS)
|
| 519 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 231.27 us = 0.15% latency, 0 FLOPS)
|
| 520 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 245.33 us = 0.15% latency, 280.11 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 521 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 153.54 us = 0.1% latency, 111.89 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 522 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 147.58 us = 0.09% latency, 116.41 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 523 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 140.19 us = 0.09% latency, 122.55 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 524 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.85 us = 0.09% latency, 125.54 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 525 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 244.14 us = 0.15% latency, 281.47 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 526 |
+
)
|
| 527 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.46 us = 0.15% latency, 0 FLOPS)
|
| 528 |
+
(mlp): GemmaMLP(
|
| 529 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 339.12 TFLOPS
|
| 530 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 338.79 us = 0.21% latency, 405.67 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 531 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 319.96 us = 0.2% latency, 429.55 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 532 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.6 us = 0.19% latency, 455.7 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 533 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.54 us = 0.05% latency, 411.51 GFLOPS)
|
| 534 |
+
)
|
| 535 |
+
)
|
| 536 |
+
(17): DiTLayer(
|
| 537 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.1 ms = 3.21% latency, 128.11 TFLOPS
|
| 538 |
+
(input_layernorm): AdaLayerNormZero(
|
| 539 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 573.16 us = 0.36% latency, 1.41 TFLOPS
|
| 540 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 32.9 us = 0.02% latency, 995.93 MFLOPS)
|
| 541 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 156.88 us = 0.1% latency, 5.13 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 542 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.75 us = 0.15% latency, 0 FLOPS)
|
| 543 |
+
)
|
| 544 |
+
(self_attn): DiTSelfAttention(
|
| 545 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.69% latency, 89.43 TFLOPS
|
| 546 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.82 us = 0.29% latency, 0 FLOPS)
|
| 547 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 234.13 us = 0.15% latency, 0 FLOPS)
|
| 548 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 247.72 us = 0.16% latency, 277.41 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 549 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 157.59 us = 0.1% latency, 109.01 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 550 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 148.3 us = 0.09% latency, 115.85 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 551 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.24 us = 0.09% latency, 123.39 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 552 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 135.66 us = 0.09% latency, 126.64 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 553 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 242.47 us = 0.15% latency, 283.41 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 554 |
+
)
|
| 555 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.13 us = 0.15% latency, 0 FLOPS)
|
| 556 |
+
(mlp): GemmaMLP(
|
| 557 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 339.06 TFLOPS
|
| 558 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 339.03 us = 0.21% latency, 405.39 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 559 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 320.67 us = 0.2% latency, 428.6 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 560 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.6 us = 0.19% latency, 455.7 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 561 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.02 us = 0.05% latency, 409.12 GFLOPS)
|
| 562 |
+
)
|
| 563 |
+
)
|
| 564 |
+
(18): DiTLayer(
|
| 565 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.19 ms = 3.27% latency, 125.93 TFLOPS
|
| 566 |
+
(input_layernorm): AdaLayerNormZero(
|
| 567 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 584.6 us = 0.37% latency, 1.38 TFLOPS
|
| 568 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.14 us = 0.02% latency, 988.77 MFLOPS)
|
| 569 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 160.22 us = 0.1% latency, 5.03 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 570 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.76 us = 0.15% latency, 0 FLOPS)
|
| 571 |
+
)
|
| 572 |
+
(self_attn): DiTSelfAttention(
|
| 573 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.74 ms = 1.72% latency, 87.86 TFLOPS
|
| 574 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 469.68 us = 0.3% latency, 0 FLOPS)
|
| 575 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 239.85 us = 0.15% latency, 0 FLOPS)
|
| 576 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 245.33 us = 0.15% latency, 280.11 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 577 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 158.79 us = 0.1% latency, 108.19 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 578 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 153.78 us = 0.1% latency, 111.72 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 579 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 143.77 us = 0.09% latency, 119.5 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 580 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 140.91 us = 0.09% latency, 121.92 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 581 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 249.86 us = 0.16% latency, 275.03 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 582 |
+
)
|
| 583 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.13 us = 0.15% latency, 0 FLOPS)
|
| 584 |
+
(mlp): GemmaMLP(
|
| 585 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.24 ms = 0.78% latency, 332.22 TFLOPS
|
| 586 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 349.28 us = 0.22% latency, 393.49 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 587 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 328.06 us = 0.21% latency, 418.94 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 588 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 306.61 us = 0.19% latency, 448.26 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 589 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.92 us = 0.05% latency, 399.82 GFLOPS)
|
| 590 |
+
)
|
| 591 |
+
)
|
| 592 |
+
(19): DiTLayer(
|
| 593 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.26 ms = 3.32% latency, 124.18 TFLOPS
|
| 594 |
+
(input_layernorm): AdaLayerNormZero(
|
| 595 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 604.15 us = 0.38% latency, 1.33 TFLOPS
|
| 596 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 37.67 us = 0.02% latency, 869.87 MFLOPS)
|
| 597 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 167.61 us = 0.11% latency, 4.8 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 598 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.33 us = 0.15% latency, 0 FLOPS)
|
| 599 |
+
)
|
| 600 |
+
(self_attn): DiTSelfAttention(
|
| 601 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.78 ms = 1.75% latency, 86.61 TFLOPS
|
| 602 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 465.63 us = 0.29% latency, 0 FLOPS)
|
| 603 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 250.34 us = 0.16% latency, 0 FLOPS)
|
| 604 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 255.11 us = 0.16% latency, 269.37 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 605 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 166.18 us = 0.1% latency, 103.38 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 606 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 156.88 us = 0.1% latency, 109.51 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 607 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.01 us = 0.09% latency, 115.29 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 608 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.38 us = 0.09% latency, 121.51 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 609 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 253.68 us = 0.16% latency, 270.89 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 610 |
+
)
|
| 611 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.37 us = 0.15% latency, 0 FLOPS)
|
| 612 |
+
(mlp): GemmaMLP(
|
| 613 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.25 ms = 0.79% latency, 330.25 TFLOPS
|
| 614 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 349.76 us = 0.22% latency, 392.95 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 615 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 326.4 us = 0.21% latency, 421.08 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 616 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 311.61 us = 0.2% latency, 441.06 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 617 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.92 us = 0.05% latency, 399.82 GFLOPS)
|
| 618 |
+
)
|
| 619 |
+
)
|
| 620 |
+
(20): DiTLayer(
|
| 621 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.13 ms = 3.23% latency, 127.45 TFLOPS
|
| 622 |
+
(input_layernorm): AdaLayerNormZero(
|
| 623 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 586.75 us = 0.37% latency, 1.37 TFLOPS
|
| 624 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.57 us = 0.02% latency, 947.85 MFLOPS)
|
| 625 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 161.89 us = 0.1% latency, 4.97 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 626 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.13 us = 0.15% latency, 0 FLOPS)
|
| 627 |
+
)
|
| 628 |
+
(self_attn): DiTSelfAttention(
|
| 629 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.69% latency, 89.53 TFLOPS
|
| 630 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.25 us = 0.29% latency, 0 FLOPS)
|
| 631 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 232.93 us = 0.15% latency, 0 FLOPS)
|
| 632 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 243.43 us = 0.15% latency, 282.3 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 633 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 154.97 us = 0.1% latency, 110.86 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 634 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 147.1 us = 0.09% latency, 116.79 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 635 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 142.57 us = 0.09% latency, 120.5 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 636 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 135.9 us = 0.09% latency, 126.42 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 637 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 241.99 us = 0.15% latency, 283.97 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 638 |
+
)
|
| 639 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.33 us = 0.15% latency, 0 FLOPS)
|
| 640 |
+
(mlp): GemmaMLP(
|
| 641 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.77% latency, 335.37 TFLOPS
|
| 642 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 342.37 us = 0.22% latency, 401.44 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 643 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 322.82 us = 0.2% latency, 425.75 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 644 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 307.56 us = 0.19% latency, 446.87 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 645 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.97 us = 0.05% latency, 404.42 GFLOPS)
|
| 646 |
+
)
|
| 647 |
+
)
|
| 648 |
+
(21): DiTLayer(
|
| 649 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.15 ms = 3.25% latency, 126.85 TFLOPS
|
| 650 |
+
(input_layernorm): AdaLayerNormZero(
|
| 651 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 586.03 us = 0.37% latency, 1.37 TFLOPS
|
| 652 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.09 us = 0.02% latency, 961.11 MFLOPS)
|
| 653 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 160.22 us = 0.1% latency, 5.03 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 654 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.33 us = 0.15% latency, 0 FLOPS)
|
| 655 |
+
)
|
| 656 |
+
(self_attn): DiTSelfAttention(
|
| 657 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.73 ms = 1.72% latency, 88.03 TFLOPS
|
| 658 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.72 us = 0.29% latency, 0 FLOPS)
|
| 659 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 660 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 256.06 us = 0.16% latency, 268.37 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 661 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 163.79 us = 0.1% latency, 104.89 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 662 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.87 us = 0.1% latency, 113.12 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 663 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.62 us = 0.09% latency, 121.31 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 664 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.61 us = 0.09% latency, 125.75 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 665 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 248.67 us = 0.16% latency, 276.35 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 666 |
+
)
|
| 667 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.9 us = 0.15% latency, 0 FLOPS)
|
| 668 |
+
(mlp): GemmaMLP(
|
| 669 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 340.59 TFLOPS
|
| 670 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 337.6 us = 0.21% latency, 407.11 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 671 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 320.67 us = 0.2% latency, 428.6 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 672 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 300.41 us = 0.19% latency, 457.51 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 673 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.78 us = 0.05% latency, 410.31 GFLOPS)
|
| 674 |
+
)
|
| 675 |
+
)
|
| 676 |
+
(22): DiTLayer(
|
| 677 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.14 ms = 3.24% latency, 127.25 TFLOPS
|
| 678 |
+
(input_layernorm): AdaLayerNormZero(
|
| 679 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 585.79 us = 0.37% latency, 1.37 TFLOPS
|
| 680 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.09 us = 0.02% latency, 961.11 MFLOPS)
|
| 681 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 163.08 us = 0.1% latency, 4.94 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 682 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.52 us = 0.15% latency, 0 FLOPS)
|
| 683 |
+
)
|
| 684 |
+
(self_attn): DiTSelfAttention(
|
| 685 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.71% latency, 88.84 TFLOPS
|
| 686 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.77 us = 0.29% latency, 0 FLOPS)
|
| 687 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 239.37 us = 0.15% latency, 0 FLOPS)
|
| 688 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 258.21 us = 0.16% latency, 266.14 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 689 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 154.5 us = 0.1% latency, 111.2 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 690 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.01 us = 0.09% latency, 115.29 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 691 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.95 us = 0.09% latency, 122.76 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 692 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.33 us = 0.09% latency, 125.1 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 693 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 237.94 us = 0.15% latency, 288.81 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 694 |
+
)
|
| 695 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.37 us = 0.15% latency, 0 FLOPS)
|
| 696 |
+
(mlp): GemmaMLP(
|
| 697 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 337.86 TFLOPS
|
| 698 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 340.22 us = 0.21% latency, 403.97 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 699 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 323.53 us = 0.2% latency, 424.81 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 700 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.12 us = 0.19% latency, 456.42 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 701 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.25 us = 0.05% latency, 407.93 GFLOPS)
|
| 702 |
+
)
|
| 703 |
+
)
|
| 704 |
+
(23): DiTLayer(
|
| 705 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.15 ms = 3.25% latency, 126.88 TFLOPS
|
| 706 |
+
(input_layernorm): AdaLayerNormZero(
|
| 707 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 595.81 us = 0.38% latency, 1.35 TFLOPS
|
| 708 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.81 us = 0.02% latency, 941.36 MFLOPS)
|
| 709 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 169.52 us = 0.11% latency, 4.75 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 710 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.61 us = 0.15% latency, 0 FLOPS)
|
| 711 |
+
)
|
| 712 |
+
(self_attn): DiTSelfAttention(
|
| 713 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.71% latency, 88.66 TFLOPS
|
| 714 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 466.35 us = 0.29% latency, 0 FLOPS)
|
| 715 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 238.9 us = 0.15% latency, 0 FLOPS)
|
| 716 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 249.15 us = 0.16% latency, 275.82 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 717 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 153.54 us = 0.1% latency, 111.89 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 718 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 150.2 us = 0.09% latency, 114.38 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 719 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 142.34 us = 0.09% latency, 120.7 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 720 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.09 us = 0.09% latency, 125.32 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 721 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 243.66 us = 0.15% latency, 282.03 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 722 |
+
)
|
| 723 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.27 us = 0.15% latency, 0 FLOPS)
|
| 724 |
+
(mlp): GemmaMLP(
|
| 725 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 337.34 TFLOPS
|
| 726 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 342.61 us = 0.22% latency, 401.16 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 727 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 320.43 us = 0.2% latency, 428.91 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 728 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 303.27 us = 0.19% latency, 453.19 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 729 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.73 us = 0.05% latency, 405.58 GFLOPS)
|
| 730 |
+
)
|
| 731 |
+
)
|
| 732 |
+
(24): DiTLayer(
|
| 733 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.12 ms = 3.23% latency, 127.67 TFLOPS
|
| 734 |
+
(input_layernorm): AdaLayerNormZero(
|
| 735 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 581.98 us = 0.37% latency, 1.38 TFLOPS
|
| 736 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 35.29 us = 0.02% latency, 928.64 MFLOPS)
|
| 737 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 160.22 us = 0.1% latency, 5.03 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 738 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 739 |
+
)
|
| 740 |
+
(self_attn): DiTSelfAttention(
|
| 741 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.7% latency, 89.29 TFLOPS
|
| 742 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.34 us = 0.29% latency, 0 FLOPS)
|
| 743 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 744 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 246.76 us = 0.16% latency, 278.48 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 745 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 157.83 us = 0.1% latency, 108.85 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 746 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.01 us = 0.09% latency, 115.29 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 747 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 140.67 us = 0.09% latency, 122.13 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 748 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 136.85 us = 0.09% latency, 125.54 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 749 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 244.86 us = 0.15% latency, 280.65 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 750 |
+
)
|
| 751 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.13 us = 0.15% latency, 0 FLOPS)
|
| 752 |
+
(mlp): GemmaMLP(
|
| 753 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 337.6 TFLOPS
|
| 754 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 340.22 us = 0.21% latency, 403.97 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 755 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 326.16 us = 0.21% latency, 421.39 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 756 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 303.51 us = 0.19% latency, 452.84 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 757 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.25 us = 0.05% latency, 407.93 GFLOPS)
|
| 758 |
+
)
|
| 759 |
+
)
|
| 760 |
+
(25): DiTLayer(
|
| 761 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.12 ms = 3.22% latency, 127.78 TFLOPS
|
| 762 |
+
(input_layernorm): AdaLayerNormZero(
|
| 763 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 585.56 us = 0.37% latency, 1.38 TFLOPS
|
| 764 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.86 us = 0.02% latency, 967.88 MFLOPS)
|
| 765 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 159.74 us = 0.1% latency, 5.04 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 766 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 241.04 us = 0.15% latency, 0 FLOPS)
|
| 767 |
+
)
|
| 768 |
+
(self_attn): DiTSelfAttention(
|
| 769 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.69 ms = 1.7% latency, 89.35 TFLOPS
|
| 770 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 462.77 us = 0.29% latency, 0 FLOPS)
|
| 771 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 235.08 us = 0.15% latency, 0 FLOPS)
|
| 772 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 246.29 us = 0.16% latency, 279.02 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 773 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 155.21 us = 0.1% latency, 110.69 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 774 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.87 us = 0.1% latency, 113.12 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 775 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.38 us = 0.09% latency, 121.51 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 776 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.57 us = 0.09% latency, 124.88 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 777 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 242.95 us = 0.15% latency, 282.86 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 778 |
+
)
|
| 779 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.13 us = 0.15% latency, 0 FLOPS)
|
| 780 |
+
(mlp): GemmaMLP(
|
| 781 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.21 ms = 0.76% latency, 339.86 TFLOPS
|
| 782 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 337.12 us = 0.21% latency, 407.68 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 783 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 321.63 us = 0.2% latency, 427.32 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 784 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 300.88 us = 0.19% latency, 456.78 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 785 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 81.78 us = 0.05% latency, 410.31 GFLOPS)
|
| 786 |
+
)
|
| 787 |
+
)
|
| 788 |
+
(26): DiTLayer(
|
| 789 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.18 ms = 3.26% latency, 126.29 TFLOPS
|
| 790 |
+
(input_layernorm): AdaLayerNormZero(
|
| 791 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 589.61 us = 0.37% latency, 1.37 TFLOPS
|
| 792 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 33.62 us = 0.02% latency, 974.74 MFLOPS)
|
| 793 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 165.22 us = 0.1% latency, 4.87 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 794 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 239.13 us = 0.15% latency, 0 FLOPS)
|
| 795 |
+
)
|
| 796 |
+
(self_attn): DiTSelfAttention(
|
| 797 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.71% latency, 88.78 TFLOPS
|
| 798 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 461.82 us = 0.29% latency, 0 FLOPS)
|
| 799 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 239.61 us = 0.15% latency, 0 FLOPS)
|
| 800 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 250.1 us = 0.16% latency, 274.77 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 801 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 155.69 us = 0.1% latency, 110.35 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 802 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 149.73 us = 0.09% latency, 114.74 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 803 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.47 us = 0.09% latency, 123.18 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 804 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 135.9 us = 0.09% latency, 126.42 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 805 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 250.34 us = 0.16% latency, 274.51 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 806 |
+
)
|
| 807 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 240.09 us = 0.15% latency, 0 FLOPS)
|
| 808 |
+
(mlp): GemmaMLP(
|
| 809 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.24 ms = 0.78% latency, 332.92 TFLOPS
|
| 810 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 346.66 us = 0.22% latency, 396.47 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 811 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 329.02 us = 0.21% latency, 417.73 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 812 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 307.32 us = 0.19% latency, 447.22 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 813 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.45 us = 0.05% latency, 402.11 GFLOPS)
|
| 814 |
+
)
|
| 815 |
+
)
|
| 816 |
+
(27): DiTLayer(
|
| 817 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.15 ms = 3.25% latency, 126.81 TFLOPS
|
| 818 |
+
(input_layernorm): AdaLayerNormZero(
|
| 819 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 590.09 us = 0.37% latency, 1.36 TFLOPS
|
| 820 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.57 us = 0.02% latency, 947.85 MFLOPS)
|
| 821 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 165.22 us = 0.1% latency, 4.87 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 822 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.7 us = 0.15% latency, 0 FLOPS)
|
| 823 |
+
)
|
| 824 |
+
(self_attn): DiTSelfAttention(
|
| 825 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.71 ms = 1.71% latency, 88.7 TFLOPS
|
| 826 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.25 us = 0.29% latency, 0 FLOPS)
|
| 827 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 234.13 us = 0.15% latency, 0 FLOPS)
|
| 828 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 252.25 us = 0.16% latency, 272.43 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 829 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 160.46 us = 0.1% latency, 107.07 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 830 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 151.4 us = 0.1% latency, 113.48 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 831 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.38 us = 0.09% latency, 121.51 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 832 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.24 us = 0.09% latency, 123.39 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 833 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 243.66 us = 0.15% latency, 282.03 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 834 |
+
)
|
| 835 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 836 |
+
(mlp): GemmaMLP(
|
| 837 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.77% latency, 336.03 TFLOPS
|
| 838 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 341.65 us = 0.22% latency, 402.28 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 839 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 325.44 us = 0.21% latency, 422.32 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 840 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 304.7 us = 0.19% latency, 451.06 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 841 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 83.21 us = 0.05% latency, 403.26 GFLOPS)
|
| 842 |
+
)
|
| 843 |
+
)
|
| 844 |
+
(28): DiTLayer(
|
| 845 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.17 ms = 3.26% latency, 126.45 TFLOPS
|
| 846 |
+
(input_layernorm): AdaLayerNormZero(
|
| 847 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 592.71 us = 0.37% latency, 1.36 TFLOPS
|
| 848 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 35.29 us = 0.02% latency, 928.64 MFLOPS)
|
| 849 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 166.18 us = 0.1% latency, 4.85 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 850 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 851 |
+
)
|
| 852 |
+
(self_attn): DiTSelfAttention(
|
| 853 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.73 ms = 1.72% latency, 88.07 TFLOPS
|
| 854 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 464.92 us = 0.29% latency, 0 FLOPS)
|
| 855 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 245.33 us = 0.15% latency, 0 FLOPS)
|
| 856 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 252.96 us = 0.16% latency, 271.66 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 857 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 158.55 us = 0.1% latency, 108.36 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 858 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 157.12 us = 0.1% latency, 109.34 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 859 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 143.53 us = 0.09% latency, 119.7 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 860 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 137.57 us = 0.09% latency, 124.88 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 861 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 241.04 us = 0.15% latency, 285.09 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 862 |
+
)
|
| 863 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.46 us = 0.15% latency, 0 FLOPS)
|
| 864 |
+
(mlp): GemmaMLP(
|
| 865 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.22 ms = 0.77% latency, 337.8 TFLOPS
|
| 866 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 342.37 us = 0.22% latency, 401.44 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 867 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 321.87 us = 0.2% latency, 427.01 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 868 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 302.55 us = 0.19% latency, 454.26 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 869 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.49 us = 0.05% latency, 406.76 GFLOPS)
|
| 870 |
+
)
|
| 871 |
+
)
|
| 872 |
+
(29): DiTLayer(
|
| 873 |
+
100.68 M = 3.3% Params, 326.82 GMACs = 3.31% MACs, 5.16 ms = 3.25% latency, 126.68 TFLOPS
|
| 874 |
+
(input_layernorm): AdaLayerNormZero(
|
| 875 |
+
25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 577.21 us = 0.36% latency, 1.4 TFLOPS
|
| 876 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 35.05 us = 0.02% latency, 934.96 MFLOPS)
|
| 877 |
+
(linear): Linear(25.18 M = 0.83% Params, 402.65 MMACs = 0% MACs, 158.79 us = 0.1% latency, 5.07 TFLOPS, in_features=2048, out_features=12288, bias=True)
|
| 878 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 236.75 us = 0.15% latency, 0 FLOPS)
|
| 879 |
+
)
|
| 880 |
+
(self_attn): DiTSelfAttention(
|
| 881 |
+
25.17 M = 0.83% Params, 120.26 GMACs = 1.22% MACs, 2.73 ms = 1.72% latency, 88 TFLOPS
|
| 882 |
+
(q_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 463.72 us = 0.29% latency, 0 FLOPS)
|
| 883 |
+
(k_norm): GemmaRMSNorm(128 = 0% Params, 0 MACs = 0% MACs, 237.94 us = 0.15% latency, 0 FLOPS)
|
| 884 |
+
(q_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 247.48 us = 0.16% latency, 277.68 TFLOPS, in_features=2048, out_features=4096, bias=False)
|
| 885 |
+
(k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 165.94 us = 0.1% latency, 103.53 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 886 |
+
(v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 155.21 us = 0.1% latency, 110.69 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 887 |
+
(text_k_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 141.14 us = 0.09% latency, 121.72 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 888 |
+
(text_v_proj): Linear(2.1 M = 0.07% Params, 8.59 GMACs = 0.09% MACs, 139.95 us = 0.09% latency, 122.76 TFLOPS, in_features=2048, out_features=1024, bias=False)
|
| 889 |
+
(o_proj): Linear(8.39 M = 0.28% Params, 34.36 GMACs = 0.35% MACs, 252.25 us = 0.16% latency, 272.43 TFLOPS, in_features=4096, out_features=2048, bias=False)
|
| 890 |
+
)
|
| 891 |
+
(post_attention_layernorm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 237.7 us = 0.15% latency, 0 FLOPS)
|
| 892 |
+
(mlp): GemmaMLP(
|
| 893 |
+
50.33 M = 1.65% Params, 206.16 GMACs = 2.09% MACs, 1.23 ms = 0.77% latency, 336.09 TFLOPS
|
| 894 |
+
(gate_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 341.42 us = 0.22% latency, 402.56 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 895 |
+
(up_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 321.15 us = 0.2% latency, 427.96 TFLOPS, in_features=2048, out_features=8192, bias=False)
|
| 896 |
+
(down_proj): Linear(16.78 M = 0.55% Params, 68.72 GMACs = 0.7% MACs, 301.84 us = 0.19% latency, 455.34 TFLOPS, in_features=8192, out_features=2048, bias=False)
|
| 897 |
+
(act_fn): PytorchGELUTanh(0 = 0% Params, 0 MACs = 0% MACs, 82.49 us = 0.05% latency, 406.76 GFLOPS)
|
| 898 |
+
)
|
| 899 |
+
)
|
| 900 |
+
)
|
| 901 |
+
(patch_embed): PatchEmbed(
|
| 902 |
+
133.12 K = 0% Params, 536.87 MMACs = 0.01% MACs, 487.8 us = 0.31% latency, 2.22 TFLOPS
|
| 903 |
+
(proj): Conv2d(133.12 K = 0% Params, 536.87 MMACs = 0.01% MACs, 300.17 us = 0.19% latency, 3.61 TFLOPS, 16, 2048, kernel_size=(2, 2), stride=(2, 2))
|
| 904 |
+
)
|
| 905 |
+
(rotary_emb): GemmaRotaryEmbedding(0 = 0% Params, 0 MACs = 0% MACs, 0 s = 0% latency, 0 FLOPS)
|
| 906 |
+
(time_proj): Timesteps(0 = 0% Params, 0 MACs = 0% MACs, 248.43 us = 0.16% latency, 0 FLOPS)
|
| 907 |
+
(timestep_embedder): Sequential(
|
| 908 |
+
4.72 M = 0.15% Params, 75.5 MMACs = 0% MACs, 493.53 us = 0.31% latency, 306.02 GFLOPS
|
| 909 |
+
(0): Linear(526.34 K = 0.02% Params, 8.39 MMACs = 0% MACs, 227.93 us = 0.14% latency, 73.61 GFLOPS, in_features=256, out_features=2048, bias=True)
|
| 910 |
+
(1): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 50.78 us = 0.03% latency, 645.25 MFLOPS)
|
| 911 |
+
(2): Linear(4.2 M = 0.14% Params, 67.11 MMACs = 0% MACs, 144.72 us = 0.09% latency, 927.43 GFLOPS, in_features=2048, out_features=2048, bias=True)
|
| 912 |
+
)
|
| 913 |
+
(context_embedder): Sequential(
|
| 914 |
+
4.2 M = 0.14% Params, 17.18 GMACs = 0.17% MACs, 451.56 us = 0.28% latency, 76.09 TFLOPS
|
| 915 |
+
(0): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 171.18 us = 0.11% latency, 0 FLOPS)
|
| 916 |
+
(1): Linear(4.2 M = 0.14% Params, 17.18 GMACs = 0.17% MACs, 226.97 us = 0.14% latency, 151.38 TFLOPS, in_features=2048, out_features=2048, bias=True)
|
| 917 |
+
)
|
| 918 |
+
(norm_out): AdaLayerNormOut(
|
| 919 |
+
8.39 M = 0.28% Params, 134.22 MMACs = 0% MACs, 570.77 us = 0.36% latency, 470.36 GFLOPS
|
| 920 |
+
(silu): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 35.05 us = 0.02% latency, 934.96 MFLOPS)
|
| 921 |
+
(linear): Linear(8.39 M = 0.28% Params, 134.22 MMACs = 0% MACs, 148.77 us = 0.09% latency, 1.8 TFLOPS, in_features=2048, out_features=4096, bias=True)
|
| 922 |
+
(norm): GemmaRMSNorm(2.05 K = 0% Params, 0 MACs = 0% MACs, 238.66 us = 0.15% latency, 0 FLOPS)
|
| 923 |
+
)
|
| 924 |
+
(proj_out): Linear(131.14 K = 0% Params, 536.87 MMACs = 0.01% MACs, 173.09 us = 0.11% latency, 6.2 TFLOPS, in_features=2048, out_features=64, bias=True)
|
| 925 |
+
(repa_projector): Sequential(
|
| 926 |
+
9.97 M = 0.33% Params, 40.8 GMACs = 0.41% MACs, 698.57 us = 0.44% latency, 116.84 TFLOPS
|
| 927 |
+
(0): Linear(4.2 M = 0.14% Params, 17.18 GMACs = 0.17% MACs, 206.47 us = 0.13% latency, 166.41 TFLOPS, in_features=2048, out_features=2048, bias=True)
|
| 928 |
+
(1): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 34.57 us = 0.02% latency, 242.65 GFLOPS)
|
| 929 |
+
(2): Linear(4.2 M = 0.14% Params, 17.18 GMACs = 0.17% MACs, 171.9 us = 0.11% latency, 199.88 TFLOPS, in_features=2048, out_features=2048, bias=True)
|
| 930 |
+
(3): SiLU(0 = 0% Params, 0 MACs = 0% MACs, 36.24 us = 0.02% latency, 231.48 GFLOPS)
|
| 931 |
+
(4): Linear(1.57 M = 0.05% Params, 6.44 GMACs = 0.07% MACs, 148.06 us = 0.09% latency, 87.03 TFLOPS, in_features=2048, out_features=768, bias=True)
|
| 932 |
+
)
|
| 933 |
+
)
|
| 934 |
+
------------------------------------------------------------------------------
|
latest
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
100000
|
zero_to_fp32.py
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Copyright (c) Microsoft Corporation.
|
| 4 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
|
| 6 |
+
# DeepSpeed Team
|
| 7 |
+
|
| 8 |
+
# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
|
| 9 |
+
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
| 10 |
+
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
| 11 |
+
# application.
|
| 12 |
+
#
|
| 13 |
+
# example: python zero_to_fp32.py . pytorch_model.bin
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import torch
|
| 17 |
+
import glob
|
| 18 |
+
import math
|
| 19 |
+
import os
|
| 20 |
+
import re
|
| 21 |
+
from collections import OrderedDict
|
| 22 |
+
from dataclasses import dataclass
|
| 23 |
+
|
| 24 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
| 25 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
| 26 |
+
from deepspeed.utils import logger
|
| 27 |
+
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
| 28 |
+
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
| 29 |
+
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@dataclass
class zero_model_state:
    """Container for the model-state half of a DeepSpeed ZeRO checkpoint.

    Fields:
        buffers: mapping of buffer name -> fp32 tensor recovered from the checkpoint.
        param_shapes: per-param-group mappings of parameter name -> shape.
        shared_params: list of [name, shared_name] pairs for tied parameters.
        ds_version: DeepSpeed version recorded in the checkpoint (may be None).
        frozen_param_shapes: mapping of frozen parameter name -> shape, or None.
        frozen_param_fragments: mapping of frozen parameter name -> tensor fragment, or None.
    """
    # NOTE: the original annotations used `dict()` — an empty dict *instance* — as the
    # annotation expression; the annotations should be the types themselves.
    buffers: dict
    param_shapes: dict
    shared_params: list
    # NOTE(review): annotated `int` upstream, but DeepSpeed records version strings
    # like "0.9.0" — annotating as str; confirm against checkpoint contents.
    ds_version: str
    frozen_param_shapes: dict
    frozen_param_fragments: dict
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Set to a non-zero value to enable the verbose debug printing scattered
# through this script (e.g. buffer/param-shape dumps while parsing).
debug = 0

# load to cpu — every checkpoint tensor is materialized on CPU so the
# conversion never requires a GPU.
device = torch.device('cpu')
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def atoi(text):
    """Return *text* converted to int when it consists solely of digits, otherwise unchanged."""
    if text.isdigit():
        return int(text)
    return text
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def natural_keys(text):
    """Sort key implementing human ("natural") ordering of strings with embedded numbers.

    alist.sort(key=natural_keys) sorts in human order, e.g. 'rank2' < 'rank10'.
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single model-states file for this checkpoint.

    Args:
        checkpoint_dir: directory holding one DeepSpeed checkpoint.
        zero_stage: ZeRO stage recorded in the checkpoint (1, 2 or 3); the
            stage determines the file-naming convention used at save time.

    Raises:
        FileNotFoundError: if the directory or the expected file doesn't exist.
        ValueError: if zero_stage is not a recognized ZeRO stage.
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # The original fell through here and crashed later with UnboundLocalError;
        # fail with an explicit, descriptive error instead.
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return every file under *checkpoint_dir* matching *glob_pattern*, naturally sorted.

    Raises:
        FileNotFoundError: when no file matches the pattern.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    ckpt_files = sorted(glob.glob(pattern), key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted '*_optim_states.pt' files found in *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted '*_model_states.pt' files found in *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def parse_model_states(files):
    """Load each model-states file and distill it into a `zero_model_state`.

    Args:
        files: paths to `*_model_states.pt` files (one per model-parallel rank).

    Returns:
        A list of `zero_model_state` records, one per input file.

    Raises:
        ValueError: if a file lacks the buffer-names key and is therefore not
            a model-state checkpoint.
    """
    zero_model_states = []
    for file in files:
        # Checkpoints are pickled with DeepSpeed data structures; load on CPU.
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built here but not used later in this
        # function — presumably kept for parity with upstream; confirm before removing.
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params — pairs of [parameter name, name it is tied to]
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        # DeepSpeed version the checkpoint was written with (None if absent).
        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load every rank's ``*_optim_states.pt`` file and extract the flattened
    fp32 master-weight partitions.

    Args:
        - ``files``: list of paths to the per-rank optimizer-states files
        - ``ds_checkpoint_dir``: checkpoint folder (used only for error messages)

    Returns:
        tuple of ``(zero_stage, world_size, fp32_flat_groups)``

    Raises:
        ValueError: if the files are not a zero checkpoint, the zero stage is
            unknown, or the file count does not match the saved world size
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if isinstance(world_size, list):
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """Reconstruct the fp32 state_dict from a DeepSpeed checkpoint folder.

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: skip frozen parameters when merging
    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)
    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # parse_optim_states already rejected any stage other than <=2 or 3,
    # so a two-way dispatch is exhaustive here
    merge_fn = (_get_fp32_state_dict_from_zero2_checkpoint
                if zero_stage <= 2 else _get_fp32_state_dict_from_zero3_checkpoint)
    return merge_fn(world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters)
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy the frozen (non-trainable) parameters into ``state_dict``.

    In ZeRO-2 frozen params are not partitioned across ranks, so rank 0's
    fragments already hold each full tensor and can be used as-is.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # no reconstruction needed: the fragment is the whole param in zero-2
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _has_callable(obj, fn):
|
| 254 |
+
attr = getattr(obj, fn, None)
|
| 255 |
+
return callable(attr)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct the trainable params for a ZeRO-2 checkpoint.

    Concatenates each param group's per-rank fp32 partitions into one flat
    vector, then slices consecutive params out of it using the recorded shapes.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain tuple/list of ints
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 ``state_dict`` for a ZeRO-2 checkpoint:
    buffers first, then frozen params (unless excluded), then trainable
    params, and finally re-link shared params."""
    rank0 = zero_model_states[0]

    # buffers pass through unchanged (already restored to fp32)
    state_dict = OrderedDict(rank0.buffers)
    if debug:
        print(f"added {len(rank0.buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each alias points at the same tensor as its source
    for alias, source in rank0.shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(partitioned_numel, padding_numel)`` for a ZeRO-3 param:
    the per-rank partition size and the number of padding elements the last
    rank carries so every partition is the same size."""
    full_chunks, leftover = divmod(unpartitioned_numel, world_size)
    if leftover:
        return full_chunks + 1, world_size - leftover
    return full_chunks, 0
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reconstruct full frozen params for ZeRO-3 by concatenating every rank's
    fragment and trimming the per-rank alignment padding."""
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    # every rank holds an equal-size fragment, hence the * world_size
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate this param's fragment from every rank, then drop the tail padding
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct the trainable params for a ZeRO-3 checkpoint.

    In ZeRO-3 every param is partitioned across all ranks at the same offset,
    so each param is rebuilt by concatenating the slice at ``offset`` from
    every rank's flat group and trimming the alignment padding.
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # all ranks consumed the same number of elements, so scale up to compare
    # against the total available element count
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Build the consolidated fp32 ``state_dict`` for a ZeRO-3 checkpoint:
    buffers first, then frozen params (unless excluded), then trainable
    params, and finally re-link shared params."""
    rank0 = zero_model_states[0]

    # buffers pass through unchanged (already restored to fp32)
    state_dict = OrderedDict(rank0.buffers)
    if debug:
        print(f"added {len(rank0.buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: each alias points at the same tensor as its source
    for alias, source in rank0.shared_params:
        if source in state_dict:
            state_dict[alias] = state_dict[source]

    return state_dict
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        # resolve the most recent checkpoint tag from the 'latest' marker file
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    fp32_state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                               tag=tag,
                                                               exclude_frozen_parameters=exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(fp32_state_dict, output_file)
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: the consolidated state_dict may omit entries (e.g. excluded
    # frozen params) that the live model still carries
    model.load_state_dict(state_dict, strict=False)

    return model
if __name__ == "__main__":

    # CLI entry point: convert a DeepSpeed ZeRO checkpoint folder into a single
    # consolidated fp32 pytorch state_dict file.
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # module-level flag consumed by the helper functions above
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)