bird-of-paradise committed on
Commit 1f4d6e1 · verified · 1 Parent(s): 99e0105

Fix encoder-decoder interaction: use final encoder output for all decoder layers


The previous version paired encoder and decoder layers one-to-one (layer i of the decoder
cross-attended to the output of encoder layer i), but the standard implementation passes the
final encoder output to every decoder layer for cross-attention. Thanks to community feedback for the correction.
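For readers skimming the diff, below is a minimal sketch of the corrected wiring: the encoder stack runs to completion first, and its final output is what every decoder layer cross-attends to, matching the original "Attention Is All You Need" setup. It is a sketch only, not the notebook's code: PyTorch's built-in nn.TransformerEncoderLayer and nn.TransformerDecoderLayer stand in for the tutorial's own EncoderLayer and DecoderLayer classes, and the padding-mask argument used in the notebook is omitted for brevity.

import torch
import torch.nn as nn

class Transformer(nn.Module):
    # Sketch only: torch's TransformerEncoderLayer / TransformerDecoderLayer stand in
    # for the tutorial's EncoderLayer / DecoderLayer classes; padding masks are omitted.
    def __init__(self, num_layers=6, d_model=512, num_heads=8):
        super().__init__()
        self.encoder_stack = nn.ModuleList(
            [nn.TransformerEncoderLayer(d_model, num_heads, batch_first=True)
             for _ in range(num_layers)]
        )
        self.decoder_stack = nn.ModuleList(
            [nn.TransformerDecoderLayer(d_model, num_heads, batch_first=True)
             for _ in range(num_layers)]
        )

    def forward(self, embed_encoder_input, embed_decoder_input):
        # Run the full encoder stack first.
        encoder_output = embed_encoder_input
        for encoder in self.encoder_stack:
            encoder_output = encoder(encoder_output)

        # Then feed the *final* encoder output to every decoder layer's
        # cross-attention (rather than pairing decoder layer i with
        # encoder layer i, as the old zip-based loop did).
        decoder_output = embed_decoder_input
        for decoder in self.decoder_stack:
            decoder_output = decoder(decoder_output, encoder_output)

        return decoder_output

model = Transformer()
src = torch.randn(8, 10, 512)   # (batch, source length, d_model)
tgt = torch.randn(8, 10, 512)   # (batch, target length, d_model)
print(model(src, tgt).shape)    # torch.Size([8, 10, 512]), matching the shape checks in the diff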

Transformer_Implementation_Tutorial.ipynb CHANGED
@@ -437,7 +437,7 @@
437
  },
438
  {
439
  "cell_type": "code",
440
- "execution_count": 9,
441
  "id": "89c2eebd-d774-4734-b9ea-486183182d4c",
442
  "metadata": {},
443
  "outputs": [],
@@ -520,7 +520,7 @@
520
  },
521
  {
522
  "cell_type": "code",
523
- "execution_count": 11,
524
  "id": "6bed1d90-9b3b-468d-a746-d1d0e1753c19",
525
  "metadata": {},
526
  "outputs": [
@@ -583,7 +583,7 @@
583
  },
584
  {
585
  "cell_type": "code",
586
- "execution_count": 13,
587
  "id": "f3aca0ac-3b48-4033-a889-dd019e2d67a0",
588
  "metadata": {},
589
  "outputs": [],
@@ -705,7 +705,7 @@
705
  },
706
  {
707
  "cell_type": "code",
708
- "execution_count": 15,
709
  "id": "a9678630-4e02-442f-9503-15066a574228",
710
  "metadata": {},
711
  "outputs": [
@@ -760,7 +760,7 @@
760
  },
761
  {
762
  "cell_type": "code",
763
- "execution_count": 17,
764
  "id": "aefccb90-9a89-4579-aba0-d837535f2d98",
765
  "metadata": {},
766
  "outputs": [],
@@ -812,14 +812,16 @@
812
  "\n",
813
  " \n",
814
  " def forward(self, embed_encoder_input, embed_decoder_input, padding_mask=None):\n",
815
- " # First layer of the encoder, decoder deck takes input from outside the deck\n",
816
  " encoder_output = embed_encoder_input\n",
817
  " decoder_output = embed_decoder_input\n",
818
- "\n",
819
- " for (encoder, decoder) in zip(self.encoder_stack, self.decoder_stack):\n",
820
- " encoder_output = encoder(embed_input = encoder_output, padding_mask = padding_mask)\n",
821
- " decoder_output = decoder(embed_input = decoder_output, cross_input =encoder_output, padding_mask=padding_mask)\n",
822
- " \n",
823
  " return decoder_output"
824
  ]
825
  },
@@ -850,7 +852,7 @@
850
  },
851
  {
852
  "cell_type": "code",
853
- "execution_count": 19,
854
  "id": "e86e2ade-4584-4905-89ec-beaac7ebf401",
855
  "metadata": {},
856
  "outputs": [],
@@ -962,10 +964,24 @@
962
  },
963
  {
964
  "cell_type": "code",
965
- "execution_count": 21,
966
  "id": "f099df40-9609-407d-b3a8-0913bd77e011",
967
  "metadata": {},
968
  "outputs": [
969
  {
970
  "name": "stdout",
971
  "output_type": "stream",
@@ -1015,7 +1031,7 @@
1015
  },
1016
  {
1017
  "cell_type": "code",
1018
- "execution_count": 23,
1019
  "id": "cae577d2-6ad1-4e9f-ad9b-26c646ec7c2e",
1020
  "metadata": {},
1021
  "outputs": [
@@ -1025,16 +1041,16 @@
1025
  "text": [
1026
  "\n",
1027
  "Output Statistics:\n",
1028
- "Mean: 0.0000\n",
1029
  "Std: 1.0000\n",
1030
- "Min: -4.1044\n",
1031
- "Max: 3.5525\n",
1032
  "\n",
1033
  "Attention Analysis:\n",
1034
- "Unmasked positions mean: 0.8049\n",
1035
- "Masked positions mean: 0.8041\n",
1036
  "\n",
1037
- "Is the masking working? Yes\n",
1038
  "\n",
1039
  "All tests passed successfully!\n"
1040
  ]
@@ -1127,7 +1143,7 @@
1127
  },
1128
  {
1129
  "cell_type": "code",
1130
- "execution_count": 25,
1131
  "id": "94221639-1a01-48f7-b805-9113d4347ba7",
1132
  "metadata": {},
1133
  "outputs": [
@@ -1137,10 +1153,10 @@
1137
  "text": [
1138
  "\n",
1139
  "Output Statistics:\n",
1140
- "Mean: 0.0000\n",
1141
  "Std: 1.0000\n",
1142
- "Min: -4.3335\n",
1143
- "Max: 4.3904\n",
1144
  "\n",
1145
  "Shape Analysis:\n",
1146
  "Input shape: torch.Size([32, 20, 512])\n",
@@ -1225,7 +1241,7 @@
1225
  },
1226
  {
1227
  "cell_type": "code",
1228
- "execution_count": 27,
1229
  "id": "aab402c1-48a7-48b1-9202-3d2a43e186c9",
1230
  "metadata": {},
1231
  "outputs": [
@@ -1235,34 +1251,34 @@
1235
  "text": [
1236
  "\n",
1237
  "Encoder Layer 0 shape: torch.Size([8, 10, 512])\n",
1238
- "Decoder Layer 0 shape: torch.Size([8, 10, 512])\n",
1239
  "\n",
1240
  "Encoder Layer 1 shape: torch.Size([8, 10, 512])\n",
1241
- "Decoder Layer 1 shape: torch.Size([8, 10, 512])\n",
1242
  "\n",
1243
  "Encoder Layer 2 shape: torch.Size([8, 10, 512])\n",
1244
- "Decoder Layer 2 shape: torch.Size([8, 10, 512])\n",
1245
  "\n",
1246
  "Encoder Layer 3 shape: torch.Size([8, 10, 512])\n",
1247
- "Decoder Layer 3 shape: torch.Size([8, 10, 512])\n",
1248
  "\n",
1249
  "Encoder Layer 4 shape: torch.Size([8, 10, 512])\n",
1250
- "Decoder Layer 4 shape: torch.Size([8, 10, 512])\n",
1251
  "\n",
1252
  "Encoder Layer 5 shape: torch.Size([8, 10, 512])\n",
1253
  "Decoder Layer 5 shape: torch.Size([8, 10, 512])\n",
1254
  "\n",
1255
  "Final Output Statistics:\n",
1256
- "Mean: -0.0000\n",
1257
  "Std: 1.0000\n",
1258
- "Min: -4.3020\n",
1259
- "Max: 3.8850\n",
1260
  "\n",
1261
  "Shape Preservation Check:\n",
1262
  "Input shapes - Encoder: torch.Size([8, 10, 512]), Decoder: torch.Size([8, 10, 512])\n",
1263
  "Output shape: torch.Size([8, 10, 512])\n",
1264
  "\n",
1265
- "Mean absolute difference between input and output: 0.9114\n",
1266
  "Transformation occurred: Yes\n",
1267
  "\n",
1268
  "Total number of parameters: 37,834,752\n",
@@ -1356,7 +1372,7 @@
1356
  },
1357
  {
1358
  "cell_type": "code",
1359
- "execution_count": 29,
1360
  "id": "dfe316a8-d66f-417f-93e9-4450e4378e56",
1361
  "metadata": {},
1362
  "outputs": [
@@ -1374,7 +1390,7 @@
1374
  "\n",
1375
  "Probability Distribution Check:\n",
1376
  "Sum to 1: True\n",
1377
- "Max probability: 0.0004\n",
1378
  "Min probability: 0.0000\n",
1379
  "\n",
1380
  "Sample Predictions:\n",
@@ -1382,10 +1398,10 @@
1382
  "J'ai attendu un cours HuggingFace toute ma vie.\n",
1383
  "\n",
1384
  "Model output (decoded):\n",
1385
- "grassyane functioned bombay defiant necessitated necessitatedtipgne firmstipพ groupinglic chiefly\n",
1386
  "\n",
1387
  "Training Check:\n",
1388
- "Loss value: 10.7644\n",
1389
  "Has gradients: True\n"
1390
  ]
1391
  }
@@ -1523,7 +1539,7 @@
1523
  },
1524
  {
1525
  "cell_type": "code",
1526
- "execution_count": 31,
1527
  "id": "caf9fe26-7537-4a5b-9a66-2313bb36c40c",
1528
  "metadata": {},
1529
  "outputs": [
@@ -1616,7 +1632,7 @@
1616
  },
1617
  {
1618
  "cell_type": "code",
1619
- "execution_count": 33,
1620
  "id": "e2e9d8c0-7a6a-4c50-85ef-0e805f785f57",
1621
  "metadata": {},
1622
  "outputs": [
@@ -1722,7 +1738,7 @@
1722
  },
1723
  {
1724
  "cell_type": "code",
1725
- "execution_count": 35,
1726
  "id": "a1623643-fa64-4f9e-b399-d9d0fc6c0f54",
1727
  "metadata": {},
1728
  "outputs": [
@@ -1734,22 +1750,22 @@
1734
  "Cross-Attention Matrix Shape: torch.Size([2, 8, 5, 7])\n",
1735
  "\n",
1736
  "Cross-Attention Pattern (first head):\n",
1737
- "tensor([[0.1312, 0.1409, 0.1628, 0.1198, 0.1629, 0.1066, 0.1758],\n",
1738
- " [0.1423, 0.1153, 0.1398, 0.1557, 0.1642, 0.1757, 0.1070],\n",
1739
- " [0.0883, 0.2136, 0.0957, 0.2153, 0.1842, 0.0792, 0.1239],\n",
1740
- " [0.1307, 0.1610, 0.1614, 0.1063, 0.0865, 0.2293, 0.1249],\n",
1741
- " [0.1734, 0.0858, 0.1896, 0.1418, 0.1356, 0.1347, 0.1391]])\n",
1742
  "\n",
1743
  "Cross-Attention Analysis:\n",
1744
  "Mean attention weight: 0.1429\n",
1745
- "Min attention weight: 0.0396\n",
1746
- "Max attention weight: 0.4592\n",
1747
  "\n",
1748
  "Attention Coverage:\n",
1749
  "Each position's attention sums to 1: True\n",
1750
  "Every decoder position attends to some encoder position: True\n",
1751
  "\n",
1752
- "Attention entropy (higher means more uniform attention): 1.8920\n"
1753
  ]
1754
  }
1755
  ],
@@ -1826,7 +1842,7 @@
1826
  },
1827
  {
1828
  "cell_type": "code",
1829
- "execution_count": 39,
1830
  "id": "4c7a7533-75c3-4575-af0a-cdc90e8b4815",
1831
  "metadata": {},
1832
  "outputs": [
@@ -1839,11 +1855,11 @@
1839
  "\n",
1840
  "Cross-Attention Pattern (first head):\n",
1841
  "(Last two encoder positions should have zero attention)\n",
1842
- "tensor([[0.1828, 0.1964, 0.2268, 0.1669, 0.2271, 0.0000, 0.0000],\n",
1843
- " [0.1983, 0.1608, 0.1949, 0.2170, 0.2290, 0.0000, 0.0000],\n",
1844
- " [0.1107, 0.2680, 0.1200, 0.2702, 0.2311, 0.0000, 0.0000],\n",
1845
- " [0.2023, 0.2492, 0.2499, 0.1647, 0.1339, 0.0000, 0.0000],\n",
1846
- " [0.2387, 0.1182, 0.2611, 0.1953, 0.1867, 0.0000, 0.0000]])\n",
1847
  "\n",
1848
  "Masking Analysis:\n",
1849
  "Mean attention to masked positions: 0.00000000\n",
@@ -1854,8 +1870,8 @@
1854
  "Each position's attention sums to 1: True\n",
1855
  "\n",
1856
  "Unmasked Position Analysis:\n",
1857
- "Min attention to unmasked positions: 0.0465\n",
1858
- "Max attention to unmasked positions: 0.5392\n"
1859
  ]
1860
  }
1861
  ],
 
437
  },
438
  {
439
  "cell_type": "code",
440
+ "execution_count": 7,
441
  "id": "89c2eebd-d774-4734-b9ea-486183182d4c",
442
  "metadata": {},
443
  "outputs": [],
 
520
  },
521
  {
522
  "cell_type": "code",
523
+ "execution_count": 9,
524
  "id": "6bed1d90-9b3b-468d-a746-d1d0e1753c19",
525
  "metadata": {},
526
  "outputs": [
 
583
  },
584
  {
585
  "cell_type": "code",
586
+ "execution_count": 9,
587
  "id": "f3aca0ac-3b48-4033-a889-dd019e2d67a0",
588
  "metadata": {},
589
  "outputs": [],
 
705
  },
706
  {
707
  "cell_type": "code",
708
+ "execution_count": 13,
709
  "id": "a9678630-4e02-442f-9503-15066a574228",
710
  "metadata": {},
711
  "outputs": [
 
760
  },
761
  {
762
  "cell_type": "code",
763
+ "execution_count": 15,
764
  "id": "aefccb90-9a89-4579-aba0-d837535f2d98",
765
  "metadata": {},
766
  "outputs": [],
 
812
  "\n",
813
  " \n",
814
  " def forward(self, embed_encoder_input, embed_decoder_input, padding_mask=None):\n",
815
+ " # Process through all encoder layers first\n",
816
  " encoder_output = embed_encoder_input\n",
817
+ " for encoder in self.encoder_stack:\n",
818
+ " encoder_output = encoder(encoder_output, padding_mask)\n",
819
+ " \n",
820
+ " # Use final encoder output for all decoder layers\n",
821
  " decoder_output = embed_decoder_input\n",
822
+ " for decoder in self.decoder_stack:\n",
823
+ " decoder_output = decoder(decoder_output, encoder_output, padding_mask)\n",
824
+ " \n",
825
  " return decoder_output"
826
  ]
827
  },
 
852
  },
853
  {
854
  "cell_type": "code",
855
+ "execution_count": 13,
856
  "id": "e86e2ade-4584-4905-89ec-beaac7ebf401",
857
  "metadata": {},
858
  "outputs": [],
 
964
  },
965
  {
966
  "cell_type": "code",
967
+ "execution_count": 15,
968
  "id": "f099df40-9609-407d-b3a8-0913bd77e011",
969
  "metadata": {},
970
  "outputs": [
971
+ {
972
+ "name": "stderr",
973
+ "output_type": "stream",
974
+ "text": [
975
+ "/opt/anaconda3/lib/python3.11/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/opt/anaconda3/lib/python3.11/site-packages/torchvision/image.so, 0x0006): Symbol not found: __ZN3c1017RegisterOperatorsD1Ev\n",
976
+ " Referenced from: <1868C013-6C01-31FA-98D3-E369F1FD0275> /opt/anaconda3/lib/python3.11/site-packages/torchvision/image.so\n",
977
+ " Expected in: <44DEDA27-4DE9-3D4A-8EDE-5AA72081319F> /opt/anaconda3/lib/python3.11/site-packages/torch/lib/libtorch_cpu.dylib'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n",
978
+ " warn(\n",
979
+ "/opt/anaconda3/lib/python3.11/site-packages/torchvision/datapoints/__init__.py:12: UserWarning: The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. While we do not expect major breaking changes, some APIs may still change according to user feedback. Please submit any feedback you may have in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check out https://github.com/pytorch/vision/issues/7319 to learn more about the APIs that we suspect might involve future changes. You can silence this warning by calling torchvision.disable_beta_transforms_warning().\n",
980
+ " warnings.warn(_BETA_TRANSFORMS_WARNING)\n",
981
+ "/opt/anaconda3/lib/python3.11/site-packages/torchvision/transforms/v2/__init__.py:54: UserWarning: The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. While we do not expect major breaking changes, some APIs may still change according to user feedback. Please submit any feedback you may have in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check out https://github.com/pytorch/vision/issues/7319 to learn more about the APIs that we suspect might involve future changes. You can silence this warning by calling torchvision.disable_beta_transforms_warning().\n",
982
+ " warnings.warn(_BETA_TRANSFORMS_WARNING)\n"
983
+ ]
984
+ },
985
  {
986
  "name": "stdout",
987
  "output_type": "stream",
 
1031
  },
1032
  {
1033
  "cell_type": "code",
1034
+ "execution_count": 17,
1035
  "id": "cae577d2-6ad1-4e9f-ad9b-26c646ec7c2e",
1036
  "metadata": {},
1037
  "outputs": [
 
1041
  "text": [
1042
  "\n",
1043
  "Output Statistics:\n",
1044
+ "Mean: -0.0000\n",
1045
  "Std: 1.0000\n",
1046
+ "Min: -2.7968\n",
1047
+ "Max: 2.8519\n",
1048
  "\n",
1049
  "Attention Analysis:\n",
1050
+ "Unmasked positions mean: 0.8078\n",
1051
+ "Masked positions mean: 0.8078\n",
1052
  "\n",
1053
+ "Is the masking working? No\n",
1054
  "\n",
1055
  "All tests passed successfully!\n"
1056
  ]
 
1143
  },
1144
  {
1145
  "cell_type": "code",
1146
+ "execution_count": 19,
1147
  "id": "94221639-1a01-48f7-b805-9113d4347ba7",
1148
  "metadata": {},
1149
  "outputs": [
 
1153
  "text": [
1154
  "\n",
1155
  "Output Statistics:\n",
1156
+ "Mean: -0.0000\n",
1157
  "Std: 1.0000\n",
1158
+ "Min: -4.3617\n",
1159
+ "Max: 4.5787\n",
1160
  "\n",
1161
  "Shape Analysis:\n",
1162
  "Input shape: torch.Size([32, 20, 512])\n",
 
1241
  },
1242
  {
1243
  "cell_type": "code",
1244
+ "execution_count": 21,
1245
  "id": "aab402c1-48a7-48b1-9202-3d2a43e186c9",
1246
  "metadata": {},
1247
  "outputs": [
 
1251
  "text": [
1252
  "\n",
1253
  "Encoder Layer 0 shape: torch.Size([8, 10, 512])\n",
1254
  "\n",
1255
  "Encoder Layer 1 shape: torch.Size([8, 10, 512])\n",
1256
  "\n",
1257
  "Encoder Layer 2 shape: torch.Size([8, 10, 512])\n",
1258
  "\n",
1259
  "Encoder Layer 3 shape: torch.Size([8, 10, 512])\n",
1260
  "\n",
1261
  "Encoder Layer 4 shape: torch.Size([8, 10, 512])\n",
1262
  "\n",
1263
  "Encoder Layer 5 shape: torch.Size([8, 10, 512])\n",
1264
+ "Decoder Layer 0 shape: torch.Size([8, 10, 512])\n",
1265
+ "Decoder Layer 1 shape: torch.Size([8, 10, 512])\n",
1266
+ "Decoder Layer 2 shape: torch.Size([8, 10, 512])\n",
1267
+ "Decoder Layer 3 shape: torch.Size([8, 10, 512])\n",
1268
+ "Decoder Layer 4 shape: torch.Size([8, 10, 512])\n",
1269
  "Decoder Layer 5 shape: torch.Size([8, 10, 512])\n",
1270
  "\n",
1271
  "Final Output Statistics:\n",
1272
+ "Mean: 0.0000\n",
1273
  "Std: 1.0000\n",
1274
+ "Min: -3.7172\n",
1275
+ "Max: 4.1310\n",
1276
  "\n",
1277
  "Shape Preservation Check:\n",
1278
  "Input shapes - Encoder: torch.Size([8, 10, 512]), Decoder: torch.Size([8, 10, 512])\n",
1279
  "Output shape: torch.Size([8, 10, 512])\n",
1280
  "\n",
1281
+ "Mean absolute difference between input and output: 0.9379\n",
1282
  "Transformation occurred: Yes\n",
1283
  "\n",
1284
  "Total number of parameters: 37,834,752\n",
 
1372
  },
1373
  {
1374
  "cell_type": "code",
1375
+ "execution_count": 23,
1376
  "id": "dfe316a8-d66f-417f-93e9-4450e4378e56",
1377
  "metadata": {},
1378
  "outputs": [
 
1390
  "\n",
1391
  "Probability Distribution Check:\n",
1392
  "Sum to 1: True\n",
1393
+ "Max probability: 0.0005\n",
1394
  "Min probability: 0.0000\n",
1395
  "\n",
1396
  "Sample Predictions:\n",
 
1398
  "J'ai attendu un cours HuggingFace toute ma vie.\n",
1399
  "\n",
1400
  "Model output (decoded):\n",
1401
+ "##aco bearer barriedate gate spoil lowlands tam navigation growls 1971 painfully demand negativelyzam [unused158] lowlands\n",
1402
  "\n",
1403
  "Training Check:\n",
1404
+ "Loss value: 10.7329\n",
1405
  "Has gradients: True\n"
1406
  ]
1407
  }
 
1539
  },
1540
  {
1541
  "cell_type": "code",
1542
+ "execution_count": 25,
1543
  "id": "caf9fe26-7537-4a5b-9a66-2313bb36c40c",
1544
  "metadata": {},
1545
  "outputs": [
 
1632
  },
1633
  {
1634
  "cell_type": "code",
1635
+ "execution_count": 27,
1636
  "id": "e2e9d8c0-7a6a-4c50-85ef-0e805f785f57",
1637
  "metadata": {},
1638
  "outputs": [
 
1738
  },
1739
  {
1740
  "cell_type": "code",
1741
+ "execution_count": 29,
1742
  "id": "a1623643-fa64-4f9e-b399-d9d0fc6c0f54",
1743
  "metadata": {},
1744
  "outputs": [
 
1750
  "Cross-Attention Matrix Shape: torch.Size([2, 8, 5, 7])\n",
1751
  "\n",
1752
  "Cross-Attention Pattern (first head):\n",
1753
+ "tensor([[0.1308, 0.1502, 0.1380, 0.1131, 0.1987, 0.1117, 0.1576],\n",
1754
+ " [0.1303, 0.1041, 0.1502, 0.1756, 0.1679, 0.1589, 0.1130],\n",
1755
+ " [0.0896, 0.2159, 0.1142, 0.1718, 0.1797, 0.0844, 0.1444],\n",
1756
+ " [0.1250, 0.1650, 0.1607, 0.1053, 0.0868, 0.2349, 0.1223],\n",
1757
+ " [0.1637, 0.0842, 0.2093, 0.1223, 0.1274, 0.1392, 0.1540]])\n",
1758
  "\n",
1759
  "Cross-Attention Analysis:\n",
1760
  "Mean attention weight: 0.1429\n",
1761
+ "Min attention weight: 0.0389\n",
1762
+ "Max attention weight: 0.4142\n",
1763
  "\n",
1764
  "Attention Coverage:\n",
1765
  "Each position's attention sums to 1: True\n",
1766
  "Every decoder position attends to some encoder position: True\n",
1767
  "\n",
1768
+ "Attention entropy (higher means more uniform attention): 1.8917\n"
1769
  ]
1770
  }
1771
  ],
 
1842
  },
1843
  {
1844
  "cell_type": "code",
1845
+ "execution_count": 31,
1846
  "id": "4c7a7533-75c3-4575-af0a-cdc90e8b4815",
1847
  "metadata": {},
1848
  "outputs": [
 
1855
  "\n",
1856
  "Cross-Attention Pattern (first head):\n",
1857
  "(Last two encoder positions should have zero attention)\n",
1858
+ "tensor([[0.1791, 0.2055, 0.1888, 0.1547, 0.2719, 0.0000, 0.0000],\n",
1859
+ " [0.1789, 0.1430, 0.2063, 0.2412, 0.2306, 0.0000, 0.0000],\n",
1860
+ " [0.1162, 0.2800, 0.1480, 0.2228, 0.2330, 0.0000, 0.0000],\n",
1861
+ " [0.1945, 0.2566, 0.2500, 0.1638, 0.1350, 0.0000, 0.0000],\n",
1862
+ " [0.2316, 0.1191, 0.2961, 0.1730, 0.1802, 0.0000, 0.0000]])\n",
1863
  "\n",
1864
  "Masking Analysis:\n",
1865
  "Mean attention to masked positions: 0.00000000\n",
 
1870
  "Each position's attention sums to 1: True\n",
1871
  "\n",
1872
  "Unmasked Position Analysis:\n",
1873
+ "Min attention to unmasked positions: 0.0458\n",
1874
+ "Max attention to unmasked positions: 0.4875\n"
1875
  ]
1876
  }
1877
  ],