Matcha_clips_224_fintuned_4 / trainer_state.json
Commit a08ad0d (verified): Training in progress, epoch 1
{
"best_global_step": 460,
"best_metric": 0.6951219512195121,
"best_model_checkpoint": "./Models/Matcha_clips_224_fintuned_4/checkpoint-460",
"epoch": 9.1,
"eval_steps": 500,
"global_step": 1150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008695652173913044,
"grad_norm": 6.4240241050720215,
"learning_rate": 3.91304347826087e-06,
"loss": 1.9844,
"step": 10
},
{
"epoch": 0.017391304347826087,
"grad_norm": 5.0679426193237305,
"learning_rate": 8.26086956521739e-06,
"loss": 1.9772,
"step": 20
},
{
"epoch": 0.02608695652173913,
"grad_norm": 7.605220794677734,
"learning_rate": 1.2608695652173914e-05,
"loss": 1.854,
"step": 30
},
{
"epoch": 0.034782608695652174,
"grad_norm": 8.32487964630127,
"learning_rate": 1.6956521739130433e-05,
"loss": 1.9078,
"step": 40
},
{
"epoch": 0.043478260869565216,
"grad_norm": 9.476813316345215,
"learning_rate": 2.1304347826086958e-05,
"loss": 1.6298,
"step": 50
},
{
"epoch": 0.05217391304347826,
"grad_norm": 12.970176696777344,
"learning_rate": 2.5652173913043483e-05,
"loss": 1.791,
"step": 60
},
{
"epoch": 0.06086956521739131,
"grad_norm": 12.027438163757324,
"learning_rate": 3e-05,
"loss": 1.6904,
"step": 70
},
{
"epoch": 0.06956521739130435,
"grad_norm": 9.076032638549805,
"learning_rate": 3.4347826086956526e-05,
"loss": 1.6026,
"step": 80
},
{
"epoch": 0.0782608695652174,
"grad_norm": 9.857560157775879,
"learning_rate": 3.869565217391305e-05,
"loss": 1.8465,
"step": 90
},
{
"epoch": 0.08695652173913043,
"grad_norm": 10.760807991027832,
"learning_rate": 4.304347826086957e-05,
"loss": 1.5482,
"step": 100
},
{
"epoch": 0.09565217391304348,
"grad_norm": 13.033352851867676,
"learning_rate": 4.739130434782609e-05,
"loss": 1.3814,
"step": 110
},
{
"epoch": 0.1,
"eval_accuracy": 0.4268292682926829,
"eval_loss": 1.5108782052993774,
"eval_runtime": 6.2348,
"eval_samples_per_second": 13.152,
"eval_steps_per_second": 3.368,
"step": 115
},
{
"epoch": 1.0043478260869565,
"grad_norm": 10.566363334655762,
"learning_rate": 4.980676328502415e-05,
"loss": 1.2744,
"step": 120
},
{
"epoch": 1.0130434782608695,
"grad_norm": 9.955045700073242,
"learning_rate": 4.932367149758454e-05,
"loss": 1.0512,
"step": 130
},
{
"epoch": 1.0217391304347827,
"grad_norm": 21.17795181274414,
"learning_rate": 4.884057971014493e-05,
"loss": 1.0042,
"step": 140
},
{
"epoch": 1.0304347826086957,
"grad_norm": 8.381865501403809,
"learning_rate": 4.8357487922705316e-05,
"loss": 0.7597,
"step": 150
},
{
"epoch": 1.0391304347826087,
"grad_norm": 10.130925178527832,
"learning_rate": 4.7874396135265706e-05,
"loss": 0.9822,
"step": 160
},
{
"epoch": 1.0478260869565217,
"grad_norm": 17.177282333374023,
"learning_rate": 4.739130434782609e-05,
"loss": 0.8898,
"step": 170
},
{
"epoch": 1.0565217391304347,
"grad_norm": 20.3695068359375,
"learning_rate": 4.690821256038648e-05,
"loss": 0.8532,
"step": 180
},
{
"epoch": 1.065217391304348,
"grad_norm": 15.664092063903809,
"learning_rate": 4.642512077294686e-05,
"loss": 0.873,
"step": 190
},
{
"epoch": 1.0739130434782609,
"grad_norm": 16.01185417175293,
"learning_rate": 4.594202898550725e-05,
"loss": 1.0756,
"step": 200
},
{
"epoch": 1.0826086956521739,
"grad_norm": 19.479690551757812,
"learning_rate": 4.545893719806764e-05,
"loss": 1.3473,
"step": 210
},
{
"epoch": 1.0913043478260869,
"grad_norm": 14.130785942077637,
"learning_rate": 4.497584541062802e-05,
"loss": 0.7922,
"step": 220
},
{
"epoch": 1.1,
"grad_norm": 9.247044563293457,
"learning_rate": 4.449275362318841e-05,
"loss": 0.776,
"step": 230
},
{
"epoch": 1.1,
"eval_accuracy": 0.573170731707317,
"eval_loss": 1.0662370920181274,
"eval_runtime": 6.0024,
"eval_samples_per_second": 13.661,
"eval_steps_per_second": 3.499,
"step": 230
},
{
"epoch": 2.008695652173913,
"grad_norm": 4.517851829528809,
"learning_rate": 4.4009661835748794e-05,
"loss": 0.2886,
"step": 240
},
{
"epoch": 2.017391304347826,
"grad_norm": 3.206162452697754,
"learning_rate": 4.352657004830918e-05,
"loss": 0.4656,
"step": 250
},
{
"epoch": 2.026086956521739,
"grad_norm": 1.1080957651138306,
"learning_rate": 4.304347826086957e-05,
"loss": 0.2634,
"step": 260
},
{
"epoch": 2.034782608695652,
"grad_norm": 5.800728797912598,
"learning_rate": 4.256038647342995e-05,
"loss": 0.222,
"step": 270
},
{
"epoch": 2.0434782608695654,
"grad_norm": 0.48055946826934814,
"learning_rate": 4.207729468599034e-05,
"loss": 0.1454,
"step": 280
},
{
"epoch": 2.0521739130434784,
"grad_norm": 2.7402403354644775,
"learning_rate": 4.1594202898550726e-05,
"loss": 0.1433,
"step": 290
},
{
"epoch": 2.0608695652173914,
"grad_norm": 8.421381950378418,
"learning_rate": 4.111111111111111e-05,
"loss": 0.3145,
"step": 300
},
{
"epoch": 2.0695652173913044,
"grad_norm": 4.506998538970947,
"learning_rate": 4.06280193236715e-05,
"loss": 0.2138,
"step": 310
},
{
"epoch": 2.0782608695652174,
"grad_norm": 15.715832710266113,
"learning_rate": 4.014492753623188e-05,
"loss": 0.2296,
"step": 320
},
{
"epoch": 2.0869565217391304,
"grad_norm": 1.7134050130844116,
"learning_rate": 3.966183574879227e-05,
"loss": 0.0711,
"step": 330
},
{
"epoch": 2.0956521739130434,
"grad_norm": 3.08317232131958,
"learning_rate": 3.9178743961352657e-05,
"loss": 0.3286,
"step": 340
},
{
"epoch": 2.1,
"eval_accuracy": 0.6463414634146342,
"eval_loss": 1.0357838869094849,
"eval_runtime": 6.0464,
"eval_samples_per_second": 13.562,
"eval_steps_per_second": 3.473,
"step": 345
},
{
"epoch": 3.0043478260869567,
"grad_norm": 0.37449759244918823,
"learning_rate": 3.869565217391305e-05,
"loss": 0.1835,
"step": 350
},
{
"epoch": 3.0130434782608697,
"grad_norm": 0.18086479604244232,
"learning_rate": 3.821256038647344e-05,
"loss": 0.0584,
"step": 360
},
{
"epoch": 3.0217391304347827,
"grad_norm": 0.32367080450057983,
"learning_rate": 3.772946859903382e-05,
"loss": 0.061,
"step": 370
},
{
"epoch": 3.0304347826086957,
"grad_norm": 0.264346718788147,
"learning_rate": 3.7246376811594204e-05,
"loss": 0.0423,
"step": 380
},
{
"epoch": 3.0391304347826087,
"grad_norm": 8.53426456451416,
"learning_rate": 3.6763285024154594e-05,
"loss": 0.0256,
"step": 390
},
{
"epoch": 3.0478260869565217,
"grad_norm": 0.227620467543602,
"learning_rate": 3.628019323671498e-05,
"loss": 0.0892,
"step": 400
},
{
"epoch": 3.0565217391304347,
"grad_norm": 0.21275293827056885,
"learning_rate": 3.579710144927537e-05,
"loss": 0.0628,
"step": 410
},
{
"epoch": 3.0652173913043477,
"grad_norm": 0.09173234552145004,
"learning_rate": 3.531400966183575e-05,
"loss": 0.0088,
"step": 420
},
{
"epoch": 3.0739130434782607,
"grad_norm": 0.3767607510089874,
"learning_rate": 3.4830917874396135e-05,
"loss": 0.0297,
"step": 430
},
{
"epoch": 3.082608695652174,
"grad_norm": 0.8464566469192505,
"learning_rate": 3.4347826086956526e-05,
"loss": 0.0493,
"step": 440
},
{
"epoch": 3.091304347826087,
"grad_norm": 0.3951728045940399,
"learning_rate": 3.386473429951691e-05,
"loss": 0.0953,
"step": 450
},
{
"epoch": 3.1,
"grad_norm": 1.0671485662460327,
"learning_rate": 3.338164251207729e-05,
"loss": 0.1288,
"step": 460
},
{
"epoch": 3.1,
"eval_accuracy": 0.6951219512195121,
"eval_loss": 1.2568752765655518,
"eval_runtime": 6.1334,
"eval_samples_per_second": 13.369,
"eval_steps_per_second": 3.424,
"step": 460
},
{
"epoch": 4.008695652173913,
"grad_norm": 3.649686813354492,
"learning_rate": 3.289855072463768e-05,
"loss": 0.0122,
"step": 470
},
{
"epoch": 4.017391304347826,
"grad_norm": 1.168199062347412,
"learning_rate": 3.2415458937198066e-05,
"loss": 0.0274,
"step": 480
},
{
"epoch": 4.026086956521739,
"grad_norm": 0.3025151789188385,
"learning_rate": 3.1932367149758457e-05,
"loss": 0.0088,
"step": 490
},
{
"epoch": 4.034782608695652,
"grad_norm": 0.033977147191762924,
"learning_rate": 3.144927536231884e-05,
"loss": 0.0049,
"step": 500
},
{
"epoch": 4.043478260869565,
"grad_norm": 0.22555121779441833,
"learning_rate": 3.0966183574879224e-05,
"loss": 0.0073,
"step": 510
},
{
"epoch": 4.052173913043478,
"grad_norm": 1.255247712135315,
"learning_rate": 3.0483091787439617e-05,
"loss": 0.0074,
"step": 520
},
{
"epoch": 4.060869565217391,
"grad_norm": 0.27926549315452576,
"learning_rate": 3e-05,
"loss": 0.0117,
"step": 530
},
{
"epoch": 4.069565217391304,
"grad_norm": 0.019165927544236183,
"learning_rate": 2.951690821256039e-05,
"loss": 0.002,
"step": 540
},
{
"epoch": 4.078260869565217,
"grad_norm": 0.042612988501787186,
"learning_rate": 2.9033816425120775e-05,
"loss": 0.0192,
"step": 550
},
{
"epoch": 4.086956521739131,
"grad_norm": 0.05526461824774742,
"learning_rate": 2.8550724637681158e-05,
"loss": 0.0021,
"step": 560
},
{
"epoch": 4.095652173913043,
"grad_norm": 0.03402889147400856,
"learning_rate": 2.806763285024155e-05,
"loss": 0.0023,
"step": 570
},
{
"epoch": 4.1,
"eval_accuracy": 0.6951219512195121,
"eval_loss": 1.3209176063537598,
"eval_runtime": 6.1384,
"eval_samples_per_second": 13.359,
"eval_steps_per_second": 3.421,
"step": 575
},
{
"epoch": 5.004347826086956,
"grad_norm": 0.023641662672162056,
"learning_rate": 2.7584541062801932e-05,
"loss": 0.0017,
"step": 580
},
{
"epoch": 5.01304347826087,
"grad_norm": 0.029742104932665825,
"learning_rate": 2.7101449275362322e-05,
"loss": 0.0014,
"step": 590
},
{
"epoch": 5.021739130434782,
"grad_norm": 0.1183994710445404,
"learning_rate": 2.6618357487922706e-05,
"loss": 0.0016,
"step": 600
},
{
"epoch": 5.030434782608696,
"grad_norm": 0.033052194863557816,
"learning_rate": 2.6135265700483093e-05,
"loss": 0.001,
"step": 610
},
{
"epoch": 5.039130434782609,
"grad_norm": 0.016786742955446243,
"learning_rate": 2.5652173913043483e-05,
"loss": 0.0023,
"step": 620
},
{
"epoch": 5.047826086956522,
"grad_norm": 0.020967165008187294,
"learning_rate": 2.5169082125603866e-05,
"loss": 0.0037,
"step": 630
},
{
"epoch": 5.056521739130435,
"grad_norm": 0.07056716829538345,
"learning_rate": 2.4685990338164253e-05,
"loss": 0.004,
"step": 640
},
{
"epoch": 5.065217391304348,
"grad_norm": 0.993996798992157,
"learning_rate": 2.420289855072464e-05,
"loss": 0.0022,
"step": 650
},
{
"epoch": 5.073913043478261,
"grad_norm": 0.028120990842580795,
"learning_rate": 2.3719806763285024e-05,
"loss": 0.001,
"step": 660
},
{
"epoch": 5.082608695652174,
"grad_norm": 0.025797342881560326,
"learning_rate": 2.323671497584541e-05,
"loss": 0.0013,
"step": 670
},
{
"epoch": 5.091304347826087,
"grad_norm": 0.054205186665058136,
"learning_rate": 2.2753623188405797e-05,
"loss": 0.0045,
"step": 680
},
{
"epoch": 5.1,
"grad_norm": 0.02057654596865177,
"learning_rate": 2.2270531400966184e-05,
"loss": 0.0012,
"step": 690
},
{
"epoch": 5.1,
"eval_accuracy": 0.6707317073170732,
"eval_loss": 1.2574602365493774,
"eval_runtime": 6.097,
"eval_samples_per_second": 13.449,
"eval_steps_per_second": 3.444,
"step": 690
},
{
"epoch": 6.008695652173913,
"grad_norm": 0.028874915093183517,
"learning_rate": 2.178743961352657e-05,
"loss": 0.0009,
"step": 700
},
{
"epoch": 6.017391304347826,
"grad_norm": 0.01535830833017826,
"learning_rate": 2.1304347826086958e-05,
"loss": 0.0011,
"step": 710
},
{
"epoch": 6.026086956521739,
"grad_norm": 0.020613593980669975,
"learning_rate": 2.0821256038647345e-05,
"loss": 0.0028,
"step": 720
},
{
"epoch": 6.034782608695652,
"grad_norm": 0.031190281733870506,
"learning_rate": 2.0338164251207732e-05,
"loss": 0.0009,
"step": 730
},
{
"epoch": 6.043478260869565,
"grad_norm": 0.015504710376262665,
"learning_rate": 1.985507246376812e-05,
"loss": 0.0008,
"step": 740
},
{
"epoch": 6.052173913043478,
"grad_norm": 0.015204375609755516,
"learning_rate": 1.9371980676328502e-05,
"loss": 0.0018,
"step": 750
},
{
"epoch": 6.060869565217391,
"grad_norm": 0.010091892443597317,
"learning_rate": 1.888888888888889e-05,
"loss": 0.0011,
"step": 760
},
{
"epoch": 6.069565217391304,
"grad_norm": 0.027177872136235237,
"learning_rate": 1.8405797101449276e-05,
"loss": 0.0009,
"step": 770
},
{
"epoch": 6.078260869565217,
"grad_norm": 0.011965460143983364,
"learning_rate": 1.7922705314009663e-05,
"loss": 0.0007,
"step": 780
},
{
"epoch": 6.086956521739131,
"grad_norm": 0.014813700690865517,
"learning_rate": 1.7439613526570046e-05,
"loss": 0.001,
"step": 790
},
{
"epoch": 6.095652173913043,
"grad_norm": 0.016053922474384308,
"learning_rate": 1.6956521739130433e-05,
"loss": 0.0008,
"step": 800
},
{
"epoch": 6.1,
"eval_accuracy": 0.6829268292682927,
"eval_loss": 1.2972846031188965,
"eval_runtime": 6.1036,
"eval_samples_per_second": 13.435,
"eval_steps_per_second": 3.441,
"step": 805
},
{
"epoch": 7.004347826086956,
"grad_norm": 0.020354464650154114,
"learning_rate": 1.6473429951690824e-05,
"loss": 0.0014,
"step": 810
},
{
"epoch": 7.01304347826087,
"grad_norm": 0.013575357384979725,
"learning_rate": 1.599033816425121e-05,
"loss": 0.0009,
"step": 820
},
{
"epoch": 7.021739130434782,
"grad_norm": 0.022664641961455345,
"learning_rate": 1.5507246376811597e-05,
"loss": 0.0008,
"step": 830
},
{
"epoch": 7.030434782608696,
"grad_norm": 0.026450317353010178,
"learning_rate": 1.5024154589371981e-05,
"loss": 0.0007,
"step": 840
},
{
"epoch": 7.039130434782609,
"grad_norm": 0.013138238340616226,
"learning_rate": 1.4541062801932368e-05,
"loss": 0.0008,
"step": 850
},
{
"epoch": 7.047826086956522,
"grad_norm": 0.02052425779402256,
"learning_rate": 1.4057971014492755e-05,
"loss": 0.0007,
"step": 860
},
{
"epoch": 7.056521739130435,
"grad_norm": 0.012927724979817867,
"learning_rate": 1.3574879227053142e-05,
"loss": 0.0007,
"step": 870
},
{
"epoch": 7.065217391304348,
"grad_norm": 0.014560637064278126,
"learning_rate": 1.3091787439613527e-05,
"loss": 0.0007,
"step": 880
},
{
"epoch": 7.073913043478261,
"grad_norm": 0.008629231713712215,
"learning_rate": 1.2608695652173914e-05,
"loss": 0.0007,
"step": 890
},
{
"epoch": 7.082608695652174,
"grad_norm": 0.0358402356505394,
"learning_rate": 1.21256038647343e-05,
"loss": 0.0008,
"step": 900
},
{
"epoch": 7.091304347826087,
"grad_norm": 0.014950945042073727,
"learning_rate": 1.1642512077294687e-05,
"loss": 0.0007,
"step": 910
},
{
"epoch": 7.1,
"grad_norm": 0.01364809088408947,
"learning_rate": 1.1159420289855073e-05,
"loss": 0.0007,
"step": 920
},
{
"epoch": 7.1,
"eval_accuracy": 0.6829268292682927,
"eval_loss": 1.3180800676345825,
"eval_runtime": 6.3025,
"eval_samples_per_second": 13.011,
"eval_steps_per_second": 3.332,
"step": 920
},
{
"epoch": 8.008695652173913,
"grad_norm": 0.008946227841079235,
"learning_rate": 1.067632850241546e-05,
"loss": 0.0006,
"step": 930
},
{
"epoch": 8.017391304347827,
"grad_norm": 0.024497035890817642,
"learning_rate": 1.0193236714975846e-05,
"loss": 0.0007,
"step": 940
},
{
"epoch": 8.02608695652174,
"grad_norm": 0.01063747052103281,
"learning_rate": 9.710144927536233e-06,
"loss": 0.0006,
"step": 950
},
{
"epoch": 8.034782608695652,
"grad_norm": 0.014025327749550343,
"learning_rate": 9.227053140096618e-06,
"loss": 0.0008,
"step": 960
},
{
"epoch": 8.043478260869565,
"grad_norm": 0.021205030381679535,
"learning_rate": 8.743961352657005e-06,
"loss": 0.0007,
"step": 970
},
{
"epoch": 8.052173913043479,
"grad_norm": 0.009876050055027008,
"learning_rate": 8.26086956521739e-06,
"loss": 0.0007,
"step": 980
},
{
"epoch": 8.060869565217391,
"grad_norm": 0.007320360280573368,
"learning_rate": 7.777777777777777e-06,
"loss": 0.0006,
"step": 990
},
{
"epoch": 8.069565217391304,
"grad_norm": 0.007261498365551233,
"learning_rate": 7.294685990338164e-06,
"loss": 0.0006,
"step": 1000
},
{
"epoch": 8.078260869565218,
"grad_norm": 0.013149461708962917,
"learning_rate": 6.811594202898551e-06,
"loss": 0.0008,
"step": 1010
},
{
"epoch": 8.08695652173913,
"grad_norm": 0.013871278613805771,
"learning_rate": 6.328502415458938e-06,
"loss": 0.0007,
"step": 1020
},
{
"epoch": 8.095652173913043,
"grad_norm": 0.018679574131965637,
"learning_rate": 5.845410628019324e-06,
"loss": 0.0006,
"step": 1030
},
{
"epoch": 8.1,
"eval_accuracy": 0.6829268292682927,
"eval_loss": 1.329393982887268,
"eval_runtime": 6.1832,
"eval_samples_per_second": 13.262,
"eval_steps_per_second": 3.396,
"step": 1035
},
{
"epoch": 9.004347826086956,
"grad_norm": 0.011226188391447067,
"learning_rate": 5.36231884057971e-06,
"loss": 0.0006,
"step": 1040
},
{
"epoch": 9.013043478260869,
"grad_norm": 0.017457231879234314,
"learning_rate": 4.879227053140096e-06,
"loss": 0.0006,
"step": 1050
},
{
"epoch": 9.021739130434783,
"grad_norm": 0.023909136652946472,
"learning_rate": 4.396135265700483e-06,
"loss": 0.0007,
"step": 1060
},
{
"epoch": 9.030434782608696,
"grad_norm": 0.00866635050624609,
"learning_rate": 3.91304347826087e-06,
"loss": 0.0006,
"step": 1070
},
{
"epoch": 9.039130434782608,
"grad_norm": 0.013021158054471016,
"learning_rate": 3.4299516908212565e-06,
"loss": 0.0007,
"step": 1080
},
{
"epoch": 9.047826086956523,
"grad_norm": 0.008441662415862083,
"learning_rate": 2.9468599033816426e-06,
"loss": 0.0006,
"step": 1090
},
{
"epoch": 9.056521739130435,
"grad_norm": 0.012296984903514385,
"learning_rate": 2.463768115942029e-06,
"loss": 0.0006,
"step": 1100
},
{
"epoch": 9.065217391304348,
"grad_norm": 0.025126850232481956,
"learning_rate": 1.9806763285024155e-06,
"loss": 0.0007,
"step": 1110
},
{
"epoch": 9.07391304347826,
"grad_norm": 0.012579759582877159,
"learning_rate": 1.497584541062802e-06,
"loss": 0.0007,
"step": 1120
},
{
"epoch": 9.082608695652175,
"grad_norm": 0.012673572637140751,
"learning_rate": 1.0144927536231885e-06,
"loss": 0.0006,
"step": 1130
},
{
"epoch": 9.091304347826087,
"grad_norm": 0.007186697795987129,
"learning_rate": 5.314009661835749e-07,
"loss": 0.0006,
"step": 1140
},
{
"epoch": 9.1,
"grad_norm": 0.013606193475425243,
"learning_rate": 4.8309178743961356e-08,
"loss": 0.0006,
"step": 1150
},
{
"epoch": 9.1,
"eval_accuracy": 0.6951219512195121,
"eval_loss": 1.332796573638916,
"eval_runtime": 6.2824,
"eval_samples_per_second": 13.052,
"eval_steps_per_second": 3.343,
"step": 1150
},
{
"epoch": 9.1,
"step": 1150,
"total_flos": 5.732152700888678e+18,
"train_loss": 0.30071158163492445,
"train_runtime": 1642.1131,
"train_samples_per_second": 2.801,
"train_steps_per_second": 0.7
},
{
"epoch": 9.1,
"eval_accuracy": 0.6951219512195121,
"eval_loss": 1.2568752765655518,
"eval_runtime": 5.6457,
"eval_samples_per_second": 14.524,
"eval_steps_per_second": 3.72,
"step": 1150
}
],
"logging_steps": 10,
"max_steps": 1150,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.732152700888678e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
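
For reference, trainer_state.json is the state file the Hugging Face Trainer writes alongside its checkpoints: per-step training losses and per-evaluation metrics are appended to log_history, and the best checkpoint is tracked in the top-level best_* fields. Below is a minimal Python sketch for summarizing the evaluation history; the local path "trainer_state.json" is an assumption, so point it at wherever this file was saved.

import json

# Minimal sketch for inspecting a Trainer state file.
# The local path is an assumption; adjust it to wherever trainer_state.json lives.
PATH = "trainer_state.json"

with open(PATH) as f:
    state = json.load(f)

# Evaluation entries in log_history carry "eval_accuracy"; plain training-log
# entries carry only "loss"/"grad_norm"/"learning_rate" and are skipped here.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:<5} step {entry['step']:>4}  "
              f"eval_accuracy {entry['eval_accuracy']:.4f}  "
              f"eval_loss {entry['eval_loss']:.4f}")

print(f"best_metric {state['best_metric']:.4f} at step {state['best_global_step']} "
      f"-> {state['best_model_checkpoint']}")

Run against this file, the final line reports the best eval_accuracy of 0.6951 at step 460 (checkpoint-460), matching the best_metric and best_model_checkpoint fields above.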