{
  "bits": 4,
  "group_size": 128,
  "sym": true,
  "data_type": "int",
  "enable_quanted_input": true,
  "enable_minmax_tuning": true,
  "seqlen": 2048,
  "batch_size": 8,
  "scale_dtype": "torch.float16",
  "lr": 0.001,
  "minmax_lr": 0.001,
  "gradient_accumulate_steps": 1,
  "iters": 1000,
  "amp": true,
  "nsamples": 512,
  "low_gpu_mem_usage": true,
  "enable_norm_bias_tuning": false,
  "act_bits": 16,
  "act_group_size": 128,
  "act_sym": true,
  "act_dynamic": true,
  "act_data_type": "int",
  "super_bits": null,
  "super_group_size": null,
  "dataset": "NeelNanda/pile-10k",
  "autoround_version": "0.5.1",
  "block_name_to_quantize": null,
  "quant_method": "auto-round",
  "packing_format": "auto_round:auto_gptq"
}
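
The fields above map onto the tuning hyperparameters of the `auto-round` Python API (weight bits, group size, symmetric quantization, 1000 tuning iterations over 512 calibration samples from NeelNanda/pile-10k, etc.). Below is a minimal sketch of how a recipe like this could be reproduced; the model and output paths are placeholders, and exact argument names or save formats may differ between `auto-round` releases, so treat this as an illustration rather than the exact command used to produce the config.

```python
# Sketch: reproducing the quantization recipe captured in the JSON config.
# Paths are hypothetical; argument names follow the auto-round documentation
# and may vary slightly across versions (this config reports 0.5.1).
from transformers import AutoModelForCausalLM, AutoTokenizer
from auto_round import AutoRound

model_name = "path/to/base-model"  # placeholder for the FP16 source model
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)

autoround = AutoRound(
    model,
    tokenizer,
    bits=4,                        # "bits": 4
    group_size=128,                # "group_size": 128
    sym=True,                      # "sym": true
    iters=1000,                    # "iters": 1000
    nsamples=512,                  # "nsamples": 512
    seqlen=2048,                   # "seqlen": 2048
    batch_size=8,                  # "batch_size": 8
    lr=1e-3,                       # "lr": 0.001
    minmax_lr=1e-3,                # "minmax_lr": 0.001
    gradient_accumulate_steps=1,   # "gradient_accumulate_steps": 1
    low_gpu_mem_usage=True,        # "low_gpu_mem_usage": true
    dataset="NeelNanda/pile-10k",  # "dataset"
)

autoround.quantize()
# "auto_gptq" packing corresponds to the "auto_round:auto_gptq" packing_format
autoround.save_quantized("./quantized_model", format="auto_gptq")
```

A checkpoint saved this way carries the `quantization_config` shown above in its `config.json`, so it can be loaded with the standard `AutoModelForCausalLM.from_pretrained(...)` call provided a compatible GPTQ/AutoRound kernel backend is installed.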