---
# AWQ (Activation-aware Weight Quantization) recipe: int8, group-wise (size 32),
# symmetric weight quantization of Linear layers with an MSE observer.
# NOTE(review): structure reconstructed from a whitespace-collapsed source —
# nesting follows the standard compressed-tensors recipe schema; confirm against
# the consuming tool before relying on it.
quant_stage:
  quant_modifiers:
    AWQModifier:
      config_groups:
        group_0:
          # Quantization scheme applied to all Linear modules in this group.
          targets: [Linear]
          weights:
            num_bits: 8
            type: int
            symmetric: true
            group_size: 32
            strategy: group
            block_structure: null
            dynamic: false
            actorder: null
            observer: mse
            observer_kwargs: {}
          # Activations left unquantized (weight-only quantization).
          input_activations: null
          output_activations: null
          format: null
      targets: [Linear]
      # Modules excluded from quantization: embeddings, Mamba/SSM mixer
      # parameters, norms, and the LM head. Entries prefixed with 're:' are
      # regular expressions; all are quoted for consistency.
      ignore:
        - 'backbone.embeddings'
        - 're:.*mixer.A_log'
        - 're:.*conv1d.bias'
        - 're:.*mixer.D'
        - 're:.*mixer.dt_bias'
        - 're:.*norm'
        - 're:backbone.norm_f'
        - 'lm_head'
      # Smooth/balance layer pairs for AWQ scale propagation.
      mappings:
        - smooth_layer: 're:.*norm$'
          balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$', 're:.*up_proj$', 're:.*in_proj$']
        - smooth_layer: 're:.*v_proj$'
          balance_layers: ['re:.*o_proj$']
        - smooth_layer: 're:.*up_proj$'
          balance_layers: ['re:.*down_proj$']
      duo_scaling: true