This model was quantized with GPTQModel using the following code:
```python
import json
import os
import random

from datasets import load_dataset
from gptqmodel import GPTQModel, QuantizeConfig
from huggingface_hub import constants

model_id = "Qwen/Qwen3-32B"

# Save the quantized model in the HF cache directory
cache_dir = constants.HF_HUB_CACHE
quant_path = os.path.join(
    cache_dir,
    "models--quantized--" + model_id.replace("/", "--") + "mixed--calibration",
)
os.makedirs(quant_path, exist_ok=True)

# Load calibration data (512 samples from C4)
calibration_dataset = load_dataset(
    "allenai/c4",
    data_files="en/c4-train.00001-of-01024.json.gz",
    split="train",
).select(range(512))["text"]

# Load the custom calibration dataset (one JSON object per line)
custom_calibration_dataset = []
with open("./data/custom_calibration_dataset.jsonl", "r") as f:
    for line in f:
        if line.strip():  # Skip empty lines
            item = json.loads(line)
            custom_calibration_dataset.append(item["text"])

# Randomly choose 512 samples from the custom dataset, always include
# its last 6 samples, and deduplicate
selected_samples = random.sample(custom_calibration_dataset, 512)
selected_samples.extend(custom_calibration_dataset[-6:])
selected_samples = list(set(selected_samples))
calibration_dataset.extend(selected_samples)

# Shuffle the mixed calibration set
random.shuffle(calibration_dataset)

# Configure and run quantization (4-bit, group size 128)
quant_config = QuantizeConfig(bits=4, group_size=128)
model = GPTQModel.load(model_id, quant_config)
model.quantize(calibration_dataset, batch_size=2)
model.save(quant_path)
```
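
Once saved, the quantized model can be loaded back for inference. The sketch below follows GPTQModel's documented load/generate pattern; the prompt text is illustrative, and `quant_path` is the directory defined above.

```python
from gptqmodel import GPTQModel

# Load the saved 4-bit quantized model from disk
model = GPTQModel.load(quant_path)

# generate() returns token IDs; decode the first sequence with the
# model's bundled tokenizer
tokens = model.generate("Explain GPTQ quantization in one sentence:")[0]
print(model.tokenizer.decode(tokens))
```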
Base model: Qwen/Qwen3-32B