Add tclf90/qwen2.5-72b-instruct-gptq-int3 to eval queue
tclf90/qwen2.5-72b-instruct-gptq-int3_eval_request_False_GPTQ_3bit_int3_float16.json
ADDED
@@ -0,0 +1 @@
+{"model": "tclf90/qwen2.5-72b-instruct-gptq-int3", "revision": "main", "private": false, "params": 32.76, "architectures": "Qwen2ForCausalLM", "quant_type": "GPTQ", "precision": "3bit", "model_params": 79.97, "model_size": 32.76, "weight_dtype": "int3", "compute_dtype": "float16", "gguf_ftype": "*Q4_0.gguf", "hardware": "gpu", "status": "Pending", "submitted_time": "2024-11-04T04:36:57Z", "model_type": "quantization", "job_id": -1, "job_start_time": null, "scripts": "ITREX"}
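For reference, a minimal sketch of reading and sanity-checking this request file locally (plain Python; the field names come from the JSON above, while the specific checks are illustrative and not part of the eval pipeline):

```python
import json

# Path of the file added in this commit.
path = "tclf90/qwen2.5-72b-instruct-gptq-int3_eval_request_False_GPTQ_3bit_int3_float16.json"

with open(path) as f:
    request = json.load(f)

# Illustrative consistency checks for a pending GPTQ 3-bit request.
assert request["status"] == "Pending"
assert request["quant_type"] == "GPTQ"
assert request["precision"] == "3bit"
assert request["weight_dtype"] == "int3" and request["compute_dtype"] == "float16"

print(request["model"], request["architectures"], request["submitted_time"])
```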