{"model": "ISTA-DASLab/Llama-2-7b-AQLM-2Bit-8x8-hf", "revision": "main", "private": false, "params": 2.73, "architectures": "LlamaForCausalLM", "quant_type": "AQLM", "precision": "2bit", "model_params": 6.48, "model_size": 2.73, "weight_dtype": "int2", "compute_dtype": "float16", "gguf_ftype": "*Q4_0.gguf", "hardware": "gpu", "status": "Pending", "submitted_time": "2024-05-15T03:43:56Z", "model_type": "quantization", "job_id": -1, "job_start_time": null, "scripts": "ITREX"}