nm-testing/TinyLlama-1.1B-Chat-v1.0-pruned_50.2of4-FP8-compressed • 0.6B • Updated • 9 • 1
nm-testing/TinyLlama-1.1B-Chat-v1.0-pruned_50.2of4-FP8-uncompressed • 1B • Updated • 7
nm-testing/Llama-3.1-8B-gsm8k-quantized.w4a16-noactorder • 2B • Updated • 9
nm-testing/Sparse-Llama-3.1-8B-evolcodealpaca-pruned.2of4-quantized.w4a16-test • 2B • Updated • 7
nm-testing/pixtral-12b-W4A16 • 3B • Updated • 7
nm-testing/pixtral-12b-W8A8 • 13B • Updated • 9 • 2
nm-testing/Llama-3.2-90B-Vision-Instruct-quantized.w4a16 • 14B • Updated • 12 • 1
nm-testing/Llama-3.2-11B-Vision-Instruct-quantized.w8a8 • 11B • Updated • 11
nm-testing/Llama-3.1-8B-gsm8k-quantized.w4a16 • 2B • Updated • 6
nm-testing/debug-gsm8k-llmcompressor_packed • 2B • Updated • 7
nm-testing/debug-gsm8k-autogptq_packed • 2B • Updated • 6
nm-testing/SparseLlama-3.1-8B-evolcodealpaca-pruned.2of4-FP8-dynamic • 8B • Updated • 6
nm-testing/Qwen2.5-Coder-32B-Instruct-FP8-dynamic • 33B • Updated • 10 • 5
nm-testing/Qwen2.5-32B-Instruct-FP8-dynamic • 33B • Updated • 46 • 2
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-uncompressed • 8B • Updated • 8
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-FP8-compressed • 5B • Updated • 7
nm-testing/Llama-3.1-8B-FP8-compressed • 8B • Updated • 635
nm-testing/Llama-3.1-8B-FP8-uncompressed • 8B • Updated • 6
nm-testing/Llama-3.1-8B-Instruct-GPTQ-nonuniform • 2B • Updated • 8
nm-testing/Llama-3.2-1B-Instruct-GPTQ-nonuniform • 0.7B • Updated • 8
nm-testing/debug-gsm8k-autogptq_unpacked • 8B • Updated • 4
nm-testing/Llama-3_1-8B_2of4_w4a16_gsm8k_256_8196_damp0_1_mse_llm_compressor • 2B • Updated • 10
nm-testing/Llama-3_1-8B__w4a16_gsm8k_256_8196_damp0_1_noactorder_autogptq • 8B • Updated • 4
nm-testing/SparseLlama-3.1-8B-gsm8k-pruned.2of4-quantized.w4a16-uncompressed • 2B • Updated • 5
nm-testing/yolov8-m_pytorch_ultralytics_voc_pruned80_quant-none • Updated
nm-testing/mobilenet_v1-1.0_pytorch_sparseml_imagenet_pruned-moderate • Updated
nm-testing/Llama-3.2-1B-Instruct-FP8-KV • 1B • Updated • 12
nm-testing/llava-1.5-7b-hf-FP8-dynamic • 7B • Updated • 27
nm-testing/Llama-3.2-1B-Instruct-HQQ • 0.8B • Updated • 149
nm-testing/TinyLlama-1.1B-Chat-v1.0-pruned_50.2of4-INT8-compressed • 0.6B • Updated • 10
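Each entry above is a repository id that can be pulled straight from the Hugging Face Hub. A minimal sketch of loading one of the listed FP8 checkpoints follows; the choice of vLLM as the serving stack, the sampling settings, and the prompt are assumptions for illustration only, and nm-testing/Llama-3.1-8B-FP8-compressed is simply one id taken from the list.

```python
# Minimal sketch: running a listed FP8 checkpoint with vLLM.
# Assumes vLLM is installed and the local GPU supports the checkpoint's
# FP8 / compressed-tensors format; the model id comes from the listing above.
from vllm import LLM, SamplingParams

llm = LLM(model="nm-testing/Llama-3.1-8B-FP8-compressed")
params = SamplingParams(temperature=0.0, max_tokens=64)

# Generate a single completion and print the decoded text.
outputs = llm.generate(["Explain 2:4 structured sparsity in one sentence."], params)
for out in outputs:
    print(out.outputs[0].text)
```

The same pattern applies to the other ids in the list; only the repository string changes.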