From 18e7e92939bf0f5f631bd729a498ad1666799fe2 Mon Sep 17 00:00:00 2001
From: weimingc <17592131+meenchen@users.noreply.github.com>
Date: Thu, 6 Nov 2025 22:08:39 +0000
Subject: [PATCH 1/2] fix

Signed-off-by: weimingc <17592131+meenchen@users.noreply.github.com>
---
 modelopt/torch/quantization/nn/modules/tensor_quantizer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/modelopt/torch/quantization/nn/modules/tensor_quantizer.py b/modelopt/torch/quantization/nn/modules/tensor_quantizer.py
index bf801646b..4c8440f3d 100644
--- a/modelopt/torch/quantization/nn/modules/tensor_quantizer.py
+++ b/modelopt/torch/quantization/nn/modules/tensor_quantizer.py
@@ -558,7 +558,9 @@ def _real_quantize(self, inputs):
                 inputs,
                 axis=self._axis,
                 block_sizes=self._block_sizes,
-                scales=self.amax / 448.0 if self.amax is not None else None,
+                scales=self.amax / 448.0
+                if (self.amax is not None and not self._block_sizes)
+                else None,
             )
             buffer_to_register["_scale"] = _scale
         elif self._num_bits == 8:

From 17dcf35ac9569c21adeca739e9b894add07085d0 Mon Sep 17 00:00:00 2001
From: weimingc <17592131+meenchen@users.noreply.github.com>
Date: Thu, 6 Nov 2025 22:21:20 +0000
Subject: [PATCH 2/2] add test

Signed-off-by: weimingc <17592131+meenchen@users.noreply.github.com>
---
 .../torch/quantization/test_qtensor_cuda.py | 33 +++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/tests/gpu/torch/quantization/test_qtensor_cuda.py b/tests/gpu/torch/quantization/test_qtensor_cuda.py
index f1e511c21..a3710ea0c 100644
--- a/tests/gpu/torch/quantization/test_qtensor_cuda.py
+++ b/tests/gpu/torch/quantization/test_qtensor_cuda.py
@@ -569,3 +569,36 @@ def test_nvfp4_dequantize_fast(self, shape, input_dtype):
             f"Fast and standard dequantization differ: "
             f"max diff = {(dequant_fast - dequant_standard).abs().max()}"
         )
+
+    @pytest.mark.parametrize("device", ["cuda"])
+    @pytest.mark.parametrize("input_dtype", [torch.float32, torch.float16, torch.bfloat16])
+    @pytest.mark.parametrize(
+        ("input_shape", "block_sizes"),
+        [
+            ((128, 1152), {-1: 128}),
+            ((256, 256), {-1: 64, -2: 64}),  # 2D block sizes
+        ],
+    )
+    def test_fp8_with_amax_and_block_sizes(self, device, input_dtype, input_shape, block_sizes):
+        """Test FP8 quantization with both amax and block_sizes specified."""
+        quant_cfg = QuantizerAttributeConfig(
+            num_bits=(4, 3),
+            block_sizes=block_sizes,
+            fake_quant=False,
+        )
+        quantizer = TensorQuantizer(quant_cfg).to(device)
+
+        # Set a mock amax (scalar) - this was causing the bug
+        mock_amax = torch.tensor(1.5, device=device)
+        quantizer.amax = mock_amax
+
+        # Create input tensor
+        x = torch.randn(input_shape, dtype=input_dtype, device=device)
+
+        # QDQ
+        q_x = quantizer(x)
+        deq_x = quantizer(q_x)
+
+        assert torch.allclose(deq_x, x, rtol=1e-1, atol=1e-1)
+        assert hasattr(quantizer, "_scale")
+        assert quantizer._scale.numel() > 1
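
Reviewer note: below is a minimal standalone sketch of the scale-selection
guard that PATCH 1/2 introduces. The helper name select_scales and the
constant FP8_E4M3_MAX are illustrative only, not modelopt API; 448.0 is the
maximum finite magnitude of the float8_e4m3fn format, which is where the
amax / 448.0 per-tensor scale comes from.

    import torch

    FP8_E4M3_MAX = 448.0  # largest finite value representable in float8_e4m3fn

    def select_scales(amax, block_sizes):
        # Mirrors the patched conditional: derive a per-tensor scale from amax
        # only when no block sizes are configured. With block quantization the
        # quantization path computes per-block scales from the data itself, so
        # a scalar amax-derived scale must not be passed through.
        if amax is not None and not block_sizes:
            return amax / FP8_E4M3_MAX
        return None

    # Per-tensor FP8: the scalar amax drives a single scale.
    assert select_scales(torch.tensor(1.5), None) is not None
    # Block-quantized FP8: amax is ignored so per-block scales get computed,
    # which appears to be what the new test's `quantizer._scale.numel() > 1`
    # assertion verifies.
    assert select_scales(torch.tensor(1.5), {-1: 128}) is None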