Test Only fp4: Lluo/fp4 try out #3521
base: main
Conversation
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-15 17:28:16.606815+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-15 17:28:40.517973+00:00
@@ -140,12 +140,11 @@
return dequantized_data
# TODO: to remove it this is to make sure our global scale and block scale calculation is correct during debugging
def _test_weights_scaling_factor(
- weights_tensor: torch.Tensor,
- global_scale: torch.Tensor
+ weights_tensor: torch.Tensor, global_scale: torch.Tensor
) -> None:
import modelopt.core.torch.quantization.qtensor.nvfp4_tensor as nvfp4_tensor
import modelopt.onnx.quantization.quant_utils as quant_utils
@@ -192,11 +191,13 @@
"""
import modelopt.core.torch.quantization.qtensor.nvfp4_tensor as nvfp4_tensor
block_scale_fp8 = nvfp4_tensor.NVFP4QTensor.get_weights_scaling_factor(
- weights_tensor, 16, global_scale,
+ weights_tensor,
+ 16,
+ global_scale,
)[0]
weights_tensor_scaled = nvfp4_tensor.NVFP4QTensor.quantize(
weights_tensor,
16,
@@ -205,11 +206,13 @@
)[0]._quantized_data
block_scale_fp8 = get_trt_tensor(ctx, block_scale_fp8, name + "_block_scale_fp8")
global_scale = to_torch(global_scale, None)
global_scale = get_trt_tensor(ctx, global_scale, name + "_global_scale")
- weights_fp4_represented_in_uint8 = get_trt_tensor(ctx, weights_tensor_scaled, name + "_weights_fp4_represented_in_uint8")
+ weights_fp4_represented_in_uint8 = get_trt_tensor(
+ ctx, weights_tensor_scaled, name + "_weights_fp4_represented_in_uint8"
+ )
# dequantize block scale from fp8 to float32
dequantize_block_scale_layer = ctx.net.add_dequantize(
block_scale_fp8,
global_scale,
@@ -248,6 +251,5 @@
) # amax is calculated from input_tensor.abs().amax().float()
global_scale = torch.divide(amax, 6 * 448)
if global_scale == 0:
global_scale = 1.0
return global_scale
-
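
For reference, the global-scale computation touched at the end of this diff can be read as a standalone helper. The following is a minimal sketch, assuming the two-level NVFP4 scheme used by the converter (FP4 E2M1 has a maximum magnitude of 6.0, and the FP8 E4M3 block scales max out at 448.0); the function name is illustrative, not part of the converter.

import torch

def compute_nvfp4_global_scale(input_tensor: torch.Tensor) -> torch.Tensor:
    # amax is calculated from input_tensor.abs().amax().float(), as in the diff.
    amax = input_tensor.abs().amax().float()
    # 6 = max magnitude of FP4 (E2M1); 448 = max magnitude of FP8 (E4M3).
    global_scale = torch.divide(amax, 6 * 448)
    if global_scale == 0:
        # Guard against all-zero tensors so later divisions stay finite.
        global_scale = torch.ones_like(global_scale)
    return global_scale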
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-16 17:17:53.756341+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-16 17:18:21.840287+00:00
@@ -107,11 +107,13 @@
"""
global_scale = get_trt_tensor(ctx, global_scale, name + "_global_scale")
if input_tensor.dtype not in [trt.DataType.HALF, trt.DataType.FLOAT]:
- raise ValueError(f"Currently try float16, float32 only on input tensor for now. Unsupported dtype: {input_tensor.dtype}")
+ raise ValueError(
+ f"Currently try float16, float32 only on input tensor for now. Unsupported dtype: {input_tensor.dtype}"
+ )
# dynamic quantize input tensor to fp4
dynamic_quantize_layer = ctx.net.add_dynamic_quantize(
input_tensor,
axis,
block_size,
@@ -194,17 +196,19 @@
Returns:
quantized data tensor in fp4
"""
import modelopt.core.torch.quantization.qtensor.nvfp4_tensor as nvfp4_tensor
-
+
if weights_tensor.dtype == torch.float16:
original_dtype = trt.DataType.HALF
elif weights_tensor.dtype == torch.float32:
original_dtype = trt.DataType.FLOAT
else:
- raise ValueError(f"Currently try float16, float32 only on weights tensor. Unsupported dtype: {weights_tensor.dtype}")
+ raise ValueError(
+ f"Currently try float16, float32 only on weights tensor. Unsupported dtype: {weights_tensor.dtype}"
+ )
block_scale_fp8 = nvfp4_tensor.NVFP4QTensor.get_weights_scaling_factor(
weights_tensor,
16,
global_scale,
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-16 17:17:53.783341+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-16 17:18:27.298406+00:00
@@ -213,11 +213,13 @@
from modelopt.torch.quantization.utils import export_torch_mode
class SimpleNetwork(torch.nn.Module):
def __init__(self):
super(SimpleNetwork, self).__init__()
- self.linear1 = torch.nn.Linear(in_features=64, out_features=32, bias=False, dtype=torch.float16)
+ self.linear1 = torch.nn.Linear(
+ in_features=64, out_features=32, bias=False, dtype=torch.float16
+ )
def forward(self, x):
x = self.linear1(x)
return x
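
The dtype guard reformatted in this hunk only admits fp16 and fp32 tensors. A minimal sketch of that check as a table-driven helper follows; the helper name and lookup dict are illustrative additions, not code from the converter.

import tensorrt as trt
import torch

# Illustrative torch -> TensorRT dtype table; only the two dtypes the
# converter accepts are listed.
_TORCH_TO_TRT = {
    torch.float16: trt.DataType.HALF,
    torch.float32: trt.DataType.FLOAT,
}

def to_trt_dtype(tensor: torch.Tensor) -> trt.DataType:
    if tensor.dtype not in _TORCH_TO_TRT:
        raise ValueError(
            f"Currently try float16, float32 only. Unsupported dtype: {tensor.dtype}"
        )
    return _TORCH_TO_TRT[tensor.dtype]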
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-18 17:54:24.708675+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-18 17:54:58.520847+00:00
@@ -235,11 +235,11 @@
print(f"lan added pytorch output_pyt: {output_pyt}")
quant_cfg = mtq.NVFP4_DEFAULT_CFG
mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop)
# model has qdq nodes at this point
-
+
torch.onnx.export(model, input_tensor, "mtq_model.onnx")
with torch.no_grad():
with export_torch_mode():
exp_program = torch.export.export(model, (input_tensor,), strict=False)
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-18 21:19:00.783067+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-18 21:19:23.297120+00:00
@@ -214,19 +214,25 @@
block_scale_fp8 = nvfp4_tensor.NVFP4QTensor.get_weights_scaling_factor(
weights_tensor,
16,
global_scale,
)[0]
- print(f"lan added global_scale: {global_scale.shape=} {global_scale.dtype=} {global_scale=}")
- print(f"lan added block_scale_fp8: {block_scale_fp8.shape=} {block_scale_fp8.dtype=} {block_scale_fp8=}")
+ print(
+ f"lan added global_scale: {global_scale.shape=} {global_scale.dtype=} {global_scale=}"
+ )
+ print(
+ f"lan added block_scale_fp8: {block_scale_fp8.shape=} {block_scale_fp8.dtype=} {block_scale_fp8=}"
+ )
weights_tensor_fp4 = nvfp4_tensor.NVFP4QTensor.quantize(
weights_tensor,
16,
block_scale_fp8,
global_scale,
)[0]._quantized_data
- print(f"lan added weights_tensor_fp4: {weights_tensor_fp4.shape=} {weights_tensor_fp4.dtype=} {weights_tensor_fp4=}")
+ print(
+ f"lan added weights_tensor_fp4: {weights_tensor_fp4.shape=} {weights_tensor_fp4.dtype=} {weights_tensor_fp4=}"
+ )
block_scale_fp8 = get_trt_tensor(ctx, block_scale_fp8, name + "_block_scale_fp8")
global_scale = to_torch(global_scale, None)
global_scale = get_trt_tensor(ctx, global_scale, name + "_global_scale")
weights_tensor_fp4 = get_trt_tensor(ctx, weights_tensor_fp4, name + "_weights_fp4")
# dequantize block scale from fp8 to original dtype (default is float32)
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-18 21:19:00.810067+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-18 21:19:28.498117+00:00
@@ -229,22 +229,28 @@
input_tensor = torch.ones(128, 64, dtype=torch.float16).cuda()
print(f"lan added amax: {input_tensor.abs().amax()}")
model = SimpleNetwork().eval().cuda()
- model.linear1.weight = torch.nn.Parameter(torch.ones(32, 64, dtype=torch.float16).cuda())
- model.linear1.bias = torch.nn.Parameter(torch.zeros(128, 32, dtype=torch.float16).cuda())
+ model.linear1.weight = torch.nn.Parameter(
+ torch.ones(32, 64, dtype=torch.float16).cuda()
+ )
+ model.linear1.bias = torch.nn.Parameter(
+ torch.zeros(128, 32, dtype=torch.float16).cuda()
+ )
output_pyt = model(input_tensor)
- print(f"lan added model input: {input_tensor=}")
+ print(f"lan added model input: {input_tensor=}")
print(f"lan added model weight: {model.linear1.weight=}")
print(f"lan added model bias: {model.linear1.bias=}")
- print(f"lan added pytorch output_pyt: {output_pyt} {output_pyt.dtype=} {output_pyt.shape=}")
+ print(
+ f"lan added pytorch output_pyt: {output_pyt} {output_pyt.dtype=} {output_pyt.shape=}"
+ )
quant_cfg = mtq.NVFP4_DEFAULT_CFG
mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop)
# model has qdq nodes at this point
-
+
torch.onnx.export(model, input_tensor, "mtq_model.onnx")
with torch.no_grad():
with export_torch_mode():
exp_program = torch.export.export(model, (input_tensor,), strict=False)
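
Taken together, the weight path in this hunk first computes per-block FP8 scaling factors and then packs the weights into FP4. A minimal sketch of that sequence, using only the modelopt calls quoted in the diff (block size 16; the wrapper function name is illustrative):

import torch
import modelopt.core.torch.quantization.qtensor.nvfp4_tensor as nvfp4_tensor

def quantize_weights_to_fp4(
    weights_tensor: torch.Tensor, global_scale: torch.Tensor
):
    # Per-block (block size 16) FP8 scaling factors, as in the converter.
    block_scale_fp8 = nvfp4_tensor.NVFP4QTensor.get_weights_scaling_factor(
        weights_tensor,
        16,
        global_scale,
    )[0]
    # FP4-quantized weights, packed two 4-bit values per uint8 byte.
    weights_tensor_fp4 = nvfp4_tensor.NVFP4QTensor.quantize(
        weights_tensor,
        16,
        block_scale_fp8,
        global_scale,
    )[0]._quantized_data
    return block_scale_fp8, weights_tensor_fp4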
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/_compiler.py 2025-05-21 21:05:16.522261+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/_compiler.py 2025-05-21 21:05:39.067088+00:00
@@ -580,16 +580,16 @@
f"Detected torch_executed_modules was non-empty: {torch_executed_modules}"
"\nThis feature is unimplemented in Torch-TRT Dynamo currently."
)
# if use_explicit_typing:
- # if len(enabled_precisions) != 1 or not any(
- # x in enabled_precisions for x in {torch.float32, dtype.f32}
- # ):
- # raise AssertionError(
- # f"When use_explicit_typing is enabled, only torch.float32 is allowed in the enabled_precisions but found {enabled_precisions}"
- # )
+ # if len(enabled_precisions) != 1 or not any(
+ # x in enabled_precisions for x in {torch.float32, dtype.f32}
+ # ):
+ # raise AssertionError(
+ # f"When use_explicit_typing is enabled, only torch.float32 is allowed in the enabled_precisions but found {enabled_precisions}"
+ # )
if use_fp32_acc:
logger.debug(
"FP32 accumulation for matmul layers is enabled. This option should only be enabled if the model already has FP16 weights and has no effect if it has FP32 weights. \
This flag inserts casts around matmul layers and ensures TensorRT executes the matmul layers in FP16 with FP32 accumulation."
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-21 21:05:16.525261+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/conversion/impl/nvfp4_quantize.py 2025-05-21 21:05:39.293128+00:00
@@ -12,10 +12,11 @@
to_torch,
)
from torch_tensorrt.fx.converters.converter_utils import set_layer_name
from torch_tensorrt.fx.types import TRTTensor
import os
+
def nvfp4_quantize(
ctx: ConversionContext,
target: Target,
source_ir: Optional[SourceIR],
@@ -219,19 +220,25 @@
block_scale_fp8 = nvfp4_tensor.NVFP4QTensor.get_weights_scaling_factor(
weights_tensor,
16,
global_scale,
)[0]
- print(f"lan added global_scale: {global_scale.shape=} {global_scale.dtype=} {global_scale=}")
- print(f"lan added block_scale_fp8: {block_scale_fp8.shape=} {block_scale_fp8.dtype=} {block_scale_fp8=}")
+ print(
+ f"lan added global_scale: {global_scale.shape=} {global_scale.dtype=} {global_scale=}"
+ )
+ print(
+ f"lan added block_scale_fp8: {block_scale_fp8.shape=} {block_scale_fp8.dtype=} {block_scale_fp8=}"
+ )
weights_tensor_fp4 = nvfp4_tensor.NVFP4QTensor.quantize(
weights_tensor,
16,
block_scale_fp8,
global_scale,
)[0]._quantized_data
- print(f"lan added weights_tensor_fp4: {weights_tensor_fp4.shape=} {weights_tensor_fp4.dtype=} {weights_tensor_fp4=}")
+ print(
+ f"lan added weights_tensor_fp4: {weights_tensor_fp4.shape=} {weights_tensor_fp4.dtype=} {weights_tensor_fp4=}"
+ )
block_scale_fp8 = get_trt_tensor(ctx, block_scale_fp8, name + "_block_scale_fp8")
global_scale = to_torch(global_scale, None)
global_scale = get_trt_tensor(ctx, global_scale, name + "_global_scale")
weights_tensor_fp4 = get_trt_tensor(ctx, weights_tensor_fp4, name + "_weights_fp4")
# dequantize block scale from fp8 to original dtype (default is float32)
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-21 21:05:16.552261+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_models_export.py 2025-05-21 21:05:44.770942+00:00
@@ -228,22 +228,28 @@
input_tensor = torch.ones(128, 64, dtype=torch.float16).cuda()
print(f"lan added amax: {input_tensor.abs().amax()}")
model = SimpleNetwork().eval().cuda()
- model.linear1.weight = torch.nn.Parameter(torch.ones(32, 64, dtype=torch.float16).cuda())
- model.linear1.bias = torch.nn.Parameter(torch.ones(128, 32, dtype=torch.float16).cuda())
+ model.linear1.weight = torch.nn.Parameter(
+ torch.ones(32, 64, dtype=torch.float16).cuda()
+ )
+ model.linear1.bias = torch.nn.Parameter(
+ torch.ones(128, 32, dtype=torch.float16).cuda()
+ )
output_pyt = model(input_tensor)
- print(f"lan added model input: {input_tensor=}")
+ print(f"lan added model input: {input_tensor=}")
print(f"lan added model weight: {model.linear1.weight=}")
print(f"lan added model bias: {model.linear1.bias=}")
- print(f"lan added pytorch output_pyt: {output_pyt} {output_pyt.dtype=} {output_pyt.shape=}")
+ print(
+ f"lan added pytorch output_pyt: {output_pyt} {output_pyt.dtype=} {output_pyt.shape=}"
+ )
quant_cfg = mtq.NVFP4_DEFAULT_CFG
mtq.quantize(model, quant_cfg, forward_loop=calibrate_loop)
# model has qdq nodes at this point
-
+
torch.onnx.export(model, input_tensor, "mtq_model.onnx")
with torch.no_grad():
with export_torch_mode():
exp_program = torch.export.export(model, (input_tensor,), strict=False)
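
For orientation, the test flow exercised by these hunks reduces to: build a tiny fp16 linear model, calibrate, quantize with the NVFP4 default config, and export. Below is a minimal sketch assembled from the calls quoted above; the single-pass calibrate_loop is a simplification (real calibration would iterate over a dataset), and the debug prints and parameter overrides from the diff are omitted.

import modelopt.torch.quantization as mtq
import torch
from modelopt.torch.quantization.utils import export_torch_mode

class SimpleNetwork(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(
            in_features=64, out_features=32, bias=False, dtype=torch.float16
        )

    def forward(self, x):
        return self.linear1(x)

input_tensor = torch.ones(128, 64, dtype=torch.float16).cuda()
model = SimpleNetwork().eval().cuda()

def calibrate_loop(model):
    # Single forward pass for calibration; real use iterates a dataset.
    model(input_tensor)

mtq.quantize(model, mtq.NVFP4_DEFAULT_CFG, forward_loop=calibrate_loop)
# The model now contains Q/DQ nodes.
with torch.no_grad():
    with export_torch_mode():
        exp_program = torch.export.export(model, (input_tensor,), strict=False)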