1 file changed: +4 −3 lines changed
@@ -226,11 +226,12 @@ def quantize_pt2(
226226 calibration_data = calibration_data ,
227227 dump_graphs = dump_graphs ,
228228 )
229- # Wrap the model to handle quantized inputs
230- wrapped_module = QuantizedInputWrapper (converted_gm , quant_input_args ).module
229+ # Wrap the model to handle quantized inputs if provided
230+ if quant_input_args is not None :
231+ converted_gm = QuantizedInputWrapper (converted_gm , quant_input_args )
231232
232233 # Apply quant fusion to the exported program
233- program = torch .export .export (wrapped_module , inputs , strict = True )
234+ program = torch .export .export (converted_gm , inputs , strict = True )
234235 fused_program = apply_pre_edge_transform_passes (program , quantizer )
235236
236237 if dump_graphs :
You can’t perform that action at this time.
0 commit comments