
Commit 7d75ab1

beginner_source/onnx/export_simple_model_to_onnx_tutorial.py: bring the English document up to date (#1043)
* English tutorial update
1 parent 2e1be72 commit 7d75ab1

File tree

1 file changed: +31 −31 lines changed

beginner_source/onnx/export_simple_model_to_onnx_tutorial.py

Lines changed: 31 additions & 31 deletions
@@ -2,26 +2,26 @@
 """
 `Introduction to ONNX <intro_onnx.html>`_ ||
 **Exporting a PyTorch model to ONNX** ||
-`Extending the ONNX Registry <onnx_registry_tutorial.html>`_
+`Extending the ONNX exporter operator support <onnx_registry_tutorial.html>`_ ||
+`Export a model with control flow to ONNX <export_control_flow_model_to_onnx_tutorial.html>`_
 
 Export a PyTorch model to ONNX
 ==============================
 
-**Author**: `Thiago Crepaldi <https://github.com/thiagocrepaldi>`_
+**Author**: `Ti-Tai Wang <https://github.com/titaiwangms>`_, `Justin Chu <justinchu@microsoft.com>`_, `Thiago Crepaldi <https://github.com/thiagocrepaldi>`_.
 
 .. note::
-    As of PyTorch 2.1, there are two versions of ONNX Exporter.
-
-    * ``torch.onnx.dynamo_export`` is the newest (still in beta) exporter based on the TorchDynamo technology released with PyTorch 2.0
-    * ``torch.onnx.export`` is based on TorchScript backend and has been available since PyTorch 1.2.0
+    Starting with PyTorch 2.5, there are two ONNX Exporter options available.
+    * ``torch.onnx.export(..., dynamo=True)`` is the recommended exporter that leverages ``torch.export`` and Torch FX for graph capture.
+    * ``torch.onnx.export`` is the legacy approach that relies on the deprecated TorchScript and is no longer recommended for use.
 
 """
 
 ###############################################################################
 # In the `60 Minute Blitz <https://tutorials.pytorch.kr/beginner/deep_learning_60min_blitz.html>`_,
 # we had the opportunity to learn about PyTorch at a high level and train a small neural network to classify images.
 # In this tutorial, we are going to expand this to describe how to convert a model defined in PyTorch into the
-# ONNX format using TorchDynamo and the ``torch.onnx.dynamo_export`` ONNX exporter.
+# ONNX format using the ``torch.onnx.export(..., dynamo=True)`` ONNX exporter.
 #
 # While PyTorch is great for iterating on the development of models, the model can be deployed to production
 # using different formats, including `ONNX <https://onnx.ai/>`_ (Open Neural Network Exchange)!
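
Note: for readers comparing the two exporter options described in the updated note, the minimal sketch below contrasts the two calls. The model and file names are placeholders, not part of the tutorial; the dynamo-based form returns an ONNXProgram object that is saved explicitly, while the legacy form writes the file directly.

    import torch

    # Placeholder model and example inputs, for illustration only.
    model = torch.nn.Linear(4, 2)
    args = (torch.randn(1, 4),)

    # Recommended path (PyTorch 2.5+): graph capture via torch.export / Torch FX.
    onnx_program = torch.onnx.export(model, args, dynamo=True)
    onnx_program.save("linear.onnx")

    # Legacy TorchScript-based path: writes the ONNX file directly.
    torch.onnx.export(model, args, "linear_legacy.onnx")
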
@@ -47,8 +47,7 @@
 #
 # .. code-block:: bash
 #
-#    pip install onnx
-#    pip install onnxscript
+#    pip install --upgrade onnx onnxscript
 #
 # 2. Author a simple image classifier model
 # -----------------------------------------
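
Note: a quick way to confirm that the upgraded packages are the ones being picked up is to query their installed versions; a small sketch using only the standard library:

    from importlib.metadata import version

    # Print the installed versions of the packages the exporter depends on.
    print("onnx:", version("onnx"))
    print("onnxscript:", version("onnxscript"))
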
@@ -62,17 +61,16 @@
 import torch.nn.functional as F
 
 
-class MyModel(nn.Module):
-
+class ImageClassifierModel(nn.Module):
     def __init__(self):
-        super(MyModel, self).__init__()
+        super().__init__()
         self.conv1 = nn.Conv2d(1, 6, 5)
         self.conv2 = nn.Conv2d(6, 16, 5)
         self.fc1 = nn.Linear(16 * 5 * 5, 120)
         self.fc2 = nn.Linear(120, 84)
         self.fc3 = nn.Linear(84, 10)
 
-    def forward(self, x):
+    def forward(self, x: torch.Tensor):
         x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
         x = F.max_pool2d(F.relu(self.conv2(x)), 2)
         x = torch.flatten(x, 1)
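
Note: as a quick sanity check of the renamed ImageClassifierModel (a sketch, not part of the tutorial), a random 1x1x32x32 input should produce a batch of 10 logits, since the 32x32 image shrinks to 16*5*5 features after the two conv/pool stages:

    import torch

    # Assumes the ImageClassifierModel class defined above is in scope.
    model = ImageClassifierModel()
    x = torch.randn(1, 1, 32, 32)  # one single-channel 32x32 image
    logits = model(x)
    print(logits.shape)  # expected: torch.Size([1, 10])
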
@@ -81,16 +79,18 @@ def forward(self, x):
         x = self.fc3(x)
         return x
 
+
 ######################################################################
 # 3. Export the model to ONNX format
 # ----------------------------------
 #
 # Now that we have our model defined, we need to instantiate it and create a random 32x32 input.
 # Next, we can export the model to ONNX format.
 
-torch_model = MyModel()
-torch_input = torch.randn(1, 1, 32, 32)
-onnx_program = torch.onnx.dynamo_export(torch_model, torch_input)
+torch_model = ImageClassifierModel()
+# Create example inputs for exporting the model. The inputs should be a tuple of tensors.
+example_inputs = (torch.randn(1, 1, 32, 32),)
+onnx_program = torch.onnx.export(torch_model, example_inputs, dynamo=True)
 
 ######################################################################
 # As we can see, we didn't need any code change to the model.
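
Note: a hedged sketch of a variation on the new export call, assuming the dynamic_shapes argument of torch.onnx.export (a torch.export-style spec keyed by the forward() parameter name, here x) is available in your PyTorch version; it marks the batch dimension as dynamic so the exported model accepts variable batch sizes:

    import torch

    # Assumes torch_model and example_inputs from the tutorial are in scope.
    batch = torch.export.Dim("batch")
    onnx_program_dynamic = torch.onnx.export(
        torch_model,
        example_inputs,
        dynamo=True,
        dynamic_shapes={"x": {0: batch}},  # "x" is the forward() argument name
    )
    onnx_program_dynamic.save("image_classifier_model_dynamic.onnx")
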
@@ -102,13 +102,14 @@ def forward(self, x):
 # Although having the exported model loaded in memory is useful in many applications,
 # we can save it to disk with the following code:
 
-onnx_program.save("my_image_classifier.onnx")
+onnx_program.save("image_classifier_model.onnx")
 
 ######################################################################
 # You can load the ONNX file back into memory and check if it is well formed with the following code:
 
 import onnx
-onnx_model = onnx.load("my_image_classifier.onnx")
+
+onnx_model = onnx.load("image_classifier_model.onnx")
 onnx.checker.check_model(onnx_model)
 
 ######################################################################
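
Note: beyond the well-formedness check, it can help to peek at the declared graph inputs and outputs; a small sketch using the onnx protobuf API on the file saved above:

    import onnx

    onnx_model = onnx.load("image_classifier_model.onnx")
    # List the names of the graph's inputs and outputs.
    print("inputs:", [i.name for i in onnx_model.graph.input])
    print("outputs:", [o.name for o in onnx_model.graph.output])
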
@@ -124,10 +125,10 @@ def forward(self, x):
 #    :align: center
 #
 #
-# Once Netron is open, we can drag and drop our ``my_image_classifier.onnx`` file into the browser or select it after
+# Once Netron is open, we can drag and drop our ``image_classifier_model.onnx`` file into the browser or select it after
 # clicking the **Open model** button.
 #
-# .. image:: ../../_static/img/onnx/image_clossifier_onnx_modelon_netron_web_ui.png
+# .. image:: ../../_static/img/onnx/image_classifier_onnx_model_on_netron_web_ui.png
 #    :width: 50%
 #
 #
@@ -155,18 +156,18 @@ def forward(self, x):
 
 import onnxruntime
 
-onnx_input = onnx_program.adapt_torch_inputs_to_onnx(torch_input)
-print(f"Input length: {len(onnx_input)}")
-print(f"Sample input: {onnx_input}")
-
-ort_session = onnxruntime.InferenceSession("./my_image_classifier.onnx", providers=['CPUExecutionProvider'])
+onnx_inputs = [tensor.numpy(force=True) for tensor in example_inputs]
+print(f"Input length: {len(onnx_inputs)}")
+print(f"Sample input: {onnx_inputs}")
 
-def to_numpy(tensor):
-    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
+ort_session = onnxruntime.InferenceSession(
+    "./image_classifier_model.onnx", providers=["CPUExecutionProvider"]
+)
 
-onnxruntime_input = {k.name: to_numpy(v) for k, v in zip(ort_session.get_inputs(), onnx_input)}
+onnxruntime_input = {input_arg.name: input_value for input_arg, input_value in zip(ort_session.get_inputs(), onnx_inputs)}
 
-onnxruntime_outputs = ort_session.run(None, onnxruntime_input)
+# ONNX Runtime returns a list of outputs
+onnxruntime_outputs = ort_session.run(None, onnxruntime_input)[0]
 
 ####################################################################
 # 7. Compare the PyTorch results with the ones from the ONNX Runtime
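
Note: since the feed dictionary is built by zipping the session inputs with the exported tensors, it is worth confirming the ordering; a quick sketch (assuming the ort_session created above) that prints what ONNX Runtime expects:

    # Inspect the input and output signature that ONNX Runtime sees.
    for input_arg in ort_session.get_inputs():
        print("input:", input_arg.name, input_arg.shape, input_arg.type)
    for output_arg in ort_session.get_outputs():
        print("output:", output_arg.name, output_arg.shape, output_arg.type)
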
@@ -178,8 +179,7 @@ def to_numpy(tensor):
 # For that, we need to execute the PyTorch model with the same input and compare the results with ONNX Runtime's.
 # Before comparing the results, we need to convert the PyTorch's output to match ONNX's format.
 
-torch_outputs = torch_model(torch_input)
-torch_outputs = onnx_program.adapt_torch_outputs_to_onnx(torch_outputs)
+torch_outputs = torch_model(*example_inputs)
 
 assert len(torch_outputs) == len(onnxruntime_outputs)
 for torch_output, onnxruntime_output in zip(torch_outputs, onnxruntime_outputs):
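
Note: the loop body falls outside this hunk, so it is not shown; one way to finish the comparison (a sketch, not necessarily the tutorial's exact line) is to convert each ONNX Runtime output, a numpy array, back to a tensor and compare within numerical tolerance:

    # Body of the loop above: compare each pair of outputs within tolerance.
    torch.testing.assert_close(torch_output, torch.tensor(onnxruntime_output))
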
