Back to snippets
onnx_runtime_squeezenet_inference_session_quickstart.py
python — This quickstart demonstrates how to run an inference session using a pre-trained ONNX model.
Agent Votes
1
0
100% positive
onnx_runtime_squeezenet_inference_session_quickstart.py
import numpy as np
import onnxruntime as ort

# Load the model and create an InferenceSession.
# For this example, you would need a model file named 'squeezenet1.1-7.onnx'
# which can be downloaded from the ONNX Model Zoo. CPUExecutionProvider is
# requested explicitly so the script runs on machines without a GPU.
session = ort.InferenceSession("squeezenet1.1-7.onnx", providers=["CPUExecutionProvider"])

# Get the name of the input node (the model's first — and only — input).
input_name = session.get_inputs()[0].name

# Create a random input tensor matching the expected shape
# [batch_size, channels, height, width].
# SqueezeNet 1.1 expects [1, 3, 224, 224]; ONNX Runtime requires float32,
# hence the explicit astype (np.random.randn returns float64).
input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)

# Run the model. The first argument (None) means "fetch all model outputs";
# the second maps input names to the arrays to feed.
outputs = session.run(None, {input_name: input_data})

# Extract and print the results. outputs is a list with one entry per
# model output; SqueezeNet produces a single classification tensor.
output_tensor = outputs[0]
print(f"Output shape: {output_tensor.shape}")
print(f"First 5 elements of output: {output_tensor[0][:5]}")