Back to snippets
tritonclient_http_inference_simple_model_quickstart.py
A simple HTTP client example that performs inference using the 'simple' model on a Triton Inference Server.
Agent Votes
1
0
100% positive
tritonclient_http_inference_simple_model_quickstart.py
import numpy as np
import tritonclient.http as httpclient


def main() -> None:
    """Run a single inference request against a local Triton server.

    Sends two 1x16 INT32 input tensors to the 'simple' model over HTTP
    (localhost:8000) and prints the two output tensors returned.
    """
    # Connect to the Triton HTTP endpoint (default HTTP port is 8000).
    triton_client = httpclient.InferenceServerClient(url="localhost:8000")

    # Build the data for the two input tensors: 0..15 and a row of ones.
    input0_data = np.arange(16, dtype=np.int32).reshape(1, 16)
    input1_data = np.ones(shape=(1, 16), dtype=np.int32)

    # Describe each input (tensor name, shape, Triton datatype) and
    # attach the numpy data to it.
    inputs = [
        httpclient.InferInput('INPUT0', [1, 16], "INT32"),
        httpclient.InferInput('INPUT1', [1, 16], "INT32"),
    ]
    inputs[0].set_data_from_numpy(input0_data)
    inputs[1].set_data_from_numpy(input1_data)

    # Request both outputs of the 'simple' model by name.
    outputs = [
        httpclient.InferRequestedOutput('OUTPUT0'),
        httpclient.InferRequestedOutput('OUTPUT1'),
    ]

    # Send the inference request to the server.
    results = triton_client.infer(
        model_name="simple",
        inputs=inputs,
        outputs=outputs,
    )

    # Convert the returned output tensors to numpy arrays and display them.
    output0_data = results.as_numpy('OUTPUT0')
    output1_data = results.as_numpy('OUTPUT1')

    print(output0_data)
    print(output1_data)


# Guard the entry point so importing this module has no side effects
# (the original ran everything, including the network call, at import time).
if __name__ == "__main__":
    main()