Commit b32b0ba
Parent(s): b34166e
Added triton client, improved client and predict
scikit-learn/adaboost_regressor/client.py
ADDED
@@ -0,0 +1,61 @@
+import sys
+import argparse
+import numpy as np
+
+import tritonclient.grpc as grpcclient
+import tritonclient.grpc.model_config_pb2 as mc
+
+from sklearn.datasets import fetch_california_housing
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import mean_squared_error
+
+
+def make_prediction(model_server, model_name, model_version, verbose):
+    try:
+        triton_client = grpcclient.InferenceServerClient(url=model_server, verbose=verbose)
+    except Exception as e:
+        print("channel creation failed: " + str(e))
+        sys.exit(1)
+    # Infer
+    inputs = []
+    outputs = []
+    # Load the California Housing dataset
+    california = fetch_california_housing()
+    X, y = california.data, california.target
+    # Split the dataset into training and testing sets
+    _, X_test, _, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
+    input_data = X_test.astype(np.float32)
+    input_label = y_test.astype(np.float32)
+    print(f'input_data:\n{input_data[0]}')
+    print(f'input_label:\n{input_label[0]}')
+    # input_data = np.expand_dims(input_data, axis=0)
+    # Initialize the data
+    inputs.append(grpcclient.InferInput('float_input', [input_data.shape[0], input_data.shape[1]], "FP32"))
+    inputs[0].set_data_from_numpy(input_data)
+    outputs.append(grpcclient.InferRequestedOutput('variable'))
+    # Test with outputs
+    results = triton_client.infer(model_name=model_name, inputs=inputs, outputs=outputs)
+    # print("response:\n", results.get_response())
+    statistics = triton_client.get_inference_statistics(model_name=model_name)
+    # print("statistics:\n", statistics)
+    if len(statistics.model_stats) != 1:
+        print("FAILED: Inference Statistics")
+        sys.exit(1)
+    # Get the output arrays from the results
+    y_pred = results.as_numpy('variable').squeeze()
+    print(f"y_pred:\n{y_pred[0]}")
+    mse = mean_squared_error(y_test, y_pred)
+    print(f'Mean Squared Error: {mse}')
+
+
+"""
+python client.py --model_server localhost:8001 --model_name adaboost_regressor --model_version 1
+"""
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Make predictions using a specific model.")
+    parser.add_argument("--model_server", default="localhost:8001", help="The address of the model server.")
+    parser.add_argument("--model_name", default="adaboost_regressor", help="The name of the model to use.")
+    parser.add_argument("--model_version", default="1", help="The version of the model to use.")
+    parser.add_argument("--verbose", action="store_true", required=False, default=False, help='Enable verbose output')
+    args = parser.parse_args()
+    make_prediction(args.model_server, args.model_name, args.model_version, args.verbose)
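
Note: client.py asks Triton for an input tensor named 'float_input' and an output named 'variable', which are the names skl2onnx typically assigns when a scikit-learn regressor is converted to ONNX. The export itself is not part of this commit; assuming the served adaboost_regressor model was produced that way, a minimal conversion sketch would look like:

# Sketch only -- not part of this commit. Assumes the served model was
# exported to ONNX with skl2onnx, which the tensor names 'float_input'
# and 'variable' requested by client.py suggest.
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split

from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

california = fetch_california_housing()
X, y = california.data, california.target
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.25, random_state=0)

model = AdaBoostRegressor(random_state=0)
model.fit(X_train, y_train)

# 'float_input' fixes the ONNX input name; skl2onnx's default naming calls
# the regressor output 'variable'.
onnx_model = convert_sklearn(
    model,
    initial_types=[("float_input", FloatTensorType([None, X.shape[1]]))],
)

with open("model.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())

For Triton's ONNX Runtime backend, the resulting model.onnx would then be placed in the model repository as adaboost_regressor/1/model.onnx (version directory name matching the --model_version default).
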
scikit-learn/adaboost_regressor/predict.py
CHANGED
@@ -19,11 +19,13 @@ X, y = dataset.data, dataset.target
 
 # Split the dataset into training and testing sets
 _, X_test, _, y_test = train_test_split(X, y, test_size=0.25, random_state=random_seed)
+print(f'X_test:\n{X_test[0]}')
+print(f'y_test:\n{y_test[0]}')
 
 # Use the model to make predictions on the test data
 y_pred = loaded_model.predict(X_test)
+print(f'y_pred:\n{y_pred[0]}')
 
 # Score the model using mean squared error
 mse = mean_squared_error(y_test, y_pred)
-
 print(f'Mean Squared Error: {mse}')
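
Note: client.py only exits early when channel creation fails; if the server is reachable but the model is not loaded, the failure surfaces later inside infer(). A readiness probe before inferring gives clearer errors. A possible extension (a sketch, not part of this commit) using the same tritonclient.grpc API:

# Sketch only -- a possible guard before triton_client.infer(), not part of this commit.
import sys
import tritonclient.grpc as grpcclient

triton_client = grpcclient.InferenceServerClient(url="localhost:8001")

# These readiness calls are part of the tritonclient.grpc API and return booleans.
if not triton_client.is_server_live():
    print("FAILED: server is not live")
    sys.exit(1)
if not triton_client.is_server_ready():
    print("FAILED: server is not ready")
    sys.exit(1)
if not triton_client.is_model_ready("adaboost_regressor"):
    print("FAILED: model adaboost_regressor is not ready")
    sys.exit(1)

Also worth noting: make_prediction() accepts model_version but never forwards it; triton_client.infer() takes an optional model_version argument if pinning a specific version is desired.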