In [1]:
# Silence warning output so the notebook cells stay readable.
import warnings
warnings.filterwarnings('ignore')
In [3]:
# Render matplotlib figures inline. NOTE(review): %pylab is deprecated — it
# star-imports numpy/matplotlib into the global namespace; explicit imports
# (as done below for pyplot) are preferred.
%matplotlib inline
%pylab inline
import matplotlib.pyplot as plt
In [7]:
# Inspect the exported TensorFlow SavedModel directory (version 1).
!ls -l tf/1 tf/1/variables
In [9]:
# https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel
In [13]:
# List the tag-sets contained in the SavedModel.
!saved_model_cli show --dir tf/1
In [14]:
# List the signature defs available under the 'serve' tag-set.
!saved_model_cli show --dir tf/1 --tag_set serve
In [15]:
# Show the input/output tensor specs of the default serving signature.
!saved_model_cli show --dir tf/1 --tag_set serve --signature_def serving_default
In [16]:
# Run a local prediction against the SavedModel. Class indices:
# 0: red
# 1: green
# 2: yellow
!saved_model_cli run --dir tf/1 --tag_set serve --signature_def serving_default --input_exprs inputs=[[100.0,47.0,10.0]]
In [17]:
# First we need to create a bucket on Google Cloud and upload our model to it.
# https://cloud.google.com/storage/docs/creating-buckets#storage-create-bucket-gsutil
!gsutil mb gs://manning_bucket
!gsutil cp -R tf/1 gs://manning_bucket
In [ ]:
# Register a Cloud ML Engine model, deploy version v1 from the uploaded
# SavedModel, then confirm the deployment details.
!gcloud ml-engine models create "manning_insurance_1"
!gcloud ml-engine versions create "v1" --model "manning_insurance_1" --origin gs://manning_bucket/1
!gcloud ml-engine versions describe "v1" --model "manning_insurance_1"
In [19]:
# Sample request payload — one instance of each category.
!cat sample_insurance.json
In [ ]:
# Remote prediction against the deployed model. Class indices:
# 0: red
# 1: green
# 2: yellow
# https://cloud.google.com/ml-engine/docs/tensorflow/prediction-overview#getting_predictions
!gcloud ml-engine predict --model "manning_insurance_1" --version "v1" --json-instances ./sample_insurance.json
# Recorded output (softmax scores per instance, one row per sample):
# SCORES
# [0.8658562898635864, 7.318668918511809e-14, 0.13414366543293]
# [0.002760800765827298, 0.8720880746841431, 0.12515118718147278]
# [5.452934419736266e-05, 0.005952719133347273, 0.9939927458763123]
In [3]:
# Client library for calling the Cloud ML Engine REST API from Python.
# NOTE(review): prefer `%pip install` with pinned versions so the install
# targets the kernel's environment and is reproducible.
!pip install google-api-python-client
In [24]:
# Python client for TensorFlow Serving (used in the serving cells below).
!pip install tensorflow-serving-api
In [1]:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/ml_engine/online_prediction/predict.py
# https://cloud.google.com/ml-engine/docs/tensorflow/online-predict
import googleapiclient.discovery
def predict_json(project, model, instances, version=None):
    """Send json data to a deployed model for prediction.

    Args:
        project (str): project where the Cloud ML Engine Model is deployed.
        model (str): model name.
        instances ([Mapping[str: Any]]): Keys should be the names of Tensors
            your deployed model expects as inputs. Values should be datatypes
            convertible to Tensors, or (potentially nested) lists of datatypes
            convertible to tensors.
        version: str, version of the model to target.

    Returns:
        Mapping[str: any]: dictionary of prediction results defined by the
        model.

    Raises:
        RuntimeError: if the service response carries an 'error' entry.
    """
    # Build the ML Engine service object. Credentials are picked up from the
    # environment variable
    # GOOGLE_APPLICATION_CREDENTIALS=<path_to_service_account_file>
    ml_service = googleapiclient.discovery.build('ml', 'v1')

    # Fully-qualified resource name; pin to a specific version when given,
    # otherwise the model's default version serves the request.
    target = f'projects/{project}/models/{model}'
    if version is not None:
        target += f'/versions/{version}'

    request = ml_service.projects().predict(
        name=target,
        body={'instances': instances},
    )
    response = request.execute()

    if 'error' in response:
        raise RuntimeError(response['error'])
    return response['predictions']
In [2]:
# Three sample feature rows — one expected per risk class.
feature_rows = [[160, 18, 100], [100, 47, 10], [90, 20, 20]]
instances = [{"inputs": row} for row in feature_rows]
predict_json("sandboxolli", "manning_insurance_1", instances=instances)
Out[2]:
In [ ]:
# Serve the model locally over gRPC with TensorFlow Serving.
# https://www.tensorflow.org/serving/
# https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/setup.md#tensorflow-serving-python-api-pip-package-pip
!tensorflow_model_server --port=9000 --model_name=manning_insurance_1 --model_base_path=$(pwd)/tf
In [ ]:
# Alternatively, serve the same model over the REST API.
# https://www.tensorflow.org/serving/api_rest
!tensorflow_model_server --rest_api_port=8501 \
--model_name=manning_insurance_1 \
--model_base_path=$(pwd)/tf
In [2]:
# Smoke-test the REST endpoint; recorded response shown below.
!curl -d '{ "instances": [{"inputs": [ 100.0, 47.0, 10.0]}]}' -X POST http://localhost:8501/v1/models/manning_insurance_1:predict
# {
# "predictions": [[0.0027608, 0.872088, 0.125151]
# ]
# }