MNIST in Keras with TensorBoard

This sample trains an "MNIST" handwritten digit recognition model on a GPU or TPU backend using a Keras model. Data are handled using the tf.data.Dataset API. This is a very simple sample provided for educational purposes. Do not expect outstanding TPU performance on a dataset as small as MNIST.

Parameters


In [1]:
BATCH_SIZE = 64
LEARNING_RATE = 0.02
# GCS bucket for training logs and for saving the trained model
# You can leave this empty for local saving, unless you are using a TPU.
# TPUs do not have access to your local instance and can only write to GCS.
BUCKET="" # a valid bucket name must start with gs://

training_images_file   = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file   = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'

Imports


In [2]:
import os, re, math, json, time
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.python.platform import tf_logging
print("Tensorflow version " + tf.__version__)


Tensorflow version 2.0.0-beta1

TPU/GPU detection


In [3]:
tpu = None
try:
  tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection relies on TPU_NAME env var
  tf.tpu.experimental.initialize_tpu_system(tpu)
  strategy = tf.distribute.experimental.TPUStrategy(tpu, steps_per_run=100)
  print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])  
except ValueError:
  gpus = tf.config.experimental.list_logical_devices("GPU")
  if len(gpus) > 1:
    strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus])
    print("running on multiple GPUs")
  else:
    strategy = tf.distribute.get_strategy() # the default strategy works on CPU and single GPU
    print("Running on {}".format("a single GPU" if len(gpus)==1 else "CPU"))
    
# adjust batch size and learning rate for distributed computing
global_batch_size = BATCH_SIZE * strategy.num_replicas_in_sync # num replicas is 8 on a single TPU or N when running on N GPUs.
learning_rate = LEARNING_RATE * strategy.num_replicas_in_sync
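# e.g. with BATCH_SIZE=64 on a single Cloud TPU (8 replicas), the global batch size
# becomes 512 and the learning rate is scaled up by the same factor of 8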


Running on a single GPU

In [4]:
#@title visualization utilities [RUN ME]
"""
This cell contains helper functions used for visualization
and downloads only. You can skip reading it. There is very
little useful Keras/Tensorflow code here.
"""

# Matplotlib config
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='#F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='#a8151a')
plt.rc('figure', facecolor='#F0F0F0')

# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")

# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
  
  # get one batch from each: 10000 validation digits, N training digits
  unbatched_train_ds = training_dataset.apply(tf.data.experimental.unbatch())
  
  if tf.executing_eagerly():
    # This is the TF 2.0 "eager execution" way of iterating through a tf.data.Dataset
    for v_images, v_labels in validation_dataset:
      break

    for t_images, t_labels in unbatched_train_ds.batch(N):
      break

    validation_digits = v_images.numpy()
    validation_labels = v_labels.numpy()
    training_digits   = t_images.numpy()
    training_labels   = t_labels.numpy()
  else:
    # This is the legacy TF 1.x way of iterating through a tf.data.Dataset
    v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
    t_images, t_labels = unbatched_train_ds.batch(N).make_one_shot_iterator().get_next()
    # Run once, get one batch. Session.run returns numpy results
    with tf.Session() as ses:
      (validation_digits, validation_labels,
       training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
  
  # these were one-hot encoded in the dataset
  validation_labels = np.argmax(validation_labels, axis=1)
  training_labels = np.argmax(training_labels, axis=1)
  
  return (training_digits, training_labels,
          validation_digits, validation_labels)

# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
  font_labels = []
  img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
  font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
  font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
  d = PIL.ImageDraw.Draw(img)
  for i in range(n):
    font_labels.append(i%10)
    d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
  font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
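  # cut the 28x(28*n) strip into n individual 28x28 digits, each flattened to 28*28 values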
  font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
  return font_digits, font_labels

# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
  plt.figure(figsize=(13,3))
  digits = np.reshape(digits, [n, 28, 28])
  digits = np.swapaxes(digits, 0, 1)
  digits = np.reshape(digits, [28, 28*n])
  plt.yticks([])
  plt.xticks([28*x+14 for x in range(n)], predictions)
  for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
    if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red
  plt.imshow(digits)
  plt.grid(None)
  plt.title(title)
  
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
  idx = np.argsort(predictions==labels) # sort order: unrecognized first
  for i in range(lines):
    display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],
                   "{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n)

Colab-only auth for this notebook and the TPU


In [5]:
#IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ  # this is always set on Colab, the value is 0 or 1 depending on GPU presence
#if IS_COLAB_BACKEND:
#  from google.colab import auth
#  auth.authenticate_user() # Authenticates the backend and also the TPU using your credentials so that they can access your private GCS buckets

tf.data.Dataset: parse files and prepare training and validation datasets

Please read the best practices for building input pipelines with tf.data.Dataset


In [6]:
def read_label(tf_bytestring):
    label = tf.io.decode_raw(tf_bytestring, tf.uint8)
    label = tf.reshape(label, [])
    label = tf.one_hot(label, 10)
    return label
  
def read_image(tf_bytestring):
    image = tf.io.decode_raw(tf_bytestring, tf.uint8)
    image = tf.cast(image, tf.float32)/256.0
    image = tf.reshape(image, [28*28])
    return image
  
def load_dataset(image_file, label_file):
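    # MNIST IDX file format: image files start with a 16-byte header (magic number,
    # item count, rows, cols), label files with an 8-byte header (magic number, count);
    # after the header, records are fixed-length raw bytes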
    imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
    imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
    labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
    labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
    dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
    return dataset 
  
def get_training_dataset(image_file, label_file, batch_size):
    dataset = load_dataset(image_file, label_file)
    dataset = dataset.cache()  # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
    dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
    dataset = dataset.repeat() # Mandatory for Keras for now
    dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
    dataset = dataset.prefetch(-1)  # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
    return dataset
  
def get_validation_dataset(image_file, label_file):
    dataset = load_dataset(image_file, label_file)
    dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
    dataset = dataset.repeat() # Mandatory for Keras for now
    dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
    return dataset

# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, global_batch_size)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)

Let's have a look at the data


In [8]:
N = 24
(training_digits, training_labels,
 validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)


Keras model: 3 convolutional layers, 2 dense layers


In [13]:
# This model trains to 99.4% (sometimes 99.5%) accuracy in 10 epochs (with a batch size of 64)

def make_model():
    
    model = tf.keras.Sequential(
      [
        tf.keras.layers.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),

        tf.keras.layers.Conv2D(filters=6, kernel_size=3, padding='same', use_bias=False), # no bias necessary before batch norm
        tf.keras.layers.BatchNormalization(scale=False, center=True), # no batch norm scaling necessary before "relu"
        tf.keras.layers.Activation('relu'), # activation after batch norm

        tf.keras.layers.Conv2D(filters=12, kernel_size=6, padding='same', use_bias=False, strides=2),
        tf.keras.layers.BatchNormalization(scale=False, center=True),
        tf.keras.layers.Activation('relu'),

        tf.keras.layers.Conv2D(filters=24, kernel_size=6, padding='same', use_bias=False, strides=2),
        tf.keras.layers.BatchNormalization(scale=False, center=True),
        tf.keras.layers.Activation('relu'),

        tf.keras.layers.Flatten(),
          
        tf.keras.layers.Dense(200, use_bias=False),
        tf.keras.layers.BatchNormalization(scale=False, center=True),
        tf.keras.layers.Activation('relu'),
          
        tf.keras.layers.Dropout(0.5), # Dropout on dense layer only
        tf.keras.layers.Dense(10, activation='softmax')
      ])

    model.compile(optimizer='adam', # learning rate will be set by LearningRateScheduler
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
    
with strategy.scope(): # the new way of handling distribution strategies in Tensorflow 1.14+
    model = make_model()

# print model layers
model.summary()
                        
# set up learning rate decay
lr_decay = tf.keras.callbacks.LearningRateScheduler(lambda epoch: learning_rate * math.pow(0.5, 1+epoch) + learning_rate/200, verbose=True)
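# the schedule halves the learning rate every epoch while decaying towards a floor of
# learning_rate/200: with LEARNING_RATE=0.02 on a single replica this gives
# 0.0101, 0.0051, 0.0026, ... (visible in the training log below)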

# set up Tensorboard logs
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
log_dir=os.path.join(BUCKET, 'mnist-logs', timestamp)
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=50*global_batch_size)
print("Tensorboard loggs written to: ", log_dir)


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
reshape_2 (Reshape)          (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 28, 28, 6)         54        
_________________________________________________________________
batch_normalization_8 (Batch (None, 28, 28, 6)         18        
_________________________________________________________________
activation_8 (Activation)    (None, 28, 28, 6)         0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 14, 14, 12)        2592      
_________________________________________________________________
batch_normalization_9 (Batch (None, 14, 14, 12)        36        
_________________________________________________________________
activation_9 (Activation)    (None, 14, 14, 12)        0         
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 7, 7, 24)          10368     
_________________________________________________________________
batch_normalization_10 (Batc (None, 7, 7, 24)          72        
_________________________________________________________________
activation_10 (Activation)   (None, 7, 7, 24)          0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 1176)              0         
_________________________________________________________________
dense_4 (Dense)              (None, 200)               235200    
_________________________________________________________________
batch_normalization_11 (Batc (None, 200)               600       
_________________________________________________________________
activation_11 (Activation)   (None, 200)               0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 200)               0         
_________________________________________________________________
dense_5 (Dense)              (None, 10)                2010      
=================================================================
Total params: 250,950
Trainable params: 250,466
Non-trainable params: 484
_________________________________________________________________
TensorBoard logs written to:  gs://ml1-demo-martin/mnist-logs/2019-06-17-20-54-53

Train and validate the model


In [14]:
EPOCHS = 10
steps_per_epoch = 60000//global_batch_size  # 60,000 items in this dataset
print("Step (batches) per epoch: ", steps_per_epoch)

# Counting steps and batches with a distribution strategy: the input dataset must be
# batched with the global batch size (per-replica batch size x number of replicas),
# which is what get_training_dataset() receives above. steps_per_epoch is therefore
# computed from global_batch_size and the same settings work on CPU, a single GPU,
# multiple GPUs or a TPU. Training simply goes faster with more replicas.
  
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
                            validation_data=validation_dataset, validation_steps=1, callbacks=[lr_decay, tb_callback])


Steps (batches) per epoch:  937

Epoch 00001: LearningRateScheduler reducing learning rate to 0.0101.
Epoch 1/10
  1/937 [..............................] - ETA: 56:54 - loss: 3.2441 - accuracy: 0.0625
W0617 20:55:04.433886 139817064380160 callbacks.py:241] Method (on_train_batch_end) is slow compared to the batch update (1.589654). Check your callbacks.
937/937 [==============================] - 31s 33ms/step - loss: 0.1410 - accuracy: 0.9570 - val_loss: 0.0581 - val_accuracy: 0.9808

Epoch 00002: LearningRateScheduler reducing learning rate to 0.0051.
Epoch 2/10
937/937 [==============================] - 22s 23ms/step - loss: 0.0538 - accuracy: 0.9839 - val_loss: 0.0327 - val_accuracy: 0.9896

Epoch 00003: LearningRateScheduler reducing learning rate to 0.0026.
Epoch 3/10
937/937 [==============================] - 22s 24ms/step - loss: 0.0338 - accuracy: 0.9899 - val_loss: 0.0209 - val_accuracy: 0.9928

Epoch 00004: LearningRateScheduler reducing learning rate to 0.00135.
Epoch 4/10
937/937 [==============================] - 22s 23ms/step - loss: 0.0217 - accuracy: 0.9931 - val_loss: 0.0175 - val_accuracy: 0.9943

Epoch 00005: LearningRateScheduler reducing learning rate to 0.0007250000000000001.
Epoch 5/10
  1/937 [..............................] - ETA: 13s - loss: 0.0022 - accuracy: 1.0000
W0617 20:56:34.297800 139817064380160 callbacks.py:241] Method (on_train_batch_end) is slow compared to the batch update (0.198440). Check your callbacks.
937/937 [==============================] - 22s 24ms/step - loss: 0.0159 - accuracy: 0.9949 - val_loss: 0.0169 - val_accuracy: 0.9944

Epoch 00006: LearningRateScheduler reducing learning rate to 0.0004125.
Epoch 6/10
937/937 [==============================] - 22s 23ms/step - loss: 0.0139 - accuracy: 0.9959 - val_loss: 0.0173 - val_accuracy: 0.9944

Epoch 00007: LearningRateScheduler reducing learning rate to 0.00025625.
Epoch 7/10
937/937 [==============================] - 21s 23ms/step - loss: 0.0122 - accuracy: 0.9963 - val_loss: 0.0159 - val_accuracy: 0.9945

Epoch 00008: LearningRateScheduler reducing learning rate to 0.000178125.
Epoch 8/10
937/937 [==============================] - 21s 23ms/step - loss: 0.0102 - accuracy: 0.9969 - val_loss: 0.0166 - val_accuracy: 0.9942

Epoch 00009: LearningRateScheduler reducing learning rate to 0.0001390625.
Epoch 9/10
937/937 [==============================] - 22s 23ms/step - loss: 0.0103 - accuracy: 0.9969 - val_loss: 0.0164 - val_accuracy: 0.9940

Epoch 00010: LearningRateScheduler reducing learning rate to 0.00011953125000000001.
Epoch 10/10
937/937 [==============================] - 22s 23ms/step - loss: 0.0093 - accuracy: 0.9974 - val_loss: 0.0161 - val_accuracy: 0.9942

Visualize predictions


In [15]:
# recognize digits from local fonts
probabilities = model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)

# recognize validation digits
probabilities = model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)


Export the model for serving from ML Engine


In [16]:
class ServingInput(tf.keras.layers.Layer):
  # the important detail in this boilerplate code is "trainable=False"
  def __init__(self, name, dtype, batch_input_shape=None):
    super(ServingInput, self).__init__(trainable=False, name=name, dtype=dtype, batch_input_shape=batch_input_shape)
  def get_config(self):
    return {'batch_input_shape': self._batch_input_shape, 'dtype': self.dtype, 'name': self.name }

  def call(self, inputs):
    # When the deployed model is called through its REST API,
    # the JSON payload is parsed automatically, transformed into
    # a tensor and passed to this input layer. You can perform
    # additional transformations, such as decoding JPEGs for example,
    # before sending the data to your model. However, you can only
    # use tf.xxxx operations.
    return inputs
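
# As an illustration (a hypothetical sketch, not used in this notebook): a serving
# layer accepting raw JPEG bytes instead of pixel arrays could decode them with
# tf ops only, for example:
#   def call(self, inputs):  # inputs: a batch of JPEG byte strings
#     def decode(jpeg_bytes):
#       img = tf.image.decode_jpeg(jpeg_bytes, channels=1)  # uint8, shape [h, w, 1]
#       img = tf.image.resize(img, [28, 28])                # converts to float32
#       return tf.reshape(img, [28*28]) / 255.0
#     return tf.map_fn(decode, inputs, dtype=tf.float32)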

# little wrinkle: must copy the model from TPU to CPU manually. This is a temporary workaround.
restored_model = make_model()
restored_model.set_weights(model.get_weights()) # this copies the weights from the TPU; it does nothing on GPU

# add the serving input layer
serving_model = tf.keras.Sequential()
serving_model.add(ServingInput('serving', tf.float32, (None, 28*28)))
serving_model.add(restored_model)
export_path = os.path.join(BUCKET, 'mnist-export', timestamp)
tf.saved_model.save(serving_model, export_path)
print("Model exported to: ", export_path)


Model exported to:  gs://ml1-demo-martin/mnist-export/2019-06-17-20-54-53

Deploy the trained model to AI Platform

Push your trained model to production on AI Platform for a serverless, autoscaled, REST API experience.

You will need a GCS bucket and a GCP project for this. Models deployed on AI Platform autoscale to zero if not used. There will be no ML Engine charges after you are done testing. Google Cloud Storage incurs charges. Empty the bucket after deployment if you want to avoid these. Once the model is deployed, the bucket is not useful anymore.

Cloud Configuration


In [17]:
# Enable model deployment here
DEPLOY = False #@param {type:"boolean"}
# Create the model only once, after that, create new versions of the same model
CREATE_MODEL = True #@param {type:"boolean"}
# Models are deployed in your cloud project
PROJECT = "" #@param {type:"string"}

MODEL_NAME = "mnist" #@param {type:"string"}
MODEL_VERSION = "v0" #@param {type:"string"}

if DEPLOY:
    assert PROJECT, 'For this part, you need a GCP project. Head to http://console.cloud.google.com/ and create one.'
    assert re.search(r'gs://.+', export_path), 'For this part, the model must have been exported to a GCS bucket.'

Deploy the model

This uses the command-line interface. You can do the same thing through the ML Engine UI at https://console.cloud.google.com/mlengine/models


In [18]:
# Create the model
if DEPLOY and CREATE_MODEL:
  !gcloud ai-platform models create {MODEL_NAME} --project={PROJECT} --regions=us-central1

In [19]:
# Create a version of this model (you can add --async at the end of the line to make this call non-blocking)
# Additional config flags are available: https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions
# You can also deploy a model that is stored locally by providing a --staging-bucket=... parameter
if DEPLOY:
    !echo "Deployment takes a couple of minutes. You can watch your deployment here: https://console.cloud.google.com/mlengine/models/{MODEL_NAME}"
    !gcloud ai-platform versions create {MODEL_VERSION} --model={MODEL_NAME} --origin="{export_path}" --project={PROJECT} --runtime-version=1.13 --python-version=3.5


Deployment takes a couple of minutes. You can watch your deployment here: https://console.cloud.google.com/mlengine/models/mnist
Creating version (this might take a few minutes)......done.

Test the deployed model

Your model is now available as a REST API. Let us try to call it. The cells below use the "gcloud ai-platform" command-line tool, but any tool that can send a JSON payload to a REST endpoint will work.
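
For reference, any HTTP client can call the prediction endpoint directly. Below is a minimal sketch (not part of the original notebook), assuming the google-auth and requests packages are installed and application-default credentials with access to the project are available:

import google.auth
import google.auth.transport.requests
import requests

# get an OAuth2 access token from application-default credentials
credentials, _ = google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
credentials.refresh(google.auth.transport.requests.Request())

# AI Platform online prediction REST endpoint
url = "https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict".format(
    PROJECT, MODEL_NAME, MODEL_VERSION)
payload = {"instances": [{"serving_input": d.tolist()} for d in validation_digits[:5]]}
response = requests.post(url, json=payload, headers={"Authorization": "Bearer " + credentials.token})
print(response.json())  # {'predictions': [[...ten softmax probabilities...], ...]}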


In [27]:
# prepare digits to send to online prediction endpoint
digits = np.concatenate((font_digits, validation_digits[:100-N]))
labels = np.concatenate((font_labels, validation_labels[:100-N]))
with open("digits.json", "w") as f:
  for digit in digits:
    # the format for ML Engine online predictions is: one JSON object per line
    data = json.dumps({"serving_input": digit.tolist()})  # "serving_input" because the ServingInput layer was named "serving". Keras appends "_input"
    f.write(data+'\n')
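# each line of digits.json now looks like {"serving_input": [0.0, 0.0, ..., 0.0]} with 784 values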

In [28]:
if DEPLOY: # Request online predictions from deployed model (REST API) using the "gcloud ai-platform" command line.
    predictions = !gcloud ai-platform predict --model={MODEL_NAME} --json-instances digits.json --project={PROJECT} --version {MODEL_VERSION}
    print(predictions)

    probabilities = np.stack([json.loads(p) for p in predictions[1:]]) # first line is the name of the input layer: drop it, parse the rest
    predictions = np.argmax(probabilities, axis=1)
    display_top_unrecognized(digits, predictions, labels, N, 100//N)


['SEQUENTIAL_3', '[0.9995309114456177, 6.821802855938586e-08, 1.348528985545272e-05, 7.678394808863231e-07, 2.3415903172008257e-07, 1.1962074495386332e-06, 8.77387064974755e-05, 1.5032182432150876e-07, 8.149907080223784e-05, 0.000283890578430146]', '[5.614039309875807e-06, 0.9873712062835693, 0.009004256688058376, 0.0008932161726988852, 8.18662010715343e-06, 0.000137231225380674, 1.317857027061109e-06, 0.002547331852838397, 6.500814833998447e-06, 2.5052706405404024e-05]', ...]
(output truncated: one 10-value softmax probability vector per submitted digit; the first line is the name of the input layer)

License


author: Martin Gorner
twitter: @martin_gorner


Copyright 2019 Google LLC

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


This is not an official Google product but sample code provided for educational purposes.

