In [0]:
!git clone https://github.com/google/TensorNetwork.git
!pip install ./TensorNetwork


fatal: destination path 'TensorNetwork' already exists and is not an empty directory.
Processing ./TensorNetwork
Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.6/dist-packages (from tensornetwork==0.4.0) (1.18.4)
Requirement already satisfied: graphviz>=0.11.1 in /usr/local/lib/python3.6/dist-packages (from tensornetwork==0.4.0) (0.14)
Requirement already satisfied: opt_einsum>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from tensornetwork==0.4.0) (3.2.1)
Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.6/dist-packages (from tensornetwork==0.4.0) (2.10.0)
Requirement already satisfied: scipy>=1.1 in /usr/local/lib/python3.6/dist-packages (from tensornetwork==0.4.0) (1.4.1)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from h5py>=2.9.0->tensornetwork==0.4.0) (1.12.0)
Building wheels for collected packages: tensornetwork
  Building wheel for tensornetwork (setup.py) ... done
  Created wheel for tensornetwork: filename=tensornetwork-0.4.0-cp36-none-any.whl size=263053 sha256=de2f64c327c1074c9eafb9d7c3d7aa3bfd4dc076a640c7aadb912b2e566ab6e4
  Stored in directory: /tmp/pip-ephem-wheel-cache-5_ld1sxs/wheels/f0/25/c0/f94fcb8f0e82252f2ee53dc257fb4b039cc2184b321375ed18
Successfully built tensornetwork
Installing collected packages: tensornetwork
  Found existing installation: tensornetwork 0.4.0
    Uninstalling tensornetwork-0.4.0:
      Successfully uninstalled tensornetwork-0.4.0
Successfully installed tensornetwork-0.4.0

In [0]:
import tensorflow as tf
import tensornetwork as tn
import numpy as np
from tensornetwork.tn_keras.dense import DenseDecomp
from tensornetwork.tn_keras.mpo import DenseMPO
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
import math

In [0]:
def dummy_data(input_dim, num_examples=100, seed=42):
    """Generate a deterministic dummy classification dataset.

    Note: the features are random and uncorrelated with the labels; this
    data only exists to exercise model shapes and training plumbing.

    Args:
        input_dim: Number of features per example.
        num_examples: Total number of examples to generate (default 100,
            matching the original hard-coded size).
        seed: NumPy random seed, set for reproducibility (default 42).

    Returns:
        Tuple of (data, labels) where data has shape
        (num_examples, input_dim) with integer entries in [0, 10), and
        labels has shape (num_examples, 1) with the first half ones and
        the second half zeros.
    """
    np.random.seed(seed)
    data = np.random.randint(10, size=(num_examples, input_dim))
    n_pos = num_examples // 2
    labels = np.concatenate(
        (np.ones((n_pos, 1)), np.zeros((num_examples - n_pos, 1))), axis=0)
    return data, labels

Build Base Model and Tensorized Models


In [0]:
data, labels = dummy_data(1296)

# Build a fully connected network (the uncompressed baseline).
model = Sequential()
model.add(Dense(512, use_bias=True, activation='relu', input_shape=(data.shape[1],)))
model.add(Dense(128, use_bias=True, activation='relu'))
model.add(Dense(1, use_bias=True, activation='sigmoid'))

# Build the same fully connected network using TN layer DenseDecomp
# (each weight matrix is replaced by a rank-`decomp_size` factorization).
decomp_model = Sequential()
decomp_model.add(DenseDecomp(512, decomp_size=64, use_bias=True, activation='relu', input_shape=(data.shape[1],)))
decomp_model.add(DenseDecomp(128, decomp_size=64, use_bias=True, activation='relu'))
decomp_model.add(DenseDecomp(1, decomp_size=8, use_bias=True, activation='sigmoid'))

# Build the same fully connected network using TN layer DenseMPO.
# MPO layer sizes must factor evenly across `num_nodes` (1296 = 6^4,
# 256 = 4^4, 81 = 3^4), so the hidden widths differ slightly from the
# baseline. Derive the input size from `data` for consistency with the
# models above instead of hard-coding 1296.
mpo_model = Sequential()
mpo_model.add(DenseMPO(256, num_nodes=4, bond_dim=8, use_bias=True, activation='relu', input_shape=(data.shape[1],)))
mpo_model.add(DenseMPO(81, num_nodes=4, bond_dim=4, use_bias=True, activation='relu'))
mpo_model.add(Dense(1, use_bias=True, activation='sigmoid'))

Analyze Parameter Reduction from Tensorization


In [0]:
# Layer-by-layer parameter counts for the baseline Dense model.
model.summary()


Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                (None, 512)               664064    
_________________________________________________________________
dense_1 (Dense)              (None, 128)               65664     
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 129       
=================================================================
Total params: 729,857
Trainable params: 729,857
Non-trainable params: 0
_________________________________________________________________

In [0]:
# Parameter counts for the DenseDecomp model — compare against the baseline above.
decomp_model.summary()


Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_decomp (DenseDecomp)   (None, 512)               116224    
_________________________________________________________________
dense_decomp_1 (DenseDecomp) (None, 128)               41088     
_________________________________________________________________
dense_decomp_2 (DenseDecomp) (None, 1)                 1033      
=================================================================
Total params: 158,345
Trainable params: 158,345
Non-trainable params: 0
_________________________________________________________________

In [0]:
# Parameter counts for the DenseMPO model — the most aggressive compression.
mpo_model.summary()


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_mpo (DenseMPO)         (None, 256)               3712      
_________________________________________________________________
dense_mpo_1 (DenseMPO)       (None, 81)                561       
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 82        
=================================================================
Total params: 4,355
Trainable params: 4,355
Non-trainable params: 0
_________________________________________________________________

In [0]:
# Quantify the parameter savings: ratio of baseline parameters to each
# tensorized model's parameters (higher = more compression).
base_params = model.count_params()
decomp_params = decomp_model.count_params()
mpo_params = mpo_model.count_params()

print(f'Compression factor from tensorization with DenseDecomp: {base_params / decomp_params}')
print(f'Compression factor from tensorization with DenseMPO: {base_params / mpo_params}')


Compression factor from tensorization with DenseDecomp: 4.609283526476997
Compression factor from tensorization with DenseMPO: 167.5905855338691

Train Models for Comparison


In [0]:
# Train the baseline Dense model: binary classification, so use
# binary cross-entropy with Adam and track accuracy.
compile_kwargs = dict(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
model.compile(**compile_kwargs)

# Fit for 10 epochs on the dummy data.
history = model.fit(data, labels, epochs=10, batch_size=32)


Epoch 1/10
4/4 [==============================] - 0s 8ms/step - loss: 10.5600 - accuracy: 0.4600
Epoch 2/10
4/4 [==============================] - 0s 6ms/step - loss: 3.5192 - accuracy: 0.5000
Epoch 3/10
4/4 [==============================] - 0s 6ms/step - loss: 1.6383 - accuracy: 0.5600
Epoch 4/10
4/4 [==============================] - 0s 6ms/step - loss: 3.2225 - accuracy: 0.5000
Epoch 5/10
4/4 [==============================] - 0s 5ms/step - loss: 1.5394 - accuracy: 0.5000
Epoch 6/10
4/4 [==============================] - 0s 6ms/step - loss: 0.9699 - accuracy: 0.5600
Epoch 7/10
4/4 [==============================] - 0s 7ms/step - loss: 0.6281 - accuracy: 0.6700
Epoch 8/10
4/4 [==============================] - 0s 5ms/step - loss: 0.5347 - accuracy: 0.7500
Epoch 9/10
4/4 [==============================] - 0s 6ms/step - loss: 0.4560 - accuracy: 0.8200
Epoch 10/10
4/4 [==============================] - 0s 5ms/step - loss: 0.4128 - accuracy: 0.9100

In [0]:
# Train the DenseDecomp model with the identical optimizer/loss/metrics
# setup as the baseline, so the runs are directly comparable.
compile_kwargs = dict(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
decomp_model.compile(**compile_kwargs)

# Fit for 10 epochs on the same dummy data.
history = decomp_model.fit(data, labels, epochs=10, batch_size=32)


Epoch 1/10
4/4 [==============================] - 0s 7ms/step - loss: 2.0713 - accuracy: 0.5700
Epoch 2/10
4/4 [==============================] - 0s 4ms/step - loss: 1.7343 - accuracy: 0.4500
Epoch 3/10
4/4 [==============================] - 0s 4ms/step - loss: 1.3718 - accuracy: 0.5100
Epoch 4/10
4/4 [==============================] - 0s 4ms/step - loss: 1.0777 - accuracy: 0.4900
Epoch 5/10
4/4 [==============================] - 0s 5ms/step - loss: 1.3767 - accuracy: 0.5400
Epoch 6/10
4/4 [==============================] - 0s 5ms/step - loss: 0.8864 - accuracy: 0.5500
Epoch 7/10
4/4 [==============================] - 0s 5ms/step - loss: 0.5909 - accuracy: 0.6800
Epoch 8/10
4/4 [==============================] - 0s 5ms/step - loss: 0.5715 - accuracy: 0.6900
Epoch 9/10
4/4 [==============================] - 0s 5ms/step - loss: 0.4912 - accuracy: 0.7200
Epoch 10/10
4/4 [==============================] - 0s 5ms/step - loss: 0.3498 - accuracy: 0.9000

In [0]:
# Train the DenseMPO model with the identical optimizer/loss/metrics
# setup as the other two models, so the runs are directly comparable.
compile_kwargs = dict(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
mpo_model.compile(**compile_kwargs)

# Fit for 10 epochs on the same dummy data.
history = mpo_model.fit(data, labels, epochs=10, batch_size=32)


Epoch 1/10
4/4 [==============================] - 0s 10ms/step - loss: 0.6926 - accuracy: 0.5100
Epoch 2/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6890 - accuracy: 0.5100
Epoch 3/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6856 - accuracy: 0.5000
Epoch 4/10
4/4 [==============================] - 0s 7ms/step - loss: 0.6813 - accuracy: 0.5300
Epoch 5/10
4/4 [==============================] - 0s 9ms/step - loss: 0.6776 - accuracy: 0.7200
Epoch 6/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6733 - accuracy: 0.8400
Epoch 7/10
4/4 [==============================] - 0s 9ms/step - loss: 0.6689 - accuracy: 0.8300
Epoch 8/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6635 - accuracy: 0.8400
Epoch 9/10
4/4 [==============================] - 0s 9ms/step - loss: 0.6581 - accuracy: 0.8100
Epoch 10/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6501 - accuracy: 0.9300

In [0]: