In [31]:
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import math
In [7]:
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
In [8]:
# Import MNIST data
mnist = input_data.read_data_sets('mnist', one_hot=True)
In [9]:
# The features are already scaled and the data is shuffled
train_features = mnist.train.images
test_features = mnist.test.images
train_labels = mnist.train.labels.astype(np.float32)
test_labels = mnist.test.labels.astype(np.float32)
In [10]:
# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))
Calculate the memory size of train_features, train_labels, weights, and bias in bytes. Ignore memory for overhead; just calculate the memory required for the stored data.
You may have to look up how much memory a float32 requires. (Single-precision floating-point format is a computer number format that occupies 4 bytes (32 bits) in memory and represents a wide dynamic range of values.)
train_features Shape: (55000, 784) Type: float32
train_labels Shape: (55000, 10) Type: float32
weights Shape: (784, 10) Type: float32
bias Shape: (10,) Type: float32
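These shapes and dtypes can be confirmed directly from the arrays and variables (`get_shape()` is the TF 1.x accessor for a variable's static shape):
In [ ]:
# Sanity check: confirm the shapes and dtypes quoted above
print(train_features.shape, train_features.dtype)  # (55000, 784) float32
print(train_labels.shape, train_labels.dtype)      # (55000, 10) float32
print(weights.get_shape())                         # (784, 10)
print(bias.get_shape())                            # (10,)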
In [26]:
print("Train features size = ", train_features.size * 4)
print("Train labels size = ", train_labels.size * 4)
print("Weights size =", 784 * 10 * 4)
print("Bias size = ", 10 * 4)
In [34]:
print("How many batches are there? ", math.ceil(50000 / 128))
print("What is the last batch size? ", 50000 % 128)