In [1]:
import tensorflow as tf

In [2]:
hello_constant = tf.constant('Hello Tensor Constant')

with tf.Session() as sess:
    output = sess.run(hello_constant)
    print(output)


b'Hello Tensor Constant'
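
In [ ]:
# A small aside (assuming Python 3, as in the traceback below): string
# tensors come back from sess.run as bytes, hence the b'...' prefix in the
# output above; decode to get a plain str.
with tf.Session() as sess:
    print(sess.run(hello_constant).decode('utf-8'))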

In [10]:
# Placeholders have no value of their own; they are fed at run time
# through feed_dict
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.float32)
z = tf.placeholder(tf.int32)

with tf.Session() as sess:
    output = sess.run(x, feed_dict={x: 'string', y: 123.22, z: 123})
    print(output)


string
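
In [ ]:
# A minimal sketch (not part of the original notebook): sess.run can also
# fetch several tensors at once by passing a list, with every placeholder
# fed through the same feed_dict.
with tf.Session() as sess:
    out_x, out_y, out_z = sess.run(
        [x, y, z], feed_dict={x: 'string', y: 123.22, z: 123})
    print(out_x, out_y, out_z)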

In [16]:
x = tf.constant(10)
y = tf.constant(2)
# tf.divide on int32 inputs yields float64, so the integer constant must
# be cast to float64 before the subtraction
z = tf.subtract(tf.divide(x, y), tf.cast(tf.constant(1), tf.float64))

with tf.Session() as sess:
    output = sess.run(z)
    print(output)


4.0
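
In [ ]:
# A hedged illustration of why the tf.cast above is needed: without it,
# tf.subtract receives a float64 tensor and an int32 tensor and raises a
# TypeError at graph-construction time.
try:
    bad = tf.subtract(tf.divide(x, y), tf.constant(1))  # float64 vs. int32
except TypeError as e:
    print(e)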

In [18]:
from tensorflow.examples.tutorials.mnist import input_data

In [21]:
def get_weights(n_features, n_labels):
    """Return a TensorFlow weight Variable, drawn from a truncated normal."""
    return tf.Variable(tf.truncated_normal((n_features, n_labels)))

def get_biases(n_labels):
    """Return a TensorFlow bias Variable, initialized to zeros."""
    return tf.Variable(tf.zeros(n_labels))

def linear(inputs, W, b):
    """Apply the linear function xW + b."""
    return tf.add(tf.matmul(inputs, W), b)
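
In [ ]:
# A quick smoke test of the helpers above (illustrative only; the shapes
# are arbitrary): one linear layer mapping 5 features to 2 labels on a
# single random input row.
test_w = get_weights(5, 2)
test_b = get_biases(2)
test_in = tf.random_normal((1, 5))
test_out = linear(test_in, test_w, test_b)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(test_out).shape)  # (1, 2)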

In [23]:
def mnist_features_labels(n_labels):
    """
    Gets the first <n> labels from the MNIST dataset
    :param n_labels: Number of labels to use
    :return: Tuple of feature list and label list
    """
    mnist_features = []
    mnist_labels = []

    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

    # In order to make quizzes run faster, we're only looking at 10000 images
    for mnist_feature, mnist_label in zip(*mnist.train.next_batch(10000)):

        # Add the example only if its label falls within the first <n> classes
        if mnist_label[:n_labels].any():
            mnist_features.append(mnist_feature)
            mnist_labels.append(mnist_label[:n_labels])

    return mnist_features, mnist_labels


# Number of features (28*28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3

# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)

# Weights and Biases
w = get_weights(n_features, n_labels)
b = get_biases(n_labels)

# Linear Function xW + b
logits = linear(features, w, b)

# Training data
train_features, train_labels = mnist_features_labels(n_labels)

with tf.Session() as session:
    # Initialize all variables; global_variables_initializer() takes no
    # arguments and returns an op that the session must run
    session.run(tf.global_variables_initializer())
    
    # Softmax
    prediction = tf.nn.softmax(logits)

    # Cross entropy
    # This quantifies how far off the predictions were.
    # You'll learn more about this in future lessons.
    cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), axis=1)

    # Training loss
    # You'll learn more about this in future lessons.
    loss = tf.reduce_mean(cross_entropy)

    # Rate at which the weights are changed
    # You'll learn more about this in future lessons.
    learning_rate = 0.08

    # Gradient Descent
    # This is the method used to train the model
    # You'll learn more about this in future lessons.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    # Run optimizer and get loss
    _, l = session.run(
        [optimizer, loss],
        feed_dict={features: train_features, labels: train_labels})

# Print loss
print('Loss: {}'.format(l))


---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-23-e3ac7f66eaf0> in <module>()
     38 
     39 # Training data
---> 40 train_features, train_labels = mnist_features_labels(n_labels)
     41 
     42 with tf.Session() as session:

<ipython-input-23-e3ac7f66eaf0> in mnist_features_labels(n_labels)
      8     mnist_labels = []
      9 
---> 10     mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
     11 
     12     # In order to make quizzes run faster, we're only looking at 10000 images

~/anaconda/envs/tflearn/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py in read_data_sets(train_dir, fake_data, one_hot, dtype, reshape, validation_size, seed)
    233 
    234   local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
--> 235                                    SOURCE_URL + TRAIN_IMAGES)
    236   with open(local_file, 'rb') as f:
    237     train_images = extract_images(f)

~/anaconda/envs/tflearn/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py in maybe_download(filename, work_directory, source_url)
    206   filepath = os.path.join(work_directory, filename)
    207   if not gfile.Exists(filepath):
--> 208     temp_file_name, _ = urlretrieve_with_retry(source_url)
    209     gfile.Copy(temp_file_name, filepath)
    210     with gfile.GFile(filepath) as f:

~/anaconda/envs/tflearn/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py in wrapped_fn(*args, **kwargs)
    163       for delay in delays():
    164         try:
--> 165           return fn(*args, **kwargs)
    166         except Exception as e:  # pylint: disable=broad-except)
    167           if is_retriable is None:

~/anaconda/envs/tflearn/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py in urlretrieve_with_retry(url, filename)
    188 @retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
    189 def urlretrieve_with_retry(url, filename=None):
--> 190   return urllib.request.urlretrieve(url, filename)
    191 
    192 

~/anaconda/envs/tflearn/lib/python3.5/urllib/request.py in urlretrieve(url, filename, reporthook, data)
    215 
    216             while True:
--> 217                 block = fp.read(bs)
    218                 if not block:
    219                     break

~/anaconda/envs/tflearn/lib/python3.5/http/client.py in read(self, amt)
    446             # Amount is given, implement using readinto
    447             b = bytearray(amt)
--> 448             n = self.readinto(b)
    449             return memoryview(b)[:n].tobytes()
    450         else:

~/anaconda/envs/tflearn/lib/python3.5/http/client.py in readinto(self, b)
    486         # connection, and the user is reading more bytes than will be provided
    487         # (for example, reading in 1k chunks)
--> 488         n = self.fp.readinto(b)
    489         if not n and b:
    490             # Ideally, we would raise IncompleteRead if the content-length

~/anaconda/envs/tflearn/lib/python3.5/socket.py in readinto(self, b)
    573         while True:
    574             try:
--> 575                 return self._sock.recv_into(b)
    576             except timeout:
    577                 self._timeout_occurred = True

~/anaconda/envs/tflearn/lib/python3.5/ssl.py in recv_into(self, buffer, nbytes, flags)
    927                   "non-zero flags not allowed in calls to recv_into() on %s" %
    928                   self.__class__)
--> 929             return self.read(nbytes, buffer)
    930         else:
    931             return socket.recv_into(self, buffer, nbytes, flags)

~/anaconda/envs/tflearn/lib/python3.5/ssl.py in read(self, len, buffer)
    789             raise ValueError("Read on closed or unwrapped SSL socket.")
    790         try:
--> 791             return self._sslobj.read(len, buffer)
    792         except SSLError as x:
    793             if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:

~/anaconda/envs/tflearn/lib/python3.5/ssl.py in read(self, len, buffer)
    573         """
    574         if buffer is not None:
--> 575             v = self._sslobj.read(len, buffer)
    576         else:
    577             v = self._sslobj.read(len)

KeyboardInterrupt: 
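
In [ ]:
# The MNIST download above stalled and was interrupted. One workaround
# (an assumption, not part of the original notebook) is to download the
# four MNIST archives yourself and place them in MNIST_data/; as the
# traceback shows, read_data_sets only downloads a file when it does not
# already exist locally:
#   train-images-idx3-ubyte.gz, train-labels-idx1-ubyte.gz,
#   t10k-images-idx3-ubyte.gz,  t10k-labels-idx1-ubyte.gz
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)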

In [ ]: