In [1]:
import tensorflow as tf
In [2]:
tf.__version__
Out[2]:
In [7]:
tf.enable_eager_execution()  # can only be called once, at program startup
In [8]:
x = [[2.]]
m = tf.matmul(x, x)
In [9]:
print(m)
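With eager execution enabled, ops run immediately and return concrete values, so `m` above already holds [[4.]]. A minimal sketch (assuming the eager setup above) of pulling a result straight into NumPy:

y = tf.matmul([[2.]], [[3.]])   # executes immediately, no tf.Session needed
print(y)                        # tf.Tensor with value [[6.]], shape (1, 1)
print(y.numpy())                # [[6.]] as a NumPy array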
In [10]:
a = tf.constant([[1, 2],
                 [3, 4]])
In [11]:
a
Out[11]:
In [12]:
print(a)
In [13]:
a1 = tf.constant([[1, 2],
                  [3, 4]])
In [14]:
a1
Out[14]:
In [15]:
tf.constant([[1, 2],
             [3, 4]])
Out[15]:
In [16]:
b = tf.add(a, 1)
In [17]:
b
Out[17]:
In [18]:
a
Out[18]:
In [19]:
import numpy as np
In [20]:
c = np.multiply(a, b)
In [21]:
c
Out[21]:
In [22]:
c
Out[22]:
In [23]:
a.numpy()
Out[23]:
In [25]:
b.numpy()
Out[25]:
In [26]:
b
Out[26]:
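Eager tensors and NumPy arrays interoperate in both directions, as the cells above show. A short sketch reusing `a` and `b` from above:

c = np.multiply(a, b)    # a NumPy op on eager tensors returns an ndarray
d = tf.add(c, 1)         # a NumPy array can be fed straight back into a TF op
print(type(c), type(d))  # numpy.ndarray, EagerTensor
print(b.numpy())         # explicit tensor -> ndarray conversion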
In [27]:
def fizzbuzz(max_num):
    counter = tf.constant(0)
    for num in range(max_num):
        num = tf.constant(num)
        if int(num % 3) == 0 and int(num % 5) == 0:
            print("FizzBuzz")
        elif int(num % 3) == 0:
            print("Fizz")
        elif int(num % 5) == 0:
            print("Buzz")
        else:
            print(num)
        counter += 1
    return counter
In [28]:
fizzbuzz(3)
Out[28]:
In [29]:
fizzbuzz(10)
Out[29]:
In [34]:
class MySimpleLayer(tf.keras.layers.Layer):
    def __init__(self, output_units):
        super(MySimpleLayer, self).__init__()
        self.output_units = output_units

    def build(self, input_shape):
        # The build method gets called the first time your layer is used.
        # Creating variables on build() allows you to make their shape depend
        # on the input shape and hence remove the need for the user to specify
        # full shapes. It is possible to create variables during __init__() if
        # you already know their full shapes.
        self.kernel = self.add_variable(
            "kernel", [input_shape[-1], self.output_units])

    def call(self, input):
        # Override call() instead of __call__ so we can perform some bookkeeping.
        return tf.matmul(input, self.kernel)
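A hypothetical usage sketch for the layer above (unit count and input shape are arbitrary): build() runs on the first call and sizes the kernel from the input.

layer = MySimpleLayer(output_units=4)
x = tf.ones([3, 5])   # batch of 3 examples with 5 features
y = layer(x)          # kernel is created as [5, 4] on this first call
print(y.shape)        # (3, 4)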
In [37]:
MySimpleLayer
Out[37]:
In [38]:
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, input_shape=(784,)),  # must declare input shape
    tf.keras.layers.Dense(10)
])
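A quick sketch of calling the Sequential model above on a batch (batch size 32 is an arbitrary choice):

batch = tf.zeros([32, 784])
logits = model(batch)
print(logits.shape)   # (32, 10)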
In [40]:
class MNISTModel(tf.keras.Model):
    '''
    Feels like PyTorch.
    '''
    def __init__(self):
        super(MNISTModel, self).__init__()
        self.dense1 = tf.keras.layers.Dense(units=10)
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, input):
        """Run the model."""
        result = self.dense1(input)
        result = self.dense2(result)
        result = self.dense2(result)  # reuse variables from dense2 layer
        return result

model = MNISTModel()
In [41]:
model
Out[41]:
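Since call() routes the activations through dense2 twice, the model still owns only two Dense layers' worth of variables. A small sketch to check this (the 784-wide input is an assumption):

_ = model(tf.zeros([1, 784]))   # variables are built lazily on the first call
print(len(model.variables))     # 4: one kernel and one bias per Dense layer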
tf.GradientTape
In [44]:
tfe = tf.contrib.eager
In [46]:
w = tfe.Variable([[1.0]])
with tfe.GradientTape() as tape:
    loss = w * w
grad = tape.gradient(loss, [w])
print(grad)
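A tape can also track several variables at once; a small sketch with arbitrary values:

u = tfe.Variable(3.0)
v = tfe.Variable(2.0)
with tfe.GradientTape() as tape:
    z = u * u * v                        # z = u**2 * v
dz_du, dz_dv = tape.gradient(z, [u, v])
print(dz_du.numpy(), dz_dv.numpy())      # 12.0 and 9.0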
In [47]:
grad
Out[47]:
In [48]:
# A toy dataset of points around 3 * x + 2
NUM_EXAMPLES = 1000
training_inputs = tf.random_normal([NUM_EXAMPLES])
noise = tf.random_normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise

def prediction(input, weight, bias):
    return input * weight + bias

# A loss function using mean-squared error
def loss(weights, biases):
    error = prediction(training_inputs, weights, biases) - training_outputs
    return tf.reduce_mean(tf.square(error))

# Return the derivative of loss with respect to weight and bias
def grad(weights, biases):
    with tfe.GradientTape() as tape:
        loss_value = loss(weights, biases)
    return tape.gradient(loss_value, [weights, biases])

train_steps = 200
learning_rate = 0.01
# Start with arbitrary values for W and B on the same batch of data
W = tfe.Variable(5.)
B = tfe.Variable(10.)

print("Initial loss: {:.3f}".format(loss(W, B)))

for i in range(train_steps):
    dW, dB = grad(W, B)
    W.assign_sub(dW * learning_rate)
    B.assign_sub(dB * learning_rate)
    if i % 20 == 0:
        print("Loss at step {:03d}: {:.3f}".format(i, loss(W, B)))

print("Final loss: {:.3f}".format(loss(W, B)))
print("W = {}, B = {}".format(W.numpy(), B.numpy()))
In [50]:
batch = tf.zeros([1, 1, 784])
print(batch.shape) # => (1, 1, 784)
result = model(batch)
In [51]:
result
Out[51]:
In [ ]: