In [1]:
# %load /Users/facai/Study/book_notes/preconfig.py
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.set(font='SimHei', font_scale=2.5)
from IPython.display import Image
drawbacks:
main changes:
get_variable => tf.Variable + scoped factory function
variable_scope => name_scope, variable_creator_scope
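A minimal before/after sketch of the API change (the v1 line assumes tf.compat.v1; initializers are illustrative):

import tensorflow as tf

# v1: variables are fetched by name and shared via variable_scope
v = tf.compat.v1.get_variable('v', shape=[10],
                              initializer=tf.zeros_initializer())

# v2: plain object construction; sharing means passing the object around
v = tf.Variable(tf.zeros([10]), name='v')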
feedback:
a[1].assign(5)
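For reference, sliced assignment on variables did land in TF 2.x; a quick check:

import tensorflow as tf

a = tf.Variable([1, 2, 3])
a[1].assign(5)  # assign through a slice of the variable
assert a.numpy().tolist() == [1, 5, 3]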
TODO:
variable_scope.variable
# wrapped into variable_scope.variable_creator_scope:
with ops.get_default_graph()._variable_creator_scope(custom_creator):
    ...

# wrapped into tf.Variable:
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
for getter in ops.get_default_graph()._variable_creator_stack:  # pylint: disable=protected-access
    previous_getter = _make_getter(getter, previous_getter)

with variable_creator_scope(custom_creator_function):
    tf.Variable(args)
tf.Variable(*args, **kwargs), using __init__ or __call__ => metaclass behavior (sketch below)
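A minimal sketch (hypothetical names, not TensorFlow's actual implementation) of how a metaclass lets cls(*args) be intercepted and routed to a creator:

class VariableMetaclass(type):
    def __call__(cls, *args, **kwargs):
        # Intercept construction; the real implementation walks the
        # graph's creator stack here instead of taking a fixed creator.
        creator = kwargs.pop('creator', None)
        if creator is not None:
            return creator(*args, **kwargs)
        return super().__call__(*args, **kwargs)

class Variable(metaclass=VariableMetaclass):
    def __init__(self, initial_value):
        self.initial_value = initial_value

v = Variable(0)                                   # normal construction
w = Variable(0, creator=lambda x: ('custom', x))  # routed to the creator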
tf.Print => tf.print, or tf.strings.format
print is a statement in Python 2, so it cannot be overloaded; in graph mode, force the print to run via a control dependency:

t = xxx  # any tensor
with tf.control_dependencies([tf.print(t)]):
    t_2 = 2 * t
problem:
Unify TF RNN and Keras RNN: port functionalities from TF RNN to Keras.
if activation == 'tanh' and dropout == 0 and use_bias:  # cuDNN kernel requires tanh
    self.could_use_cudnn = True
else:
    self.could_use_cudnn = False
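In the unified TF 2.x Keras layer, this check gates kernel selection at runtime; a sketch of what stays cuDNN-eligible (the eligible argument set is assumed from the layer defaults):

import tensorflow as tf

fast = tf.keras.layers.LSTM(64)                     # defaults keep the cuDNN path eligible
slow = tf.keras.layers.LSTM(64, activation='relu')  # non-tanh activation: generic kernel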
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export

@tf_export('keras.metrics.mean_squared_error',
           'keras.metrics.mse',
           'keras.metrics.MSE',
           'keras.losses.mean_squared_error',
           'keras.losses.mse',
           'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
    return K.mean(math_ops.square(y_pred - y_true), axis=-1)
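All six exported names resolve to the same function object; e.g. in TF 2.x:

import tensorflow as tf

assert tf.keras.losses.MSE is tf.keras.losses.mean_squared_error
assert tf.keras.metrics.mse is tf.keras.losses.mse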
Three common usages:
queue => tf.data
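A minimal sketch of the tf.data pipeline that replaces queue runners (data values are illustrative):

import tensorflow as tf

dataset = (tf.data.Dataset.from_tensor_slices(tf.range(8))
           .shuffle(buffer_size=8)
           .batch(4))
for batch in dataset:  # replaces the QueueRunner + Coordinator boilerplate
    print(batch)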
variable
class VariableTracker(object):
    def __init__(self):
        self.variables = []

    def variable_tracker(self, next_creator, **kwargs):
        v = next_creator(**kwargs)  # delegate to the next creator in the stack
        self.variables.append(v)
        return v

tracker = VariableTracker()
with tf.variable_creator_scope(tracker.variable_tracker):
    ...
    a = tf.Variable(0)
    ...
assert tracker.variables == [a]
update
def model_fn(features, labels, mode, params, config):
    logits = ...
    batch_norm = tf.keras.layers.BatchNormalization(momentum=0.99)
    logits = batch_norm(logits)
    train_op = ...
    # run the layer's update ops (moving statistics) with the train op
    train_op = tf.group(train_op, *batch_norm.updates)
    return tf.estimator.EstimatorSpec(..., train_op=train_op, ...)
Table: not yet decided
Summary, Condition => V2 design
Handling:
experimental: moving to a separate repository
tensorflow/addons: layer, metric, loss, optimizer, op or kernel
Team composition: first kickoff at the end of October
objective:
import tensorflow as tf

@tf.function
def compute_z1(x, y):
    return tf.add(x, y)

@tf.function
def compute_z0(x):
    return compute_z1(x, tf.square(x))

z0 = compute_z0(2.)
z1 = compute_z1(2., 2.)
Mechanism: @tf.function wraps the function in a class-like callable; graphs are cached by trace_cache_key.
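A small demonstration of the caching (the Python print fires only while a new graph is being traced):

import tensorflow as tf

@tf.function
def f(x):
    print('tracing f')  # Python side effect: runs only during tracing
    return tf.square(x)

f(tf.constant(1.0))  # prints 'tracing f': new cache key, graph built
f(tf.constant(2.0))  # silent: same dtype/shape, cached graph reused
f(tf.constant(1))    # prints again: int32 is a new trace_cache_key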
Properties:
For W, b, and c, the lifetime of the Python objects and the runtime state are tied together.
W = tf.Variable(
    tf.glorot_uniform_initializer()(
        (10, 10)))
b = tf.Variable(tf.zeros(10))
c = tf.Variable(0)

@tf.function
def f(x):
    c.assign_add(1)  # the state update is captured by the traced graph
    return tf.matmul(x, W) + b

print(f(make_input_value()))
assert int(c) == 1
a = tf.Variable(1.0)
b = tf.Variable(1.0)

@tf.function
def f():
    a.assign(2.0)
    b.assign(3.0)
    return a + b

print(f())  # 5.0
cache graph:
Too many traces
@tf.function
def f(x):
    return tf.square(x)

f(tf.constant(1, dtype=tf.int32))      # first trace
f(tf.constant(1.0, dtype=tf.float32))  # new dtype => second trace
f(2.0)  # Python scalars are hashed by value; use tf.constant instead.
f(3.0)  # another scalar value => yet another trace
# the released API spells the signature with tf.TensorSpec
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
def f(x):
    return tf.add(x, 1.)
class:

class ScalarModel(object):
    def __init__(self):
        self.v = tf.Variable(0)

    @tf.function
    def increment(self, amount):
        self.v.assign_add(amount)
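Usage sketch (TF 2.x eager): each method call runs the cached graph against the instance's state.

m = ScalarModel()
m.increment(tf.constant(3))
assert int(m.v) == 3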
# released API: use get_concrete_function with a TensorSpec to grab the graph
graph = f.get_concrete_function(
    tf.TensorSpec(shape=(None, 10), dtype=tf.float32)).graph
Derived/Related Graphs
@tf.function
def f(x):
return tf.square(x)
@tf.function
def g(x):
return tf.square(f(x))
g(2.0) # 16.0
Limitations:

def f(x, y):
    if tf.equal(y, 0.0):  # Python `if` on a tensor value: not traceable without autograph
        return y
    return x / y
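Without autograph, the data-dependent branch has to be written as tf.cond; a minimal sketch (with autograph, on by default in the released tf.function, the Python if above is converted automatically):

import tensorflow as tf

@tf.function(autograph=False)
def f(x, y):
    # both branches are built into the graph; the predicate picks one at runtime
    return tf.cond(tf.equal(y, 0.0), lambda: y, lambda: x / y)

print(f(tf.constant(4.0), tf.constant(2.0)))  # 2.0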
Other:
feedback:
TODO:
In [ ]: