| Software | Creator | Open source | Interface |
|---|---|---|---|
| Apache SINGA | Apache Incubator | Yes | Python, C++, Java |
| Caffe | Berkeley Vision and Learning Center | Yes | Python, MATLAB |
| Deeplearning4j | Skymind engineering team; Deeplearning4j community; originally Adam Gibson | Yes | Java, Scala, Clojure, Python (Keras) |
| Dlib | Davis King | Yes | C++ |
| Keras | François Chollet | Yes | Python |
| Microsoft Cognitive Toolkit | Microsoft Research | Yes | Python, C++, Command line, BrainScript (.NET on roadmap) |
| MXNet | Distributed (Deep) Machine Learning Community | Yes | C++, Python, Julia, Matlab, JavaScript, Go, R, Scala, Perl |
| Neural Designer | Artelnics | No | Graphical user interface |
| OpenNN | Artelnics | Yes | C++ |
| TensorFlow | Google Brain team | Yes | Python (Keras), C/C++, Java, Go, R |
| Theano | Université de Montréal | Yes | Python |
| Torch | Ronan Collobert, Koray Kavukcuoglu, Clement Farabet | Yes | Lua, LuaJIT, C, utility library for C++/OpenCL |
| Wolfram Mathematica | Wolfram Research | Yes | Command line, Java, C++ |
In [1]:
# Load Module
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
In [2]:
# Load dataset.
iris = datasets.load_iris() # load all 150 iris samples together with their class labels
x_train, x_test, y_train, y_test = model_selection.train_test_split(iris.data, iris.target, test_size=0.2, random_state=42) # 80/20 split, seeded for reproducibility
print('train and test ready')
In [3]:
x_train[:10]
# each column: sepal length, sepal width, petal length, petal width
Out[3]:
In [4]:
y_train[:10] # 0, 1, 2 encode the three iris species
Out[4]:
In [6]:
# Build a 3-hidden-layer DNN with 10, 20 and 10 units respectively
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x_train) # list feature column
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
In [9]:
# Train.
classifier.fit(x_train, y_train, steps=200) # run 200 training steps
predictions = list(classifier.predict(x_test, as_iterable=True)) # predict() yields a generator, so materialize it
In [10]:
# Score with sklearn.
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
In [11]:
# Classify two previously unseen flowers (same 4-feature layout as the training data)
new_samples = np.array(
[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)
y = list(classifier.predict(new_samples, as_iterable=True))
In [12]:
print('Predictions: {}'.format(str(y)))
In [ ]:
# A convolution layer written with raw TF ops (`...` is a placeholder for the real input tensor)
input = ...
with tf.name_scope('conv1_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
stddev=1e-1), name='weights') # 3x3 kernels, 64 in-channels, 128 out-channels
conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME') # stride 1, output keeps spatial size
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases') # one bias per output channel
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope)
In [ ]:
# The same conv layer in one line using tf.contrib.slim (weights/bias/relu handled internally)
input = ...
net = slim.conv2d(input, 128, [3, 3], scope='conv1_1')
In [1]:
import tensorflow as tf
In [2]:
a = tf.add(2, 3) # builds a graph node; nothing is computed yet
In [3]:
a = tf.add(3, 5)
In [4]:
print (a) # prints the Tensor object, not 8 — values exist only when run in a session
In [5]:
sess = tf.Session()
sess.run(a) # executing the node yields the actual value
Out[5]:
In [17]:
a = tf.add(3, 5)
with tf.Session() as sess:
print (sess.run(a)) # the context manager closes the session automatically
In [18]:
x = 2
y = 3
op1 = tf.add(x, y)
op2 = tf.multiply(x, y)
useless = tf.multiply(x, op1)
op3 = tf.pow(op2, op1)
with tf.Session() as sess:
op3 = sess.run(op3) # only the subgraph op3 depends on is executed; `useless` is never run
In [4]:
x = 2
y = 3
op1 = tf.add(x, y)
op2 = tf.multiply(x, y)
useless = tf.multiply(x, op1)
op3 = tf.pow(op2, op1)
with tf.Session() as sess:
op3, not_useless = sess.run([op3, useless]) # fetch a list to evaluate several nodes in one run
In [8]:
# Creates a graph.
with tf.device("/cpu:0"): # the device that executes these ops can be selected explicitly
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape = [2,3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape = [3,2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) # logs which device ran each op; when both CPU and GPU can run an op, GPU is preferred
# Runs the op.
print (sess.run(a))
print (sess.run(b))
print (sess.run(c))
In [9]:
a = tf.constant(2)
b = tf.constant(3)
x = tf.add(a, b)
with tf.Session() as sess:
writer = tf.summary.FileWriter('./graphs', sess.graph) # saves the graph so it can be viewed in TensorBoard
print (sess.run(x))
# close the writer when you’re done using it
writer.close()
In [10]:
# constant of 1d tensor (vector)
a = tf.constant([2, 2], name="vector")
# constant of 2x2 tensor (matrix)
b = tf.constant([[0, 1], [2, 3]], name="b")
In [11]:
with tf.Session() as sess:
print(sess.run(a))
print(sess.run(b))
In [54]:
with tf.Session() as sess:
print (sess.run(tf.zeros([2, 3], tf.int32))) # [[0, 0, 0], [0, 0, 0]]
In [12]:
import numpy as np
In [13]:
np.zeros((2,3), dtype=np.int32) # NumPy equivalent of tf.zeros above
Out[13]:
In [14]:
input_tensor = [[0, 1], [2, 3], [4, 5]]
with tf.Session() as sess:
print (sess.run(tf.zeros_like(input_tensor))) # [[0, 0], [0, 0], [0, 0]]
In [27]:
np.zeros_like(input_tensor) # NumPy counterpart
Out[27]:
In [57]:
with tf.Session() as sess:
print(sess.run(tf.ones([2, 3], tf.int32))) # [[1, 1, 1], [1, 1, 1]]
In [28]:
np.ones([2,3], dtype=np.int32) # NumPy counterpart
Out[28]:
In [58]:
input_tensor = [[0, 1], [2, 3], [4, 5]]
with tf.Session() as sess:
print(sess.run(tf.ones_like(input_tensor))) # [[1, 1], [1, 1], [1, 1]]
In [29]:
np.ones_like(input_tensor) # NumPy counterpart
Out[29]:
In [61]:
with tf.Session() as sess:
print(sess.run(tf.fill([2, 3], 8))) # [[8, 8, 8], [8, 8, 8]]
In [62]:
with tf.Session() as sess:
print(sess.run(tf.linspace(10.0, 13.0, 4, name="linspace"))) # [10.0 11.0 12.0 13.0] — 4 evenly spaced values, endpoints included
In [63]:
with tf.Session() as sess:
print(sess.run(tf.range(3, 18, 3))) # [3, 6, 9, 12, 15] — end is exclusive, like Python's range
In [67]:
# Python's range is iterable...
for _ in range(4):# OK
a
In [68]:
# ...but a TF tensor is not — this cell is an intentional error demonstration
for _ in tf.range(4): # TypeError("'Tensor' object is not iterable.")
a
In [45]:
with tf.Session() as sess:
print(sess.run(tf.random_normal(shape = [2,3]))) # samples from a normal distribution
In [46]:
with tf.Session() as sess:
print(sess.run(tf.truncated_normal(shape = [2,3]))) # normal samples; values beyond 2 stddev are re-drawn
In [52]:
with tf.Session() as sess:
print(sess.run(tf.multinomial(tf.random_normal(shape = [2,3]),5))) # draws 5 class samples per row of logits
In [51]:
with tf.Session() as sess:
print(sess.run(tf.random_gamma(shape = [2,3], alpha = 1))) # gamma-distributed samples
In [58]:
a = tf.constant([[2,1], [3,2], [7,3]])
In [42]:
with tf.Session() as sess:
print(sess.run(tf.random_shuffle(a))) # shuffles a along its first dimension
In [65]:
with tf.Session() as sess:
print(sess.run(tf.random_crop(a, [2,1]))) # random contiguous 2x1 slice of a
In [26]:
a = tf.constant([3, 6])
b = tf.constant([2, 2])
In [27]:
with tf.Session() as sess:
print(sess.run(tf.add(a, b))) # >> [5 8], adds exactly two inputs
In [28]:
with tf.Session() as sess:
print(sess.run(tf.add_n([a, b, b]))) # >> [7 10], sums every tensor in the list
In [29]:
with tf.Session() as sess:
print(sess.run(tf.multiply(a, b))) # >> [6 12] because mul is element wise
In [31]:
# matmul: matrix product of tensors with rank >= 2, hence the reshapes below
with tf.Session() as sess:
print(sess.run(tf.matmul(tf.reshape(a, shape=[1, 2]),
tf.reshape(b, shape=[2, 1]))))
In [32]:
with tf.Session() as sess:
print(sess.run(tf.div(a, b))) # >> [1 3], element-wise division
In [33]:
with tf.Session() as sess:
print(sess.run(tf.mod(a, b))) # >> [1 0], element-wise remainder
In [78]:
# 0-d constant tensor — a scalar
t_0 = 19
with tf.Session() as sess:
print(sess.run(tf.zeros_like(t_0))) # ==> 0
print(sess.run(tf.ones_like(t_0))) # ==> 1
In [79]:
# 1-d tensor — a vector of byte strings
t_1 = [b"apple", b"peach", b"grape"]
with tf.Session() as sess:
print(sess.run(tf.zeros_like(t_1))) # ==> ['' '' '']
print(sess.run(tf.ones_like(t_1))) # ==> TypeError: Expected string, got 1 of type 'int' instead.
In [68]:
# 2-d tensor — a matrix of booleans
t_2 = [[True, False, False],
[False, False, True],
[False, True, False]]
with tf.Session() as sess:
print(sess.run(tf.zeros_like(t_2))) # ==> 3x3 tensor, every element False
print(sess.run(tf.ones_like(t_2))) # ==> 3x3 tensor, every element True
In [84]:
my_const = tf.constant([1.0, 2.0], name="my_const")
print (tf.get_default_graph().as_graph_def()) # constants are stored inside the serialized GraphDef itself
In [85]:
# create a as a scalar variable
a = tf.Variable(2, name="scalar")
# create b as a vector variable
b = tf.Variable([2, 3], name="vector")
# create c as a 2x2 matrix variable
c = tf.Variable([[0, 1], [2, 3]], name="matrix")
# create W as a 784x10 tensor filled with zeros
W = tf.Variable(tf.zeros([784,10]))
In [88]:
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init) # variables must be initialized before they can be read
In [92]:
# Initialize only a chosen subset of variables (a and b) instead of all of them.
init_ab = tf.variables_initializer([a, b], name = "init_ab")
with tf.Session() as sess:
    # Fix: this cell previously ran `init` (the global initializer from the
    # preceding cell), so the subset initializer it just built was never used.
    sess.run(init_ab)
In [16]:
W = tf.Variable(tf.truncated_normal([700, 10]))
with tf.Session() as sess:
sess.run(W.initializer)
print (W) # prints the Variable object itself, not its value
In [17]:
with tf.Session() as sess:
sess.run(W.initializer)
print (W.eval()) # eval() fetches the actual value (equivalent to sess.run(W))
In [18]:
W = tf.Variable(10)
W.assign(100) # 100 is NOT assigned to W here — this only builds an assign op that is never run
with tf.Session() as sess:
sess.run(W.initializer)
print (W.eval()) # >> 10
In [19]:
W = tf.Variable(10)
assign_op = W.assign(100) # running the assign op also initializes W
with tf.Session() as sess:
sess.run(assign_op)
print (W.eval()) # >> 100
In [105]:
# create variable a with initial value 2
a = tf.Variable(2, name="scalar")
# a_times_two is an op that assigns a * 2 back to a
a_times_two = a.assign(a * 2)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init) # a must be initialized first, because a_times_two depends on a
sess.run(a_times_two) # >> 4
# sess.run(a_times_two) # >> 8
# sess.run(a_times_two) # >> 16
print (a_times_two.eval()) # NOTE(review): eval() runs the assign op once more, so this prints 8, not 4 — confirm intended
In [107]:
W = tf.Variable(10)
with tf.Session() as sess:
sess.run(W.initializer) # unlike assign, assign_add and assign_sub do NOT initialize the variable
print(sess.run(W.assign_add(10)))
print(sess.run(W.assign_sub(2)))
In [20]:
# each session keeps its own independent copy of the variable's value
W = tf.Variable(10)
sess1 = tf.Session()
sess2 = tf.Session()
sess1.run(W.initializer)
sess2.run(W.initializer)
print(sess1.run(W.assign_add(10))) # ==> 20
print(sess2.run(W.assign_sub(2))) # ==> 8
print(sess1.run(W.assign_add(100))) # ==> 120
print(sess2.run(W.assign_sub(50))) # ==> -42
sess1.close()
sess2.close()
In [108]:
# a variable whose initial value depends on another variable
W = tf.Variable(tf.truncated_normal([700, 10]))
U = tf.Variable(W * 2) # NOTE(review): W must be initialized before U; tf.Variable(W.initialized_value() * 2) is safer — confirm
In [21]:
sess = tf.InteractiveSession() # installs itself as the default session, so .eval() works without a `with` block
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
print(c.eval())
sess.close()
In [ ]:
# your graph g has 5 ops: a, b, c, d, e (illustrative pseudo-code, not runnable)
with g.control_dependencies([a, b, c]):
# d and e will only run after a, b and c have executed
d = ....
e = ....
In [120]:
# create a placeholder of type float 32-bit, shape is a vector of 3 elements
a = tf.placeholder(tf.float32, shape=[3])
# create a constant of type float 32-bit, shape is a vector of 3 elements
b = tf.constant([5, 5, 5], tf.float32)
# use the placeholder as you would a constant or a variable
c = a + b # Short for tf.add(a, b)
with tf.Session() as sess:
# feed [1, 2, 3] to placeholder a via the dict {a: [1, 2, 3]}
# fetch value of c
writer = tf.summary.FileWriter('./my_graph', sess.graph)
# print(sess.run(c)) # ==> Error — placeholders must be fed a value
print(sess.run(c, {a: [1, 2, 3]}))
In [69]:
# create Operations, Tensors, etc (using the default graph)
a = tf.add(2, 5)
b = tf.multiply(a, 3)
# start up a `Session` using the default graph
sess = tf.Session()
# define a dictionary that says to replace the value of `a` with 15
replace_dict = {a: 15}
# Run the session, passing in `replace_dict` as the value to `feed_dict`
sess.run(b, feed_dict=replace_dict) # returns 45 — any tensor, not just placeholders, can be overridden via feed_dict
Out[69]:
In [133]:
# Normal loading: the add node is created once, before the session runs
x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')
z = tf.add(x, y)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./my_graph/l2', sess.graph)
for _ in range(10):
sess.run(z) # re-running the same node adds nothing to the graph
writer.close()
In [ ]:
# Lazy loading (anti-pattern): the add node is created inside the loop
x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./my_graph/l2', sess.graph)
for _ in range(10):
sess.run(tf.add(x, y)) # someone decides to be clever to save one line of code — creates a NEW add node on every iteration, bloating the graph
writer.close()