API tutorial

Expression building

(Note: some examples may use an older version of the API.)


In [ ]:
from dynet import *

## ==== Create a new computation graph
# (it is a singleton: only one graph exists at any given time.
# renew_cg() clears the current one and starts anew)
renew_cg()

## ==== Creating Expressions from user input / constants.
# (value, dimension, dim1 and dim2 below are placeholders for numbers you supply)
x = scalarInput(value)

v = vecInput(dimension)
v.set([1,2,3])

z = matInput(dim1, dim2)

# for example:
z1 = matInput(2, 2)
z1.set([1,2,3,4]) # Column major

# Or directly from a numpy array
z1 = inputTensor([[1,2],[3,4]]) # Row major

## ==== We can take the value of an expression. 
# For complex expressions, this will run forward propagation.
print z.value()    
print z.npvalue()      # as numpy array
print v.vec_value()    # as vector, if vector
print x.scalar_value() # as scalar, if scalar
print x.value()        # choose the correct one
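
# For example (a small sketch using only the calls above; x2, y2 are illustrative names):
# values are computed lazily, so calling .value() / .npvalue() triggers
# forward propagation on the current graph.
x2 = scalarInput(3.0)
y2 = x2 * x2 + x2
print y2.value()   # 12.0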

## ==== Parameters
# Parameters are things we tune during training.
# Usually a matrix or a vector.

# First we create a parameter collection and add the parameters to it.
m = ParameterCollection() 
pW = m.add_parameters((8,8)) # an 8x8 matrix
pb = m.add_parameters(8)

# then we create an Expression out of the parameter collection's parameters
W = parameter(pW)
b = parameter(pb)

## ===== Lookup parameters
# Similar to parameters, but represent a "lookup table"
# that maps numbers to vectors.
# These are used for embedding matrices.
# for example, this will have VOCAB_SIZE rows, each of DIM dimensions.
lp  = m.add_lookup_parameters((VOCAB_SIZE, DIM))

# lookup parameters can be initialized from an existing numpy array, e.g.:
# lp.init_from_array(wv)

e5  = lookup(lp, 5)   # create an Expression from row 5.
e5  = lp[5]           # same
e5c = lookup(lp, 5, update=False)  # as before, but don't update when optimizing.

e5  = lookup_batch(lp, [4, 5])   # create a batched Expression from rows 4 and 5.
e5  = lp.batch([4, 5])           # same

e5.set(9)   # now the e5 expression contains row 9
e5c.set(9)  # ditto
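
# For example (a small sketch; e_sum is an illustrative name): lookup expressions
# can be combined like any other Expression.
e_sum = lp[3] + lp[4]   # sum of the embeddings of items 3 and 4
print e_sum.npvalue()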


## ===== Combine expressions into complex expressions.

# Math 
e = e1 + e2   
e = e1 * e2   # for vectors/matrices: matrix multiplication (like e1.dot(e2) in numpy)
e = e1 - e2    
e = -e1 

e = dot_product(e1, e2)
e = cmult(e1, e2)           # component-wise multiply  (like e1*e2 in numpy)
e = cdiv(e1, e2)            # component-wise divide
e = colwise_add(e1, e2)     # column-wise addition: add vector e2 to each column of matrix e1

# Matrix Shapes
e = reshape(e1, new_dimension)
e = transpose(e1)

# Per-element unary functions.
e = tanh(e1)      
e = exp(e1)
e = log(e1)
e = logistic(e1)   # Sigmoid(x)
e = rectify(e1)    # ReLU (= max(x,0))
e = softsign(e1)    # x/(1+|x|)

# softmaxes
e = softmax(e1)
e = log_softmax(e1, restrict=[]) # restrict is a list of indices.
                                 # if not empty, only entries in restrict take part
                                 # in the softmax computation; the rest get probability 0.


e = sum_cols(e1)   # sum the columns of a matrix, yielding a single column vector


# Picking values from vector expressions
e = pick(e1, k)              # k is an unsigned integer, e1 is a vector. returns e1[k]
e = e1[k]                    # same

e = pickrange(e1, k, v)      # like python's e1[k:v] for lists. e1 is an Expression, k,v integers.
e = e1[k:v]                  # same

e = pickneglogsoftmax(e1, k) # k is unsigned integer. equiv to: (pick(-log(softmax(e1)), k))
                             

# Neural net stuff
noise(e1, stddev) # add noise to each element, drawn from a Gaussian with standard deviation stddev
dropout(e1, p)    # apply dropout: zero each element with probability p (scaling the rest accordingly)

# functions over lists of expressions
e = esum([e1, e2, ...])            # sum
e = average([e1, e2, ...])         # average
e = concatenate_cols([e1, e2, ...])  # e1, e2,.. are column vectors. return a matrix. (similar to np.hstack([e1,e2,...]))
e = concatenate([e1, e2, ...])     # concatenate

e = affine_transform([e0,e1,e2, ...])  # e = e0 + ((e1*e2) + (e3*e4) ...) 

## Loss functions
e = squared_distance(e1, e2)
e = l1_distance(e1, e2)
e = huber_distance(e1, e2, c=1.345)

# e1 must be a scalar with a value between 0 and 1
# e2 (ty) must be a scalar with a value between 0 and 1
# e = -(ty * log(e1) + (1 - ty) * log(1 - e1))
e = binary_log_loss(e1, e2)

# e1 is row vector or scalar
# e2 is row vector or scalar
# m is number
# e = max(0, m - (e1 - e2))
e = pairwise_rank_loss(e1, e2, m=1.0) 
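
# For example (a small sketch; u1, u2 are illustrative names):
u1 = vecInput(3)
u1.set([1,2,3])
u2 = vecInput(3)
u2.set([1,2,4])
print squared_distance(u1, u2).value()   # (3-4)**2 = 1.0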

# Convolutions
# e1 \in R^{d x s} (input)
# e2 \in R^{d x m} (filter)
e = conv1d_narrow(e1, e2) # e = e1 *conv e2
e = conv1d_wide(e1, e2)   # e = e1 *conv e2
e = filter1d_narrow(e1, e2) # e = e1 *filter e2

e = kmax_pooling(e1, k) #  kmax-pooling operation (Kalchbrenner et al 2014)
e = kmh_ngram(e1, k) # 
e = fold_rows(e1, nrows=2) #
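
For instance, the pieces above can be combined into a small end-to-end computation. A minimal sketch, reusing the ParameterCollection m and the parameters pW and pb created above (the input values are arbitrary):


In [ ]:
renew_cg()
W = parameter(pW)          # the 8x8 weight matrix from above
b = parameter(pb)          # the 8-dim bias from above
x = vecInput(8)
x.set([1,2,3,4,5,6,7,8])
y = softmax(W * x + b)     # an 8-dim probability vector
print y.npvalue()          # forward propagation, as a numpy array
print pick(y, 0).value()   # probability assigned to entry 0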

Recipe


In [6]:
from dynet import *

# create parameter collection
m = ParameterCollection()

# add parameters to parameter collection
pW = m.add_parameters((10,30))
pB = m.add_parameters(10)
lookup = m.add_lookup_parameters((500, 10))
print "added"

# create trainer 
trainer = SimpleSGDTrainer(m)

# Regularization is set via the --dynet-l2 commandline flag.
# Learning rate parameters can be passed to the trainer:
# alpha = 0.1  # learning rate
# trainer = SimpleSGDTrainer(m, e0=alpha)
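
# Other trainers are available as well, for example (a sketch; see the DyNet
# documentation for the full list and their arguments):
# trainer = MomentumSGDTrainer(m)
# trainer = AdagradTrainer(m)
# trainer = AdamTrainer(m)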

# function for graph creation
def create_network_return_loss(inputs, expected_output):
    """
    inputs is a list of numbers
    """
    renew_cg()
    W = parameter(pW) # from parameters to expressions
    b = parameter(pB)
    emb_vectors = [lookup[i] for i in inputs]
    net_input = concatenate(emb_vectors)
    net_output = softmax( (W*net_input) + b)
    loss = -log(pick(net_output, expected_output))
    return loss

# function for prediction
def create_network_return_best(inputs):
    """
    inputs is a list of numbers
    """
    renew_cg()
    W = parameter(pW)
    b = parameter(pB)
    emb_vectors = [lookup[i] for i in inputs]
    net_input = concatenate(emb_vectors)
    net_output = softmax( (W*net_input) + b)
    return np.argmax(net_output.npvalue())


# train network
for epoch in xrange(5):
    for inp,lbl in ( ([1,2,3],1), ([3,2,4],2) ):
        print inp, lbl
        loss = create_network_return_loss(inp, lbl)
        print loss.value() # need to run loss.value() for the forward prop
        loss.backward()
        trainer.update()

print create_network_return_best([1,2,3])


added
[1, 2, 3] 1
2.71492385864
[3, 2, 4] 2
2.48228144646
[1, 2, 3] 1
2.00279903412
[3, 2, 4] 2
1.82602763176
[1, 2, 3] 1
1.44809651375
[3, 2, 4] 2
1.34181213379
[1, 2, 3] 1
1.03570735455
[3, 2, 4] 2
0.988352060318
[1, 2, 3] 1
0.744616270065
[3, 2, 4] 2
0.732948303223
1
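
After training, the learned parameters can be saved and restored. A minimal sketch, assuming the save/populate methods of ParameterCollection (available in recent DyNet versions; the file path is just an example):


In [ ]:
# save the trained parameters ...
m.save("/tmp/recipe.model")

# ... and later restore them into a collection that declares the same
# parameters in the same order.
m2 = ParameterCollection()
pW2 = m2.add_parameters((10,30))
pB2 = m2.add_parameters(10)
lookup2 = m2.add_lookup_parameters((500, 10))
m2.populate("/tmp/recipe.model")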

Recipe (using classes)


In [4]:
from dynet import *
# create parameter collection
m = ParameterCollection()

# create a class encapsulating the network
class OurNetwork(object):
    # The init method adds parameters to the parameter collection.
    def __init__(self, pc):
        self.pW = pc.add_parameters((10,30))
        self.pB = pc.add_parameters(10)
        self.lookup = pc.add_lookup_parameters((500,10))
    
    # the __call__ method applies the network to an input
    def __call__(self, inputs):
        W = parameter(self.pW)
        b = parameter(self.pB)
        lookup = self.lookup
        emb_vectors = [lookup[i] for i in inputs]
        net_input = concatenate(emb_vectors)
        net_output = softmax( (W*net_input) + b)
        return net_output
    
    def create_network_return_loss(self, inputs, expected_output):
        renew_cg()
        out = self(inputs)
        loss = -log(pick(out, expected_output))
        return loss
       
    def create_network_return_best(self, inputs):
        renew_cg()
        out = self(inputs)
        return np.argmax(out.npvalue())
        
        
# create network
network = OurNetwork(m)

# create trainer 
trainer = SimpleSGDTrainer(m)
   
# train network
for epoch in xrange(5):
    for inp,lbl in ( ([1,2,3],1), ([3,2,4],2) ):
        print inp, lbl
        loss = network.create_network_return_loss(inp, lbl)
        print loss.value() # need to run loss.value() for the forward prop
        loss.backward()
        trainer.update()

print
print network.create_network_return_best([1,2,3])


[1, 2, 3] 1
2.5900914669
[3, 2, 4] 2
2.00347089767
[1, 2, 3] 1
1.98409461975
[3, 2, 4] 2
1.50869822502
[1, 2, 3] 1
1.50195622444
[3, 2, 4] 2
1.12316584587
[1, 2, 3] 1
1.12293696404
[3, 2, 4] 2
0.831095397472
[1, 2, 3] 1
0.833912611008
[3, 2, 4] 2
0.61754822731

1

Or, alternatively, have the training loop outside of the network class:


In [ ]:
# create network
network = OurNetwork(m)

# create trainer 
trainer = SimpleSGDTrainer(m)
   
# train network
for epoch in xrange(5):
    for inp,lbl in ( ([1,2,3],1), ([3,2,4],2) ):
        print inp, lbl
        renew_cg()
        out = network(inp)
        loss = -log(pick(out, lbl))
        print loss.value() # need to run loss.value() for the forward prop
        loss.backward()
        trainer.update()

print
print np.argmax(network([1,2,3]).npvalue())


[1, 2, 3] 1
3.63615298271
[3, 2, 4] 2
3.29473733902
[1, 2, 3] 1
2.81605744362
[3, 2, 4] 2
2.46070289612
[1, 2, 3] 1
2.13946056366
[3, 2, 4] 2
1.77259361744
[1, 2, 3] 1
1.57904195786
[3, 2, 4] 2
1.2269589901
[1, 2, 3] 1
1.13014268875
[3, 2, 4] 2
0.830479979515

1
