In [1]:
from pyspark.context import SparkContext
from bigdl.util.common import *
sc = get_spark_context(conf=create_spark_conf())  # in case a SparkContext has not been created yet
init_engine()

In [3]:
import numpy as np
from bigdl.nn.layer import *

# Build a two-input, two-output graph with the functional API:
# two Linear nodes merged element-wise by CAddTable, then fanned
# out into a ReLU branch and a Threshold branch.
fc1 = Linear(4, 2)()
fc2 = Linear(4, 2)()
cadd = CAddTable()([fc1, fc2])
output1 = ReLU()(cadd)
output2 = Threshold(10.0)(cadd)
model = Model([fc1, fc2], [output1, output2])
# Initialize both Linear layers with constant weights and biases.
fc1.element().set_weights([np.ones((4, 2)), np.ones((2, ))])
fc2.element().set_weights([np.ones((4, 2)) * 2, np.ones((2, )) * 2])
output = model.forward([np.array([0.1, 0.2, -0.3, -0.4]),
                        np.array([0.5, 0.4, -0.2, -0.1])])
gradInput = model.backward([np.array([0.1, 0.2, -0.3, -0.4]),
                            np.array([0.5, 0.4, -0.2, -0.1])],
                           [np.array([1.0, 2.0]),
                            np.array([3.0, 4.0])])
weights = fc1.element().get_weights()[0]
print(weights)
print(gradInput)


creating: createLinear
creating: createLinear
creating: createCAddTable
creating: createReLU
creating: createThreshold
creating: createModel
[[ 1.  1.  1.  1.]
 [ 1.  1.  1.  1.]]
[array([ 3.,  3.,  3.,  3.], dtype=float32), array([ 6.,  6.,  6.,  6.], dtype=float32)]

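The printed gradInput can be checked by hand. The summed pre-activation is 0.6 + 3.2 = 3.8: positive, so ReLU passes its gradient [1., 2.] through unchanged, but below the threshold of 10.0, so Threshold blocks [3., 4.] entirely. CAddTable then routes the surviving gradient to both Linear branches, each of which maps it back through its weight matrix. A minimal NumPy sketch of that arithmetic (not part of the BigDL API; the (2, 4) layout assumes the output-by-input weight convention shown in the printed weights):

import numpy as np

grad_cadd = np.array([1.0, 2.0])  # ReLU passes [1., 2.]; Threshold contributes zeros
w1 = np.ones((2, 4))              # fc1 weights, all ones
w2 = np.ones((2, 4)) * 2          # fc2 weights, all twos
print(grad_cadd.dot(w1))          # [3. 3. 3. 3.] -> gradInput for the first input
print(grad_cadd.dot(w2))          # [6. 6. 6. 6.] -> gradInput for the second input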