Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0.
A device instance represents a hardware device with multiple execution units, e.g., a GPU with multiple CUDA streams or a CPU with multiple threads. All data structures (e.g., tensors) are allocated on a device instance; consequently, all operations on them are executed on that resident device.
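As a minimal preview of this model (a hedged sketch; each call is covered in detail in the cells below):
In [ ]:
# a tensor lives on exactly one device, and its operations run there;
# this preview uses the default host device
from singa import device, tensor
host = device.get_default_device()
t = tensor.Tensor((2, 3), host)  # allocated on the host device
t.set_value(0.5)                 # this fill executes on the host device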
In [1]:
from singa import device
default_dev = device.get_default_device()
gpu = device.create_cuda_gpu() # the first gpu device
gpu
NOTE: currently the device-creation function can only be called once per process, due to a restriction of the cnmem memory pool.
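Until that restriction is lifted, one workable pattern (a hedged sketch, not an API requirement) is to create the CUDA device once and reuse the same instance everywhere:
In [ ]:
# cache the device so create_cuda_gpu() runs at most once per process
_cuda_dev = None

def get_cuda_dev():
    global _cuda_dev
    if _cuda_dev is None:
        _cuda_dev = device.create_cuda_gpu()
    return _cuda_dev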
In [ ]:
gpu = device.create_cuda_gpu_on(1) # use the gpu device with the specified GPU ID
gpu_list1 = device.create_cuda_gpus(2) # the first two gpu devices
gpu_list2 = device.create_cuda_gpus([0,2]) # create the gpu instances on the given GPU IDs
opencl_gpu = device.create_opencl_device() # valid if SINGA is compiled with USE_OPENCL=ON
In [2]:
device.get_num_gpus()
In [3]:
device.get_gpu_ids()
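Together, these two helpers support defensive device selection. A hedged sketch (illustrative only, not from the original notebook):
In [ ]:
# prefer a GPU when one is visible, otherwise fall back to the host
if device.get_num_gpus() > 0:
    dev = device.create_cuda_gpu_on(device.get_gpu_ids()[0])
else:
    dev = device.get_default_device()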
In [4]:
from singa import tensor
import numpy as np
a = tensor.Tensor((2, 3))
a.shape
In [5]:
a.device
In [6]:
gb = tensor.Tensor((2, 3), gpu)
In [7]:
gb.device
In [8]:
a.set_value(1.2)     # set every element of a to 1.2
gb.gaussian(0, 0.1)  # fill gb with samples from N(mean=0, std=0.1)
In [9]:
tensor.to_numpy(a)
In [10]:
tensor.to_numpy(gb)
In [11]:
c = tensor.from_numpy(np.array([1,2], dtype=np.float32))
c.shape
In [12]:
c.copy_from_numpy(np.array([3,4], dtype=np.float32))
tensor.to_numpy(c)
In [13]:
gc = c.clone()
gc.to_device(gpu)
gc.device
In [14]:
b = gb.clone()
b.to_host() # the same as b.to_device(default_dev)
b.device
In [15]:
gb.l1()
In [16]:
a.l2()
In [17]:
e = tensor.Tensor((2, 3))
e.is_empty()
In [18]:
gb.size()
In [19]:
gb.memsize()
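size() counts elements while memsize() reports bytes, so for the default float32 element type the two should differ by a factor of 4. A quick hedged check (not part of the original notebook):
In [ ]:
# assumes gb holds float32 elements (4 bytes each)
assert gb.memsize() == gb.size() * 4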
In [20]:
# note: only matrix multiplication is supported on transposed tensors;
# other operations on a transposed tensor result in errors
c.is_transpose()
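To make the restriction concrete, here is a hedged sketch (illustrative; the exact exception type SINGA raises is an assumption here):
In [ ]:
m = tensor.Tensor((2, 3))
m.set_value(1.0)
mt = m.T()              # a transposed view of m
p = tensor.mult(m, mt)  # (2,3) x (3,2): matrix multiplication is supported
try:
    tensor.relu(mt)     # expected to fail, per the note above
except Exception as err:  # the exception type is SINGA-specific
    print('operation on a transposed tensor failed:', err)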
In [21]:
et = e.T()  # a transposed view of e
et.is_transpose()
In [22]:
et.shape
In [23]:
et.ndim()
In [24]:
a += b
tensor.to_numpy(a)
In [25]:
a -= b
tensor.to_numpy(a)
In [26]:
a *= 2
tensor.to_numpy(a)
In [27]:
a /= 3
tensor.to_numpy(a)
In [28]:
d = tensor.Tensor((3,))
d.uniform(-1, 1)
tensor.to_numpy(d)
In [29]:
a.add_row(d)
tensor.to_numpy(a)
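add_row adds the 1-D tensor d to every row of a, in place. A hedged numpy cross-check (illustrative only):
In [ ]:
# add_row should match numpy row broadcasting
before = tensor.to_numpy(a)
a.add_row(d)
np.testing.assert_allclose(tensor.to_numpy(a), before + tensor.to_numpy(d), rtol=1e-5)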
In [30]:
h = tensor.sign(d)
tensor.to_numpy(h)
In [31]:
tensor.to_numpy(d)
In [32]:
h = tensor.abs(d)
tensor.to_numpy(h)
In [33]:
h = tensor.relu(d)
tensor.to_numpy(h)
In [34]:
g = tensor.sum(a, 0)
g.shape
In [35]:
g = tensor.sum(a, 1)
g.shape
In [36]:
tensor.bernoulli(0.5, g)  # fill g in place with Bernoulli(p=0.5) samples
tensor.to_numpy(g)
In [37]:
g.gaussian(0, 0.2)          # fill g in place with N(0, 0.2) samples
tensor.gaussian(0, 0.2, g)  # module-level equivalent of the line above
tensor.to_numpy(g)
In [38]:
f = a + b
tensor.to_numpy(f)
In [39]:
g = a < b
tensor.to_numpy(g)
In [40]:
tensor.add_column(2, c, 1, f) # f = 2 * c + 1 * f
tensor.to_numpy(f)
In [41]:
tensor.axpy(2, a, f) # f = 2 * a + f
tensor.to_numpy(f)
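axpy follows BLAS conventions: axpy(alpha, x, y) updates y in place to alpha * x + y. A hedged numpy cross-check (illustrative only):
In [ ]:
y0 = tensor.to_numpy(f)
tensor.axpy(2, a, f)  # f = 2 * a + f, in place
np.testing.assert_allclose(tensor.to_numpy(f), 2 * tensor.to_numpy(a) + y0, rtol=1e-5)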
In [42]:
f = tensor.mult(a, b.T())
tensor.to_numpy(f)
In [43]:
tensor.mult(a, b.T(), f, 2, 1) # f = 2 * a * b.T() + 1 * f
tensor.to_numpy(f)
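With explicit alpha and beta arguments, mult behaves like a GEMM update: f = alpha * a * b.T + beta * f. A hedged numpy equivalence check (illustrative only):
In [ ]:
an, bn, f0 = tensor.to_numpy(a), tensor.to_numpy(b), tensor.to_numpy(f)
tensor.mult(a, b.T(), f, 2, 1)  # f = 2 * a.dot(b.T) + 1 * f, in place
np.testing.assert_allclose(tensor.to_numpy(f), 2 * an.dot(bn.T) + f0, rtol=1e-4)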