In [ ]:
import subprocess
# Print CPU details for the host running this notebook (Linux-specific: relies on lscpu).
print(subprocess.check_output("lscpu", shell=True).decode().strip())
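lscpu is Linux-specific; as a rough alternative (not part of the original notebook), similar host information can be pulled from the standard library, which also works where lscpu is unavailable:
In [ ]:
# Minimal sketch (assumption): portable fallback for hosts without lscpu.
import os
import platform
print('Machine: {} ...'.format(platform.machine()))
print('Processor: {} ...'.format(platform.processor()))
print('Logical CPUs: {} ...'.format(os.cpu_count()))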
In [ ]:
import tensorflow as tf
# Report the installed TensorFlow version and every device it can see.
print('Using tensorflow version: {} ...'.format(tf.__version__))
print('Visible devices for tensorflow: {} ...'.format(tf.config.list_physical_devices()))
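If a GPU is expected, it can also help to query GPU devices explicitly and enable memory growth so the test does not reserve all GPU memory up front. This is a minimal sketch and an assumption, not part of the original checks:
In [ ]:
# Minimal sketch (assumption): list only GPU devices and enable memory growth on each,
# so this notebook does not pre-allocate the whole GPU.
gpus = tf.config.list_physical_devices('GPU')
print('Visible GPUs for tensorflow: {} ...'.format(gpus))
for gpu in gpus:
    try:
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as err:
        # Memory growth must be set before the GPU has been initialized.
        print('Could not set memory growth: {} ...'.format(err))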
In [ ]:
# Simple sanity check: reduce a random 1000x1000 tensor on the default device.
print('Running tensorflow test: 1 ...')
print(tf.reduce_sum(tf.random.normal([1000, 1000])))
In [ ]:
import time

print('Running tensorflow test: 2 ...')

# Time a small matrix multiplication pinned to the CPU.
c_cpu = 'Failed ...'
cpu_time = '...'
start_cpu = time.time()
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c_cpu = tf.matmul(a, b)
end_cpu = time.time()
cpu_time = end_cpu - start_cpu

# Repeat the same multiplication pinned to the first GPU, if one is available.
c_gpu = 'Failed ...'
gpu_time = '...'
try:
    start_gpu = time.time()
    with tf.device('/gpu:0'):
        a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
        b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
        c_gpu = tf.matmul(a, b)
    end_gpu = time.time()
    gpu_time = end_gpu - start_gpu
except Exception:
    print('Tensorflow test using GPU failed ...')

print('Matrix multiplication result using CPU: {} in {} seconds ...'
      '\nMatrix multiplication result using GPU: {} in {} seconds ...'.format(c_cpu, cpu_time,
                                                                              c_gpu, gpu_time))
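The 2x3 matrices above finish in microseconds, so the timings mostly measure device setup rather than compute. A hedged sketch of a slightly more informative comparison follows; the 4000x4000 size, the warm-up call, and the time_matmul helper are assumptions, not part of the original test:
In [ ]:
# Minimal sketch (assumption): time a larger matmul on each visible device,
# with a warm-up call so one-time kernel/device initialization is excluded.
import time

def time_matmul(device, n=4000):
    with tf.device(device):
        x = tf.random.normal([n, n])
        tf.matmul(x, x)                    # warm-up (not timed)
        start = time.time()
        y = tf.matmul(x, x)
        _ = y.numpy()                      # force execution to finish before stopping the clock
        return time.time() - start

print('CPU: {:.3f} seconds ...'.format(time_matmul('/cpu:0')))
if tf.config.list_physical_devices('GPU'):
    print('GPU: {:.3f} seconds ...'.format(time_matmul('/gpu:0')))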
In [ ]: