In [1]:
# Sequential code
from time import sleep
def slowinc(x):
    sleep(1)  # take a bit of time to simulate real work
    return x + 1

In [ ]:
[slowinc(i) for i in range(10)]  # this takes 10 seconds
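
A quick sanity check of the claimed wall time (a sketch, not part of the original run; exact timing varies by machine):

In [ ]:
from time import perf_counter
start = perf_counter()
results = [slowinc(i) for i in range(10)]
print(results, f"elapsed ~{perf_counter() - start:.1f} s")  # roughly 10 s, one second per call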

In [ ]:
#from joblib import Parallel, delayed
#Parallel(n_jobs=4)(delayed(slowinc)(i) for i in range(10))  # ~3 seconds: 10 one-second tasks over 4 workers finish in ceil(10/4) = 3 rounds

In [2]:
from dask.distributed import Client
#client = Client()  # set up local cluster on your laptop
client = Client('104.155.153.59:8786')  # connect to a remote scheduler at this address
client


Out[2]:

Client
  • Scheduler: tcp://104.155.153.59:8786

Cluster
  • Workers: 8
  • Cores: 16
  • Memory: 54.19 GB
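
As a hedged sketch (not part of the original notebook), the same slowinc calls can also be run on the cluster through the Client's futures interface, without joblib:

In [ ]:
futures = client.map(slowinc, range(10))  # schedule one task per input on the workers
client.gather(futures)                    # block until all tasks finish and collect the results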

In [ ]:
client.get_versions(check=True)  # verify that client, scheduler and workers run matching package versions


In [5]:
import distributed.joblib  # registers the 'dask.distributed' joblib backend (older distributed API)
from joblib import Parallel, delayed
from joblib import parallel_backend

with parallel_backend('dask.distributed', scheduler_host='104.155.153.59:8786'):
    print(Parallel()(delayed(slowinc)(i) for i in range(25)))


[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
distributed.client - WARNING - Client report stream closed to scheduler
distributed.comm.tcp - WARNING - Closing dangling stream in <TCP local=tcp://10.80.77.249:63656 remote=tcp://104.155.153.59:8786>
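
The warnings above come from the connection being torn down without an explicit close. A minimal sketch of the same run with the newer API (assuming a recent distributed/joblib, where creating a Client registers a backend simply named 'dask'):

In [ ]:
from dask.distributed import Client
from joblib import Parallel, delayed, parallel_backend

client = Client('104.155.153.59:8786')  # connecting registers the 'dask' joblib backend
with parallel_backend('dask'):
    print(Parallel()(delayed(slowinc)(i) for i in range(25)))
client.close()  # closing the client cleanly avoids the dangling-stream warnings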