MPI slides
We will use mpi4py together with ipyparallel. However, we will keep the use of ipyparallel features to a minimum; most of the parallel logic will follow the MPI standard.
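The ipyparallel cells below assume a cluster of MPI-aware engines is already running under a profile named mpi (the name used in the Client call later). A sketch of one way to create and start such a cluster; exact flag spellings vary across ipyparallel versions:
In [ ]:
!ipython profile create --parallel --profile=mpi
!ipcluster start -n 4 --engines=MPI --profile=mpi --daemonize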
In [4]:
%%writefile mpi001.py
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print "OK, rank= ",rank,size
Run the script under four MPI processes; whichever interpreter is used must have mpi4py installed:
In [12]:
!mpirun -n 4 /opt/conda/envs/py27/bin/python mpi001.py
In [20]:
!mpirun -n 4 python2 mpi001.py
In [21]:
!mpirun -n 4 python mpi001.py
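The interpreter's mpi4py must also be built against the same MPI implementation as mpirun. A quick sanity check (a sketch; MPI.Get_version() reports the MPI standard version mpi4py was built for):
In [ ]:
!mpirun -n 4 python -c "from mpi4py import MPI; print(MPI.Get_version())"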
Connect to the running cluster with ipyparallel and build a view over all engines:
In [14]:
import ipyparallel as ipp
c = ipp.Client(profile='mpi')
print(c.ids)
view = c[:]
view.activate()
In [24]:
%%writefile mpi001.py
from mpi4py import MPI
import os
import time
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
time.sleep(5)        # simulate work, so the asynchronous run is visible
a = 1.23 + rank      # a per-rank value we will read back from the engines
print("OK, rank=", rank, size, os.getpid())
In [25]:
ar = view.run('mpi001.py')
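view.run() returns immediately with an AsyncResult; it can be polled or waited on before inspecting outputs (ready() and wait() are part of ipyparallel's AsyncResult API):
In [ ]:
ar.wait()           # block until every engine has finished the script
print(ar.ready())   # True once all results are in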
In [35]:
ar
Out[35]:
In [30]:
ar.display_outputs()
In [32]:
ar = view.execute(open('mpi001.py').read())  # execute() takes a code string, not a filename
In [33]:
view['a']
Out[33]:
This is equivalent to:
In [36]:
view.pull('a', block=True)
Out[36]:
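pull can also be non-blocking; it then returns an AsyncResult whose value is fetched with get(). A small sketch of the same call with block=False:
In [ ]:
ar = view.pull('a', block=False)
print(ar.get())   # the same list of per-rank values as above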
Important: view.activate() registers this view as the target of the %px magics used below.
In [55]:
view.activate()
In [37]:
%%px
from mpi4py import MPI
import os
import time
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
time.sleep(5)
a = 1.23 + rank
print("OK, rank=", rank, size, os.getpid(), a)
Out[37]:
Without --block, %%px returns immediately; %pxresult displays the output of the most recent %px execution:
In [38]:
%pxresult
With --block, %%px waits for all engines and shows their output right away:
In [40]:
%%px --block
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
rank = comm.rank
print(comm.size, comm.rank)
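numpy is imported above but not yet used; as a sketch of the "MPI standard" logic promised in the introduction (not in the original notebook), here is a typical collective on numpy buffers, summing a per-rank array across all engines:
In [ ]:
%%px --block
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
sendbuf = np.full(3, comm.rank, dtype='d')    # each rank contributes its rank id
recvbuf = np.empty(3, dtype='d')
comm.Allreduce(sendbuf, recvbuf, op=MPI.SUM)  # elementwise sum over all ranks
print(comm.rank, recvbuf)                     # with 4 engines: [6. 6. 6.] on every rank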
Connect to the cluster again (for example after a kernel restart):
In [12]:
import ipyparallel as ipp
c = ipp.Client(profile='mpi')
print(c.ids)
view = c[:]
view.activate()
In [13]:
import os
print(view.apply_sync(os.getcwd))
In [14]:
view.map(os.chdir, ['ProgramowanieRownolegle/MPI']*len(c.ids))  # change the working directory on every engine
Out[14]:
In [15]:
print(view.apply_sync(os.getcwd))
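When finished, the cluster can be shut down from the shell (a sketch; assumes the same mpi profile as above):
In [ ]:
!ipcluster stop --profile=mpi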