Last time, in Week 05, we covered Numpy and Matplotlib. This time we will be focusing on more advanced concepts of Numpy.
In [1]:
# Loading modules
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
In [2]:
x = np.array([1,2,3,5,6,7,8,10],dtype=float)
x
Out[2]:
In [3]:
y = np.arange(10)
y
Out[3]:
In [4]:
z = np.linspace(0,100,50)
z
Out[4]:
In [5]:
h = np.random.randn(100)
h
Out[5]:
In [6]:
print('Min X: {0:.3f} \t Max X: {1:.3f}'.format(np.min(x), np.max(x)) )
In [7]:
zz = x**2 + 3*x**3
zz
Out[7]:
In [8]:
zz_idx = np.where((zz >= 100) & (zz <= 500))[0]
print('zz_idx: {0}'.format(zz_idx))
zz[zz_idx]
Out[8]:
In [9]:
h1 = np.random.randint(10, 50, 50)
h1
Out[9]:
We can get the overall size and shape of the array. We can use the functions numpy.size and numpy.shape to get the total number of elements in an array and the shape of the array, respectively.
In [10]:
np.size(h1)
Out[10]:
In [11]:
h1.shape
Out[11]:
In [12]:
A = np.array([[ 1,  2,  3,  4,  5],
              [ 6,  7,  8,  9, 10],
              [12, 13, 14, 16, 17],
              [13, 45, 67, 89, 90]])
A
Out[12]:
In [13]:
np.shape(A)
Out[13]:
You can also transpose array A.
In [14]:
A_t = np.transpose(A)
A_t
Out[14]:
Why are Numpy arrays better than lists? Numpy arrays are memory-efficient.
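As a rough, illustrative check (not part of the original notebook), you can compare the memory footprint of a plain Python list against the equivalent Numpy array:
import sys
import numpy as np

n = 100000
lst = list(range(n))
arr = np.arange(n)

# A list stores pointers to boxed Python int objects, while the array
# stores raw integers in a single contiguous buffer
# (typically 8 bytes per element on 64-bit platforms).
lst_bytes = sys.getsizeof(lst) + sum(sys.getsizeof(i) for i in lst)
print('list : {0} bytes'.format(lst_bytes))
print('array: {0} bytes'.format(arr.nbytes))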
In [15]:
np.arange(0,10,1)
Out[15]:
In [16]:
np.arange(0,20,5)
Out[16]:
In [17]:
np.arange(-40,21,10)
Out[17]:
In [18]:
B = np.linspace(0,50)
B
Out[18]:
In [19]:
B = np.linspace(0,100, 20)
B
Out[19]:
An array of 25 elements from $10^{0}$ to $10^{3}$, with a base of 10.
In [20]:
B = np.logspace(0,3,25)
B
Out[20]:
Creating an array of 11 elements from $e^{0}$ to $e^{10}$, with base numpy.e.
In [21]:
B = np.logspace(0,10,11, base=np.e)
B
Out[21]:
In [22]:
from numpy import random
In [23]:
# Uniform random numbers in [0,1]
random.rand(5,5)
Out[23]:
In [24]:
# 20 Random integers from 10 to 30
random.randint(10,30,20)
Out[24]:
In [25]:
np.zeros(20)
Out[25]:
You can use these to populate other arrays.
In [26]:
nelem = 10
C = np.ones(nelem)
C
Out[26]:
In [27]:
for ii in range(C.size):
    C[ii] = random.rand()
C
Out[27]:
In [28]:
np.diag(random.randint(10,20,5))
Out[28]:
You can choose which values to select. Normally, you select the rows first, and then the columns of a numpy.ndarray.
In [29]:
M = random.rand(10,5)
M
Out[29]:
Selecting the 2nd row (index 1)
In [30]:
M[1,:]
Out[30]:
The 2nd column (index 1)
In [31]:
M[:,1]
Out[31]:
Selecting a range of rows and columns
In [32]:
M[1:3, 2:4]
Out[32]:
You can easily use this to create a mask for when you are cleaning your data.
In [33]:
A = random.rand(3,3)
np.fill_diagonal(A, np.nan)
A
Out[33]:
In [34]:
B = np.arange(0,9).reshape((3,3))
B
Out[34]:
Applying the mask from $A \to B$
In [35]:
A_mask = np.isfinite(A)
A_mask
Out[35]:
In [36]:
B[A_mask]
Out[36]:
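As a small illustrative sketch (reusing A and A_mask from above), you could then use the inverted mask to replace the bad entries with a fill value:
# Replacing the non-finite entries of `A` (where the mask is False) with zeros
A_clean = A.copy()
A_clean[~A_mask] = 0.0
A_clean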
In [37]:
# Creating my bin edges
bins = np.arange(0,13)
bins
Out[37]:
In [38]:
# Generating Data
data = 10*random.rand(100)
data
Out[38]:
Now I want to bin my data and calculate the mean for each bin.
In [39]:
# Defining statistical function to use
stat_func = np.nanmean
# Binning the data
data_bins = np.digitize(data, bins)
data_bins
Out[39]:
Calculating the mean for each of the bins
In [40]:
failval = -10
bins_stat = np.array([stat_func(data[data_bins == ii])
                      if len(data[data_bins == ii]) > 0
                      else failval
                      for ii in range(1, len(bins))])
bins_stat
Out[40]:
You can put all of this into a function that estimates errors and more...
In [41]:
import math

def myceil(x, base=10):
    """
    Returns the upper-bound integer of 'x' in base 'base'.

    Parameters
    ----------
    x: float
        number to be approximated to closest number to 'base'

    base: float
        base used to calculate the closest 'largest' number

    Returns
    -------
    n_high: float
        Closest float number to 'x', i.e. upper-bound float.

    Example
    -------
    >>> myceil(12, 10)
    20.0
    >>> myceil(12.05, 0.1)
    12.1
    """
    n_high = float(base * math.ceil(float(x) / base))

    return n_high

def myfloor(x, base=10):
    """
    Returns the lower-bound integer of 'x' in base 'base'.

    Parameters
    ----------
    x: float
        number to be approximated to closest number of 'base'

    base: float
        base used to calculate the closest 'smallest' number

    Returns
    -------
    n_low: float
        Closest float number to 'x', i.e. lower-bound float.

    Example
    -------
    >>> myfloor(12, 5)
    10.0
    """
    n_low = float(base * math.floor(float(x) / base))

    return n_low

def Bins_array_create(arr, base=10):
    """
    Generates array between [arr.min(), arr.max()] in steps of `base`.

    Parameters
    ----------
    arr: array_like, Shape (N,...), One-dimensional
        Array of numerical elements

    base: float, optional (default=10)
        Interval between bins

    Returns
    -------
    bins_arr: array_like
        Array of bin edges for given arr
    """
    base = float(base)
    arr = np.array(arr)
    assert(arr.ndim == 1)
    arr_min = myfloor(arr.min(), base=base)
    arr_max = myceil(arr.max(), base=base)
    bins_arr = np.arange(arr_min, arr_max + 0.5 * base, base)

    return bins_arr
In [42]:
def Mean_std_calc_one_array(x1, y1, arr_len=0, statfunc=np.nanmean,
                            failval=np.nan, error='std',
                            base=10.):
    """
    Calculates statistics of two arrays, e.g. scatter,
    error in `statfunc`, etc.

    Parameters
    ----------
    x1: array-like, shape (N,)
        array of x-values

    y1: array-like, shape (N,)
        array of y-values

    arr_len: int, optional (default = 0)
        minimum number of elements in the bin

    statfunc: numpy function, optional (default = numpy.nanmean)
        statistical function used to evaluate the bins

    failval: int or float, optional (default = numpy.nan)
        number to use when the number of elements in the
        bin is smaller than or equal to `arr_len`

    error: string, optional (default = 'std')
        type of error to evaluate
        Options:
            - 'std': Evaluates the standard deviation of the bin
            - 'stat': Evaluates the error in the mean/median of each bin
            - 'none': Does not calculate the error in `y1`

    base: float
        Value of bin width in units of that of `x1`

    Returns
    -------
    x1_stat: array-like
        `statfunc` of each bin in `base` spacings for x1

    y1_stat: array-like
        `statfunc` of each bin in `base` spacings for y1

    y1_err: array-like
        error in `y1_stat` for each bin, as selected by `error`
    """
    x1 = np.asarray(x1)
    y1 = np.asarray(y1)
    assert((x1.ndim == 1) & (y1.ndim == 1))
    assert((x1.size > 0) & (y1.size > 0))
    ## Computing bins
    x1_bins = Bins_array_create(x1, base=base)
    x1_digit = np.digitize(x1, x1_bins)
    ## Computing statistics in bins
    x1_stat = np.array([statfunc(x1[x1_digit == ii])
                        if len(x1[x1_digit == ii]) > arr_len
                        else failval
                        for ii in range(1, x1_bins.size)])
    y1_stat = np.array([statfunc(y1[x1_digit == ii])
                        if len(y1[x1_digit == ii]) > arr_len
                        else failval
                        for ii in range(1, x1_bins.size)])
    ## Computing error in the data
    stat_err = np.nanstd
    if error == 'std':
        # Standard deviation of each bin
        y1_err = np.array([stat_err(y1[x1_digit == ii])
                           if len(y1[x1_digit == ii]) > arr_len
                           else failval
                           for ii in range(1, x1_bins.size)])
    elif error == 'stat':
        # Error in the mean/median of each bin
        y1_err = np.array([stat_err(y1[x1_digit == ii]) / np.sqrt(len(y1[x1_digit == ii]))
                           if len(y1[x1_digit == ii]) > arr_len
                           else failval
                           for ii in range(1, x1_bins.size)])
        if (statfunc == np.median) or (statfunc == np.nanmedian):
            # The error in the median is ~1.253 times that in the mean
            y1_err *= 1.253
    else:
        y1_err = np.zeros(y1_stat.size)

    return x1_stat, y1_stat, y1_err
Example of using these functions:
In [43]:
import numpy as np
# Defining arrays
x_arr = np.arange(100)
y_arr = 50*np.random.randn(x_arr.size)
# Computing the binned mean and median of `y_arr`, with the scatter in each bin
x_stat, y_stat, y_err = Mean_std_calc_one_array(x_arr, y_arr,
statfunc=np.nanmean,
failval=np.nan,
base=10)
x_stat2, y_stat2, y_err2 = Mean_std_calc_one_array(x_arr, y_arr,
statfunc=np.nanmedian,
failval=np.nan,
base=10)
In [44]:
plt.style.use('seaborn-notebook')
plt.clf()
plt.close()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111,facecolor='white')
ax.plot(x_arr, y_arr, 'ro', label='Data')
ax.errorbar(x_stat, y_stat, yerr=y_err, color='blue', marker='o',
linestyle='--',label='Mean')
ax.errorbar(x_stat2, y_stat2, yerr=y_err2, color='green', marker='o',
linestyle='--',label='Median')
ax.set_xlabel('X axis', fontsize=20)
ax.set_ylabel('Y axis', fontsize=20)
ax.set_title('Data and the Binned Data', fontsize=24)
plt.legend(fontsize=20)
plt.show()
With this function, it is really easy to apply statistics to binned data, as well as to estimate errors on the data.
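For reference, SciPy (used later in this lesson) ships a ready-made version of this binning pattern. A minimal sketch with the x_arr and y_arr defined above might look like this:
from scipy import stats

# Mean of `y_arr` in 10 evenly spaced bins of `x_arr`;
# `bin_edges` are the bin boundaries and `binnum` is the bin index of each point
bin_means, bin_edges, binnum = stats.binned_statistic(x_arr, y_arr,
                                                      statistic='mean',
                                                      bins=10)
bin_means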
In [45]:
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
A
Out[45]:
In [46]:
n, m = A.shape
In [47]:
B = A.reshape((1,n*m))
B
Out[47]:
In [48]:
A_f = A.flatten()
A_f
Out[48]:
In [49]:
C = random.rand(A.size)
C
Out[49]:
In [50]:
C.shape
Out[50]:
In [51]:
# Stacking the two arrays
D = np.column_stack((A_f,C))
D
Out[51]:
In [52]:
# Selecting the 3rd to 10th rows
D[2:10]
Out[52]:
In [53]:
a = np.array([[1, 2], [3, 4]])
b = np.array([[5,6]])
In [54]:
np.concatenate((a,b))
Out[54]:
In [55]:
np.concatenate((a,b.T), axis=1)
Out[55]:
In [56]:
A = np.array([[1, 2], [3, 4]])
A
Out[56]:
In [57]:
# `B` is now referring to the same array data as `A`
B = A
If we make any changes to B, A will also be affected by this change.
In [58]:
B[0,0] = 10
B
Out[58]:
In [59]:
A
Out[59]:
To get a completely independent, new object, you would use:
In [60]:
B = np.copy(A)
# Modifying `B`
B[0,0] = -5
B
Out[60]:
In [61]:
A
Out[61]:
The array A was not affected by this change. This is important to keep in mind when you are constantly re-defining new arrays.
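Slices behave the same way: slicing returns a view that shares memory with the original array. Here is a quick illustrative check (M1 is a made-up example array; numpy.shares_memory verifies the behaviour):
M1 = np.array([[1, 2], [3, 4]])
view = M1[0, :]            # a slice is a *view*; it shares memory with `M1`
indep = np.copy(M1[0, :])  # np.copy returns an independent array

print(np.shares_memory(M1, view))   # True
print(np.shares_memory(M1, indep))  # False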
SciPy provides a large number of higher-level scientific algorithms. It includes, among others, modules for interpolation, integration, optimization, linear algebra, and spatial data structures.
In [62]:
import scipy as sc
In [63]:
from scipy.interpolate import interp1d
In [64]:
def f(x):
    return np.sin(x)
In [65]:
n = np.arange(0, 10)
x = np.linspace(0, 9, 100)
y_meas = f(n) + 0.1 * np.random.randn(len(n)) # simulate measurement with noise
y_real = f(x)
linear_interpolation = interp1d(n, y_meas)
y_interp1 = linear_interpolation(x)
cubic_interpolation = interp1d(n, y_meas, kind='cubic')
y_interp2 = cubic_interpolation(x)
In [66]:
fig, ax = plt.subplots(figsize=(15,6))
ax.set_facecolor('white')
ax.plot(n, y_meas, 'bs', label='noisy data')
ax.plot(x, y_real, 'k', lw=2, label='true function')
ax.plot(x, y_interp1, 'r', label='linear interp')
ax.plot(x, y_interp2, 'g', label='cubic interp')
ax.legend(loc=3, prop={'size':20});
ax.tick_params(axis='both', which='major', labelsize=20)
ax.tick_params(axis='both', which='minor', labelsize=15)
In [67]:
Lbox = 250.
Npts = 1000
# Creating cartesian coordinates
x = np.random.uniform(0, Lbox, Npts)
y = np.random.uniform(0, Lbox, Npts)
z = np.random.uniform(0, Lbox, Npts)
sample1 = np.vstack([x, y, z]).T
sample1
Out[67]:
In [68]:
sample1.shape
Out[68]:
Let's say we want to know how many pairs of points are separated by distances between 30 and 50. To compute this, you construct a KD-Tree.
In [69]:
from scipy.spatial import cKDTree
In [70]:
# Initializing KDTree
KD_obj = cKDTree(sample1)
In [71]:
N_neighbours = KD_obj.count_neighbors(KD_obj, 50) - \
               KD_obj.count_neighbors(KD_obj, 30)
print("Number of Neighbours: {0}".format(N_neighbours))
Let's say you want to get the distances to each point's k nearest neighbours.
In [72]:
k_nearest = 4
dist_k, dist_k_idx = KD_obj.query(sample1, k=k_nearest)
dist_k
Out[72]:
You can also get the indices:
In [73]:
dist_k_idx
Out[73]:
The first column corresponds to each point itself (a distance of zero).
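To keep only the true neighbours, you can simply drop that first column. A quick sketch using the arrays from above:
# Distances and indices of the nearest neighbours, excluding each point itself
dist_k_true = dist_k[:, 1:]
dist_k_idx_true = dist_k_idx[:, 1:]
dist_k_true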
You can also find pairs that are separated by at most a distance r.
In [74]:
pairs = KD_obj.query_ball_tree(KD_obj, 30)
pairs[0:10]
Out[74]:
So that's it for today's lesson.