In [1]:
import numpy as np
x = np.random.rand(10,1)

In [2]:
x


Out[2]:
array([[ 0.45871642],
       [ 0.54203033],
       [ 0.87051131],
       [ 0.95519652],
       [ 0.88706547],
       [ 0.26778672],
       [ 0.91209974],
       [ 0.42891815],
       [ 0.93760473],
       [ 0.85956294]])

In [3]:
from entropy import *

In [4]:
sampen(x, 3, .2*np.nanstd(x), scale='False')


entropy.py:135: RuntimeWarning: invalid value encountered in double_scalars
  return np.log(phi[0] / phi[1])
Out[4]:
nan

In [5]:
x


Out[5]:
array([[-0.98229551],
       [-0.65911908],
       [ 0.61506571],
       [ 0.94356145],
       [ 0.67927962],
       [-1.72291591],
       [ 0.77638812],
       [-1.09788363],
       [ 0.8753225 ],
       [ 0.57259674]])

In [6]:
%matplotlib inline

In [12]:
x = np.random.uniform(0, 1, 100)
print x


[ 0.3406064   0.93609219  0.09287116  0.98160245  0.35415332  0.35582736
  0.09499965  0.35504937  0.439993    0.51993592  0.45521146  0.68787607
  0.33282105  0.50132899  0.45684903  0.54462013  0.32593449  0.15673652
  0.54759404  0.80216888  0.86726189  0.50688216  0.03228189  0.493317
  0.6285932   0.27422955  0.74392975  0.68617432  0.89701449  0.53151096
  0.57687346  0.56925061  0.17221256  0.43819408  0.960681    0.90361505
  0.01186377  0.00370427  0.31694341  0.23018316  0.13085653  0.19507313
  0.93137937  0.28085951  0.6022596   0.10362895  0.43542715  0.07159372
  0.27193737  0.8764764   0.08446681  0.47562436  0.45376398  0.44607515
  0.30147826  0.17159588  0.27386805  0.35878197  0.93254302  0.00333709
  0.23232963  0.83667177  0.67953094  0.81273617  0.1118335   0.59972635
  0.60925938  0.95333006  0.39733952  0.00332773  0.35730669  0.6247951
  0.88863952  0.02210552  0.87368553  0.78541413  0.88740777  0.01203171
  0.11306778  0.42171631  0.74888631  0.80988023  0.5399705   0.28473775
  0.73531977  0.29123808  0.86301656  0.27275082  0.69837127  0.17235955
  0.72220691  0.68240817  0.40165893  0.85423956  0.83394295  0.02549719
  0.27638585  0.41131174  0.2418618   0.73117598]

In [13]:
"""
===============================================
FuzzyEn of Uniformly Distrubed Random Sequences
===============================================

Computes FuzzyEn of uniformly distributed random number sequences for different
values of fuzzy function width `r`. The result should look roughly linear.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

try:
    import entropy
except:
    sys.path.insert(0, '..')
    import entropy



def main():
    N = 100
    rs = np.logspace(-3, 0, 10)

    fig, ax = plt.subplots()
    
    es = []
    for r in rs:
        runs = []
        for i in range(50):
            runs.append(entropy.fuzzyen(x, 2, r, 2))
        es.append(np.mean(runs))

    ax.semilogx(rs, es, 'o')
    ax.set_ylim(0, 6)

    plt.show()


if __name__ == '__main__':
    main()


===============================================
FuzzyEn of Uniformly Distrubed Random Sequences
===============================================

Computes FuzzyEn of uniformly distributed random number sequences for different
values of fuzzy function width `r`. The result should look roughly linear.


In [14]:
x


Out[14]:
array([-0.46201349,  1.60862382, -1.32344433,  1.76687317, -0.41490779,
       -0.40908681, -1.31604308, -0.41179204, -0.11642404,  0.16155537,
       -0.06350605,  0.74552085, -0.4890849 ,  0.09685493, -0.05781183,
        0.24738789, -0.513031  , -1.10137018,  0.25772885,  1.14294254,
        1.36928548,  0.11616452, -1.53412677,  0.06899545,  0.53938104,
       -0.69282063,  0.94043205,  0.73960348,  1.47274193,  0.20180438,
        0.35953996,  0.33303361, -1.04755651, -0.12267928,  1.69412463,
        1.49569353, -1.60512515, -1.63349757, -0.54429498, -0.84597978,
       -1.19136072, -0.96806521,  1.59223631, -0.66976677,  0.44781323,
       -1.28603707, -0.13230051, -1.39743076, -0.70079105,  1.40132638,
       -1.35266812,  0.00747418, -0.06853926, -0.09527505, -0.59807079,
       -1.04970085, -0.69407766, -0.39881295,  1.59628258, -1.63477431,
       -0.83851602,  1.2629168 ,  0.71650297,  1.17968736, -1.25750805,
        0.43900454,  0.47215304,  1.66856375, -0.26473972, -1.63480686,
       -0.40394285,  0.5261742 ,  1.4436203 , -1.5695123 ,  1.39162191,
        1.0846825 ,  1.43933722, -1.6045412 , -1.25321615, -0.17997618,
        0.95766712,  1.16975661,  0.23122009, -0.65628128,  0.91049323,
       -0.63367817,  1.35452351, -0.69796252,  0.78201502, -1.04704541,
        0.86489685,  0.72650775, -0.24972018,  1.32400392,  1.25342809,
       -1.55771868, -0.68532268, -0.21615519, -0.80537054,  0.89608432])

In [16]:
def cross_samp_entropy(X1, X2, M, R):
	"""Computer sample entropy (SampEn) of series X, specified by M and R.

	SampEn is very close to ApEn. 

	Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
	embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of Em 
	is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension are
	1 and M-1 respectively. Such a matrix can be built by calling pyeeg function 
	as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only 
	difference with Em is that the length of each embedding sequence is M + 1

	Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elments 
	are	Em[i][k] and Em[j][k] respectively. The distance between Em[i] and Em[j]
	is defined as 1) the maximum difference of their corresponding scalar 
	components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two 1-D
	vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance between them 
	is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the value of R is
	defined as 20% - 30% of standard deviation of X. 

	Pick Em[i] as a template, for all j such that 0 < j < N - M , we can 
	check whether Em[j] matches with Em[i]. Denote the number of Em[j],  
	which is in the range of Em[i], as k[i], which is the i-th element of the 
	vector k.

	We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.

	The SampEn is defined as log(sum(Cm)/sum(Cmp))

	References
	----------

	Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biolgical
	signals, Physical Review E, 71:021906, 2005

	See also
	--------
	ap_entropy: approximate entropy of a time series


	Notes
	-----
	Extremely slow computation. Do NOT use if your dataset is not small and you
	are not patient enough.

	"""
    
    if len(X1) != len(X2):
        raise 'NameError'
    
	N = len(X1)

	Em = embed_seq(X, 1, M)	
	Emp = embed_seq(X, 1, M + 1)

	Cm, Cmp = zeros(N - M - 1) + 1e-100, zeros(N - M - 1) + 1e-100
	# in case there is 0 after counting. Log(0) is undefined.

	for i in xrange(0, N - M):
		for j in xrange(i + 1, N - M): # no self-match
#			if max(abs(Em[i]-Em[j])) <= R:  # v 0.01_b_r1 
			if in_range(Em[i], Em[j], R):
				Cm[i] += 1
#			if max(abs(Emp[i] - Emp[j])) <= R: # v 0.01_b_r1
				if abs(Emp[i][-1] - Emp[j][-1]) <= R: # check last one
					Cmp[i] += 1

	Samp_En = log(sum(Cm)/sum(Cmp))

	return Samp_En


  File "<ipython-input-16-87982958c1ad>", line 1
    def cross_samp_entropy([X1, X2], M, R):
                           ^
SyntaxError: invalid syntax

In [ ]: