In [1]:
# importing
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# showing figures inline
%matplotlib inline
In [2]:
# global plotting options: larger font, LaTeX text rendering, wide default figures
font = {'size': 20}
# set the font options via rcParams (equivalent to plt.rc('font', **font))
plt.rcParams.update({'font.' + key: val for key, val in font.items()})
# render all text with an external LaTeX installation
plt.rcParams['text.usetex'] = True
# default figure size in inches (width, height)
matplotlib.rcParams['figure.figsize'] = (18, 6)
In [3]:
# showing example in Jondral 02, WT
# parameters: sample time and number of samples
t_sample = .01
N_samples = 5000
# vector of times
t = np.arange( 0, N_samples * t_sample, t_sample )
# parameters of gaussian
mu = 0
sigma2 = 1
# sample random white gaussian (standard normal, matching mu=0, sigma2=1)
X_sim = np.random.randn( len( t ) )
# get theoretical pdf on a +/- 3 standard-deviation grid
# NOTE: fixed the grid to use sqrt(sigma2) (the std. dev.) instead of sigma2,
# and included mu in the exponent, so the cell stays correct for other parameters;
# numerically identical for the current mu=0, sigma2=1
x = np.arange( -3*np.sqrt( sigma2 ), 3*np.sqrt( sigma2 ), .01 )
f_theo_norm = 1 / np.sqrt( 2*np.pi*sigma2 ) * np.exp( - (x - mu)**2 / 2 / sigma2 )
# plotting: realization over time; histogram and pdf drawn horizontally on the amplitude axis
plt.plot( t, X_sim, linewidth=2.0, label='$X(t)$')
# raw strings avoid the invalid-escape warning for '\c' in the LaTeX labels
plt.hist( X_sim, 100, color='#ff7f0e', density=1, orientation='horizontal', label=r'$H_{{{}}}(\cdot)$'.format(N_samples), alpha=0.75)
plt.plot( f_theo_norm, x, color='b', linewidth = 2.0, label=r'$f_{theo}(\cdot)$' )
plt.xlabel('$t/\\mathrm{s}$')
plt.grid( True )
plt.legend( loc = 'upper right' )
plt.xlim( ( 0, 200*t_sample ) )
Out[3]:
In [4]:
# print moments of the process (theoretical, Gaussian) and of the realization (time averages)
# raw moments of N(mu, sigma2): E(X)=mu, E(X^2)=mu^2+sigma2,
# E(X^3)=mu^3+3*mu*sigma2, E(X^4)=mu^4+6*mu^2*sigma2+3*sigma2^2
print( ' E(X) = {}, \tergodic = {:2.2f}'.format( mu, np.mean( X_sim ) ) )
print( ' E(X^2) = {}, \tergodic = {:2.2f}'.format( mu**2 + sigma2, np.mean( X_sim**2 ) ) )
print( ' E(X^3) = {}, \tergodic = {:2.2f}'.format( mu**3 + 3 * mu * sigma2, np.mean( X_sim**3 ) ) )
# NOTE: fixed typo in the fourth raw moment: mu*4 -> mu**4
# (latent for the current mu=0, but wrong for any mu != 0)
print( ' E(X^4) = {}, \tergodic = {:2.2f}'.format( mu**4 + 6 * mu**2 * sigma2 + 3 * sigma2**2, np.mean( X_sim**4 ) ) )
In [5]:
# get acf of realization by correlating in time
acf = np.correlate( X_sim, X_sim, mode='full' )
# normalize to the zero-lag value (which is the maximum of an ACF)
acf /= np.max( acf )
# determine vector of lag times matching the 'full' correlation output
# NOTE: ACF is symmetrical; negative lags are observed as well.
# The lags of a 'full' correlation of N samples run from -(N-1) to (N-1),
# 2N-1 values in total; the previous arange started one sample too early (-N)
# and shifted the whole lag axis. Building tau from the index vector also
# avoids floating-point length surprises of np.arange.
tau = ( np.arange( len( acf ) ) - ( N_samples - 1 ) ) * t_sample
# plotting
plt.plot( tau, acf, linewidth=2.0, label='$\\varphi_{XX}(\\tau)$')
plt.xlabel('$\\tau/\\mathrm{s}$')
plt.grid( True )
plt.legend( loc = 'upper right' )
plt.ylim( (-.3, 1.1) )
Out[5]:
In [6]:
# parameters of time
t_sample = .01
t = np.arange( 0, N_samples * t_sample, t_sample )
# sample white gaussian noise as input signal
mu = 0
sigma2 = 1
X_sim = np.random.randn( len( t ) )
# delays (in samples) for the 'full' cross-correlation:
# lags of a 'full' correlation of two N-sample signals run from -(N-1) to (N-1).
# NOTE: fixed off-by-one (old version spanned -N..N-2) and moved the definition
# after X_sim so it refers to this cell's signal instead of the previous cell's
k_corr = np.arange( -( len( X_sim ) - 1 ), len( X_sim ) )
# generate random delay (up to a tenth of the signal length) and extend X accordingly
delay = np.random.randint( int( len(t) / 10 ) )
X_extended = np.append( X_sim, np.zeros( delay ) )
# add noise to the observation
# NOTE: noise variance may be changed to see possible effects,
# for example, that results are fine up to quite large noise
D2_noise = 200
noise = np.sqrt( D2_noise ) * np.random.randn( len( X_sim ) )
# get output signal by delaying (circular shift into the zero padding) and adding noise
Y_sim = np.roll( X_extended, delay )[: len( X_sim ) ] + noise
# cross-correlating received/measured signal Y with input X
corr = np.correlate( Y_sim, X_sim, 'full')
# delay estimate: position of the correlation maximum relative to the
# zero-lag index (len(corr)-1)/2 of the 'full' correlation
delay_est = int( ( np.argmax( corr ) - ( len(corr) - 1 ) / 2 ) )
# showing delays (actual and estimated) and SNR
print('Delay as sampled: \t\t\t{}'.format( delay ) )
print('Delay as max. of ccf: \t\t\t{}'.format( delay_est ) )
print('Estimated Signal-to-Noise-Power-Ratio: \t{:2.2f} dB'.format( 10*np.log10( np.linalg.norm(X_sim)**2 / np.linalg.norm(Y_sim-X_sim)**2 ) ) )
In [7]:
# compare the ACF of the input with the CCF of the noisy, delayed observation;
# the peak of the CCF marks the estimated delay
acf_x = np.correlate( X_sim, X_sim, 'full' )
snr_db = 10*np.log10( np.linalg.norm(X_sim)**2 / np.linalg.norm(Y_sim-X_sim)**2 )
plt.plot( k_corr, acf_x, label='$\\varphi_{XX}(k)$' )
plt.plot( k_corr, corr, label='$\\varphi_{YX}(k)$')
plt.xlabel('$k$')
plt.ylabel('$\\varphi_{YX}(k)$')
plt.title('SNR = {:2.2f} dB'.format( snr_db ) )
plt.grid( True )
plt.legend( loc='upper right' )
Out[7]:
In [8]:
# show the noisy, delayed observation and the clean input in the time domain
snr_ratio = np.linalg.norm(X_sim)**2 / np.linalg.norm(Y_sim-X_sim)**2
plt.plot( Y_sim, color='C1', label='$Y[n]$')
plt.plot( X_sim, label='$X[n]$')
plt.xlabel('$n$')
plt.title('SNR = {:2.2f} dB'.format( 10*np.log10( snr_ratio ) ) )
plt.grid( True )
plt.legend( loc='upper right' )
Out[8]:
In [ ]: