This IPython Notebook is a demo of the GCC-NMF blind source separation algorithm, combining:
Separation is performed directly on the stereo mixture signal using no additional data:
This demo separates the speech sources from the data/dev1_female3_liverec_130ms_1m_mix.wav mixture, taken from the SiSEC 2016 Underdetermined speech mixtures "dev1" dataset, and saves results to the data directory.
In [1]:
from gccNMF.gccNMFFunctions import *
from gccNMF.gccNMFPlotting import *
from IPython import display
%matplotlib inline
In [2]:
# Preprocessing params: STFT analysis settings used by every later cell
windowSize = 1024          # STFT window length in samples
fftSize = windowSize       # FFT size equals the window length (no zero padding)
hopSize = 128              # hop between successive STFT frames, in samples
windowFunction = hanning   # analysis window; presumably numpy's Hann window via the star imports — confirm
# TDOA params
numTDOAs = 128             # number of candidate time-difference-of-arrival bins for GCC-PHAT
# NMF params
dictionarySize = 128       # number of NMF dictionary atoms (columns of W)
numIterations = 100        # NMF update iterations passed to performKLNMF
sparsityAlpha = 0          # sparsity weight; 0 presumably disables the sparsity penalty — confirm in performKLNMF
# Input params
mixtureFileNamePrefix = '../data/dev1_female3_liverec_130ms_1m'  # path prefix; full name built by getMixtureFileName
microphoneSeparationInMetres = 1.0   # distance between the two microphones of the stereo recording
numSources = 3             # number of speech sources to separate from the mixture
In [3]:
# Resolve the mixture wav file name from the prefix and load the stereo signal.
mixtureFileName = getMixtureFileName(mixtureFileNamePrefix)
stereoSamples, sampleRate = loadMixtureSignal(mixtureFileName)
# stereoSamples is indexed as (channel, sample) — see the unpack below.
numChannels, numSamples = stereoSamples.shape
durationInSeconds = numSamples / float(sampleRate)  # float() keeps true division under Python 2
In [4]:
# Print a textual summary of the mixture, plot its waveforms, and embed an inline audio player.
describeMixtureSignal(stereoSamples, sampleRate)
figure(figsize=(14, 6))
plotMixtureSignal(stereoSamples, sampleRate)
display.display( display.Audio(mixtureFileName) )  # playback of the input mixture
In [5]:
# Complex STFT of both channels; result is indexed (channel, frequency, time) per the unpack below.
complexMixtureSpectrogram = computeComplexMixtureSpectrogram( stereoSamples, windowSize,
                                                              hopSize, windowFunction )
numChannels, numFrequencies, numTime = complexMixtureSpectrogram.shape
frequenciesInHz = getFrequenciesInHz(sampleRate, numFrequencies)
frequenciesInkHz = frequenciesInHz / 1000.0  # kHz axis used by the plotting cells
In [6]:
# Print STFT parameters / shapes and plot the magnitude spectrograms of both channels.
describeMixtureSpectrograms(windowSize, hopSize, windowFunction, complexMixtureSpectrogram)
figure(figsize=(12, 8))
plotMixtureSpectrograms(complexMixtureSpectrogram, frequenciesInkHz, durationInSeconds)
In [7]:
# GCC-PHAT coherence: cross-spectrum of the two channels normalized to unit magnitude,
# so only the interchannel phase (i.e. the time-difference-of-arrival cue) remains.
# NOTE(review): time-frequency bins where either channel's magnitude is exactly 0 would
# produce NaN/inf here — presumably negligible after windowing; confirm.
spectralCoherenceV = complexMixtureSpectrogram[0] * complexMixtureSpectrogram[1].conj() \
    / abs(complexMixtureSpectrogram[0]) / abs(complexMixtureSpectrogram[1])
# Map coherence phase onto the numTDOAs candidate delays for each frame.
angularSpectrogram = getAngularSpectrogram( spectralCoherenceV, frequenciesInHz,
                                            microphoneSeparationInMetres, numTDOAs )
meanAngularSpectrum = mean(angularSpectrogram, axis=-1)  # average over time frames
# Pick numSources TDOA peaks as the estimated source directions.
targetTDOAIndexes = estimateTargetTDOAIndexesFromAngularSpectrum( meanAngularSpectrum,
                                                                  microphoneSeparationInMetres,
                                                                  numTDOAs, numSources)
In [8]:
# Visualize the GCC-PHAT localization: angular spectrogram, its time average,
# and the selected target TDOA indexes.
figure(figsize=(14, 6))
plotGCCPHATLocalization( spectralCoherenceV, angularSpectrogram, meanAngularSpectrum,
                         targetTDOAIndexes, microphoneSeparationInMetres, numTDOAs,
                         durationInSeconds )
In [9]:
# Stack the two channels' magnitude spectrograms side by side in time so that a
# single dictionary W is learned jointly over both channels.
V = concatenate( abs(complexMixtureSpectrogram), axis=-1 )
W, H = performKLNMF(V, dictionarySize, numIterations, sparsityAlpha)
# Split the coefficient matrix H back into its per-channel halves.
# numChannels was already set when the mixture was loaded (and again from the
# spectrogram shape), so the previous redundant recomputation from
# stereoSamples.shape is removed.
# NOTE(review): assumes H's columns are the concatenated per-channel frames,
# matching the concatenation of V above — confirm against performKLNMF.
stereoH = array( hsplit(H, numChannels) )
In [10]:
# Print NMF matrix shapes and plot V, the learned dictionary W, and coefficients H.
describeNMFDecomposition(V, W, H)
figure(figsize=(12, 12))
plotNMFDecomposition(V, W, H, frequenciesInkHz, durationInSeconds, numAtomsToPlot=15)
In [11]:
# Score each NMF atom against each target TDOA (the GCC-NMF association step),
# then derive binary/soft masks over the NMF coefficients for each source.
targetTDOAGCCNMFs = getTargetTDOAGCCNMFs( spectralCoherenceV, microphoneSeparationInMetres,
                                          numTDOAs, frequenciesInHz, targetTDOAIndexes, W,
                                          stereoH )
targetCoefficientMasks = getTargetCoefficientMasks(targetTDOAGCCNMFs, numSources)
In [12]:
# Visualize the per-source coefficient masks applied to the stereo NMF coefficients.
figure(figsize=(12, 12))
plotCoefficientMasks(targetCoefficientMasks, stereoH, durationInSeconds)
In [13]:
# Reconstruct a complex spectrogram estimate per source from the masked
# NMF coefficients and the mixture spectrogram.
targetSpectrogramEstimates = getTargetSpectrogramEstimates( targetCoefficientMasks,
                                                            complexMixtureSpectrogram, W,
                                                            stereoH )
In [14]:
# Plot the estimated spectrogram of each separated source.
figure(figsize=(12, 12))
plotTargetSpectrogramEstimates(targetSpectrogramEstimates, durationInSeconds, frequenciesInkHz)
In [15]:
# Invert the spectrogram estimates back to time-domain signals (inverse STFT with the
# same analysis parameters) and save one wav per source, named from mixtureFileNamePrefix.
targetSignalEstimates = getTargetSignalEstimates( targetSpectrogramEstimates, windowSize,
                                                  hopSize, windowFunction )
saveTargetSignalEstimates(targetSignalEstimates, sampleRate, mixtureFileNamePrefix)
In [16]:
# For each separated source: plot its waveform and embed an audio player for the
# wav file written by saveTargetSignalEstimates above.
for sourceIndex in range(numSources):
    figure(figsize=(14, 2))
    fileName = getSourceEstimateFileName(mixtureFileNamePrefix, sourceIndex)
    plotTargetSignalEstimate( targetSignalEstimates[sourceIndex], sampleRate,
                              'Source %d' % (sourceIndex+1) )
    display.display(display.Audio(fileName))
In [ ]: