In [1]:
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.models import Sequential, model_from_json
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.wrappers.scikit_learn import KerasClassifier


Using Theano backend.
WARNING (theano.sandbox.cuda): The cuda backend is deprecated and will be removed in the next release (v0.10).  Please switch to the gpuarray backend. You can get more information about how to switch at this URL:
 https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29

Using gpu device 0: GeForce GTX 980M (CNMeM is enabled with initial size: 80.0% of memory, cuDNN 5005)

In [2]:
import math

from matplotlib import pyplot as plt
%matplotlib inline

import numpy as np
from python_speech_features import fbank, logfbank

from scipy.ndimage.interpolation import shift
from scipy.signal import spectrogram

from sklearn.metrics import classification_report
from sklearn.model_selection import KFold, train_test_split
from sklearn.preprocessing import StandardScaler

import soundfile

from onset_detection.metrics import onset_metric
from onset_detection.read_data import read_data

Spectrogram tests


In [13]:
samples, sample_rate = soundfile.read(r'data\IDMT-SMT-GUITAR_V2\dataset2\audio\AR_Lick10_KN.wav')
print(samples.shape)
print(sample_rate)


(706816,)
44100
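
As a quick sanity check, the clip duration follows directly from the sample count and rate:

In [ ]:
# Duration in seconds = number of samples / sample rate.
print(samples.shape[0] / sample_rate)  # ~16.03 s, consistent with the time axis below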

In [26]:
f, t, Sxx = spectrogram(np.absolute(samples), sample_rate)  # spectrogram of the rectified (absolute-value) signal
print(f.shape)
print(t.shape)
print(Sxx.shape)
plt.figure(figsize=(10,10))
plt.pcolormesh(t, f, Sxx, cmap='RdBu')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')


(129,)
(3155,)
(129, 3155)
Out[26]:
<matplotlib.text.Text at 0x20a83336400>
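
The 129 frequency bins follow from scipy's defaults: with no nperseg given, spectrogram uses nperseg=256, i.e. nperseg//2 + 1 one-sided bins spaced sample_rate/nperseg apart. A quick check:

In [ ]:
# Frequency resolution implied by the scipy.signal.spectrogram defaults.
print(256 // 2 + 1)       # 129 frequency bins
print(sample_rate / 256)  # 172.265625 Hz bin spacing, matching f printed in the next cell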

In [29]:
f, t, Sxx = spectrogram(samples, sample_rate, mode='magnitude')
print(f.shape)
print(f)
print(t.shape)
print(t)
print(Sxx.shape)
plt.figure(figsize=(10,10))
plt.pcolormesh(t, f, Sxx, cmap='RdBu')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')


(129,)
[     0.          172.265625    344.53125     516.796875    689.0625
    861.328125   1033.59375    1205.859375   1378.125      1550.390625
   1722.65625    1894.921875   2067.1875     2239.453125   2411.71875
   2583.984375   2756.25       2928.515625   3100.78125    3273.046875
   3445.3125     3617.578125   3789.84375    3962.109375   4134.375
   4306.640625   4478.90625    4651.171875   4823.4375     4995.703125
   5167.96875    5340.234375   5512.5        5684.765625   5857.03125
   6029.296875   6201.5625     6373.828125   6546.09375    6718.359375
   6890.625      7062.890625   7235.15625    7407.421875   7579.6875
   7751.953125   7924.21875    8096.484375   8268.75       8441.015625
   8613.28125    8785.546875   8957.8125     9130.078125   9302.34375
   9474.609375   9646.875      9819.140625   9991.40625   10163.671875
  10335.9375    10508.203125  10680.46875   10852.734375  11025.
  11197.265625  11369.53125   11541.796875  11714.0625    11886.328125
  12058.59375   12230.859375  12403.125     12575.390625  12747.65625
  12919.921875  13092.1875    13264.453125  13436.71875   13608.984375
  13781.25      13953.515625  14125.78125   14298.046875  14470.3125
  14642.578125  14814.84375   14987.109375  15159.375     15331.640625
  15503.90625   15676.171875  15848.4375    16020.703125  16192.96875
  16365.234375  16537.5       16709.765625  16882.03125   17054.296875
  17226.5625    17398.828125  17571.09375   17743.359375  17915.625
  18087.890625  18260.15625   18432.421875  18604.6875    18776.953125
  18949.21875   19121.484375  19293.75      19466.015625  19638.28125
  19810.546875  19982.8125    20155.078125  20327.34375   20499.609375
  20671.875     20844.140625  21016.40625   21188.671875  21360.9375
  21533.203125  21705.46875   21877.734375  22050.      ]
(3155,)
[  2.90249433e-03   7.98185941e-03   1.30612245e-02 ...,   1.60130612e+01
   1.60181406e+01   1.60232200e+01]
(129, 3155)
Out[29]:
<matplotlib.text.Text at 0x20a839eee80>

In [17]:
def to_mel(freq):
    # Hz -> mel, HTK-style; 1125 approximates the exact constant 2595/ln(10) ≈ 1127.01
    return 1125 * math.log(1 + freq/700)

winlen = 0.046
winstep = 0.01
lowfreq = 27.5
highfreq = 16000
# features, _ = fbank(samples, sample_rate, nfilt=80)
features, _ = fbank(samples, sample_rate, winlen=winlen, winstep=winstep, nfilt=80, lowfreq=lowfreq, highfreq=highfreq, preemph=0)
# features = logfbank(samples, sample_rate, winstep=winstep, nfilt=80, lowfreq=lowfreq, highfreq=highfreq, preemph=0)
t_len, f_len = features.shape
mel_lowfreq = to_mel(lowfreq)
mel_highfreq = to_mel(highfreq)
f = np.arange(mel_lowfreq, mel_highfreq, (mel_highfreq - mel_lowfreq)/f_len)
t = np.arange(0.0, t_len*winstep, winstep)

plt.figure(figsize=(10,10))
plt.pcolormesh(t, f, features.T, cmap='RdBu')
plt.ylabel('Frequency [Mel]')
plt.xlabel('Time [sec]')


Out[17]:
<matplotlib.text.Text at 0x1be01226a20>
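
A quick check of the mel formula: with the exact HTK constant 2595/ln(10) ≈ 1127.01, 1000 Hz maps to 1000 mel, while the rounded constant 1125 used above lands about 0.2% low:

In [ ]:
# Compare the rounded mel constant against the exact HTK value.
print(to_mel(1000))                         # ~998.2
print(1127.01048 * math.log(1 + 1000/700))  # ~1000.0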

In [58]:
plt.matshow(features.T, origin='lower')


Out[58]:
<matplotlib.image.AxesImage at 0x20a98a22fd0>

In [19]:
plt.plot(np.absolute(samples))


Out[19]:
[<matplotlib.lines.Line2D at 0x20aea0fa278>]

In [2]:
samples, sample_rate = soundfile.read(r'data\IDMT-SMT-GUITAR_V2\dataset2\audio\LP_Lick8_KN.wav')
print(samples.shape)
print(sample_rate)


(657232,)
44100

In [8]:
def to_mel(freq):
    # Hz -> mel, HTK-style; 1125 approximates the exact constant 2595/ln(10) ≈ 1127.01
    return 1125 * math.log(1 + freq/700)

winlen = 0.046
winstep = 0.01
lowfreq = 27.5
highfreq = 16000

for nfft in [512, 1024, 2048, 4096]:
    # features, _ = fbank(samples, sample_rate, nfilt=80)
    # features, _ = fbank(samples, sample_rate, winlen=winlen, winstep=winstep, nfilt=80, nfft=nfft, lowfreq=lowfreq, highfreq=highfreq, preemph=0)
    features = logfbank(samples, sample_rate, winlen=winlen, winstep=winstep, nfilt=80, nfft=nfft, lowfreq=lowfreq, highfreq=highfreq, preemph=0)
    t_len, f_len = features.shape
    mel_lowfreq = to_mel(lowfreq)
    mel_highfreq = to_mel(highfreq)
    f = np.arange(mel_lowfreq, mel_highfreq, (mel_highfreq - mel_lowfreq)/f_len)
    t = np.arange(0.0, t_len*winstep, winstep)

    plt.figure(figsize=(10,10))
    plt.pcolormesh(t, f, features.T, cmap='RdBu')
    plt.ylabel('Frequency [Mel]')
    plt.xlabel('Time [sec]')



In [45]:
for j in range(features.shape[1]):
    print(str(j) + ' ' + str(features[:, j].mean()))


0 0.00846905107682
1 0.012509268175
2 0.109920004448
3 0.121226440889
4 2.22044604925e-16
5 0.124424883045
6 0.229177569573
7 0.167832682587
8 0.0989874644363
9 0.0760293318726
10 0.162408544505
11 0.0779480567678
12 0.125057948133
13 0.0604417465413
14 0.0279902610011
15 0.0119065347092
16 0.00505722484522
17 0.00288380917565
18 0.00180589753228
19 0.00113410473765
20 0.000742650644601
21 0.000691578039833
22 0.000510261579023
23 0.00038534596173
24 0.000356908464057
25 0.000289757144556
26 0.000242126201982
27 0.000229476285399
28 0.000235264929197
29 0.000198254305356
30 0.000181491678347
31 0.00016862104101
32 0.00014440376332
33 0.000143692758428
34 0.000132544087732
35 0.000122410588463
36 0.000113758089678
37 0.000110590088998
38 0.000102924919833
39 9.60875321525e-05
40 9.83139374522e-05
41 9.60516157077e-05

In [47]:
for j in range(features.shape[1]):
    print(str(j) + ' ' + str(features[:, j].mean()))


0 0.00846905107682
1 0.012509268175
2 0.109920004448
3 0.121226440889
4 0.124424883045
5 0.178417774877
6 0.10151958939
7 0.117072887892
8 0.0989874644363
9 0.143134764722
10 0.123500790462
11 0.105867326086
12 0.0817650518943
13 0.0476176946562
14 0.0279902610011
15 0.0119065347092
16 0.0058628537748
17 0.00283804582149
18 0.00161800356748
19 0.001031534069
20 0.000705041533134
21 0.000499324204019
22 0.000445479987077
23 0.000398975061579
24 0.000320452602069
25 0.000293064046532
26 0.000268355969962
27 0.000223720753439
28 0.000206707036796
29 0.000190167200047
30 0.000162017477958
31 0.000161561686359
32 0.000148715292673
33 0.00013572493359
34 0.000132509133194
35 0.000115403276522
36 0.000106881414547
37 0.000109093427884
38 0.000105552124339
39 9.83139374522e-05
40 9.60516157077e-05
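
The per-band means above fall off by roughly two orders of magnitude towards the high bands. A minimal illustration of removing this scale difference by standardizing each band separately, as prepare_data does below:

In [ ]:
# Column-wise standardization brings every band to mean ~0, std ~1.
features_std = StandardScaler().fit_transform(features)
print(features_std.mean(axis=0)[:5])
print(features_std.std(axis=0)[:5])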

2D ConvNet


In [3]:
def transform_X(X_part, n_frames, frame_rate_hz, sample_rate, log_transform_magnitudes=True,
                winlen=0.046, nfilt=80, nfft=2048,
                lowfreq=27.5, highfreq=16000, preemph=0):
    """Last (winlen - winstep) seconds will be cut off"""
    
    winstep = 1 / frame_rate_hz
    samples = X_part.ravel()
    if log_transform_magnitudes:
        filterbank = logfbank(samples, sample_rate, winlen=winlen, winstep=winstep, nfilt=nfilt,
                              nfft=nfft, lowfreq=lowfreq, highfreq=highfreq, preemph=preemph)
    else:
        filterbank, _ = fbank(samples, sample_rate, winlen=winlen, winstep=winstep, nfilt=nfilt,
                              nfft=nfft, lowfreq=lowfreq, highfreq=highfreq, preemph=preemph)
    
    if n_frames is None:
        n_frames = filterbank.shape[0]
    return filterbank[:n_frames, :], n_frames

def transform_labels(y_part, y_actual_onset_only_part, ds_label, n_frames):
    ds_labels_part = np.empty(n_frames, dtype=np.int8)
    ds_labels_part.fill(ds_label)
    return y_part[:n_frames], y_actual_onset_only_part[:n_frames], ds_labels_part

def transform_all(X_parts, y_parts, y_actual_onset_only_parts, ds_labels, frame_rate_hz, sample_rate):
    n_frames_list = [None] * len(X_parts)
    X_channels = []
    # Create one channel per (winlen, nfft) pair (up to 3 channels with different window lengths).
    # Process the largest window first, since it cuts off the most frames at the end of each file;
    # its frame count per part (= per file) is then reused for the smaller nfft values.
    for winlen, nfft in sorted(
        # [(0.023, 1024), (0.046, 2048), (0.092, 4096)],
        [(0.046, 2048)],
        
        key=lambda t: t[1], reverse=True
    ):
        transformed = [transform_X(X_part, n_frames, frame_rate_hz, sample_rate, winlen=winlen, nfft=nfft)
                       for X_part, n_frames
                       in zip(X_parts, n_frames_list)]
        X = np.concatenate([t[0] for t in transformed])
        n_frames_list = [t[1] for t in transformed]
        X_channels.append(X)
    
    transformed = [transform_labels(y_part, y_actual_onset_only_part, ds_label, n_frames)
                   for y_part, y_actual_onset_only_part, ds_label, n_frames
                   in zip(y_parts, y_actual_onset_only_parts, ds_labels, n_frames_list)]
    y = np.concatenate([t[0] for t in transformed])
    y_actual_onset_only = np.concatenate([t[1] for t in transformed])
    ds_labels_flat = np.concatenate([t[2] for t in transformed])
    
    return X_channels, y, y_actual_onset_only, ds_labels_flat

def get_X_with_context_frames(X, c=7, border_value=0.0):
    """Return new X with new dimensions (X.shape[0] = n_samples, 2*c + 1, X.shape[1] = filterbank_size)
    
    One entry of X_new consists of c frames of context before the current frame,
    the current frame and another c frames of context after the current frame.
    """
    
    n_samples = X.shape[0]
    filterbank_size = X.shape[1]
    X_new = np.empty((n_samples, 2*c + 1, filterbank_size))
    for i in range(n_samples):
        for offset in range(-c, c + 1):
            if i + offset > -1 and i + offset < n_samples:
                # X_new 2nd dim: [0, 2*c + 1)
                # X 1st dim: [i-c, i+c+1)
                X_new[i, offset + c, :] = X[i + offset, :]
            else:
                X_new[i, offset + c].fill(border_value)
    return X_new

def create_model(input_shape):
    model = Sequential()
    
    model.add(Conv2D(10, (7, 3), padding='valid', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 3)))
    model.add(Conv2D(20, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 3)))
    model.add(Dropout(0.25))
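    # Shape check for the (1, 15, 80) input used below:
    # conv1 (7, 3) valid -> (10, 9, 78); pool (1, 3) -> (10, 9, 26);
    # conv2 (3, 3) valid -> (20, 7, 24); pool (1, 3) -> (20, 7, 8);
    # Flatten then yields 20*7*8 = 1120 units feeding Dense(256).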

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    return model

def prepare_data(
    X_parts_train, y_parts_train, y_actual_onset_only_parts_train, ds_labels_train,
    X_parts_test, y_parts_test, y_actual_onset_only_parts_test, ds_labels_test,
    frame_rate_hz, sample_rate
):
    print('Creating spectrograms')
    X_channels_train, y_train, y_actual_onset_only_train, ds_labels_flat_train = transform_all(
        X_parts_train, y_parts_train, y_actual_onset_only_parts_train, ds_labels_train, frame_rate_hz, sample_rate
    )
    X_channels_test, y_test, y_actual_onset_only_test, ds_labels_flat_test = transform_all(
        X_parts_test, y_parts_test, y_actual_onset_only_parts_test, ds_labels_test, frame_rate_hz, sample_rate
    )
    
    print('Spectrogram data infos:')
    for X_channels, y, y_actual_onset_only, ds_labels_flat in [
        (X_channels_train, y_train, y_actual_onset_only_train, ds_labels_flat_train),
        (X_channels_test, y_test, y_actual_onset_only_test, ds_labels_flat_test),
    ]:
        for X in X_channels:
            print(X.shape)
        print(y.shape)
        print(y.sum())
        print(y_actual_onset_only.shape)
        print(y_actual_onset_only.sum())
        print(ds_labels_flat.shape)
        print(ds_labels_flat.mean())
    for X_train, X_test in zip(X_channels_train, X_channels_test):
        print(X_train.shape)
        print(X_test.shape)
        print(X_train.mean())
        print(X_train.std())
        print(X_test.mean())
        print(X_test.std())
        print('')
    
    print('Standardizing (each band separately)')
    for X_train, X_test in zip(X_channels_train, X_channels_test):
        for j in range(X_train.shape[1]):
            ss = StandardScaler()
            X_train[:, j:j+1] = ss.fit_transform(X_train[:, j:j+1])
            X_test[:, j:j+1] = ss.transform(X_test[:, j:j+1])
    
    print('Data after standardizing:')
    for X_train, X_test in zip(X_channels_train, X_channels_test):
        print(X_train.shape)
        print(X_test.shape)
        print(X_train.mean())
        print(X_train.std())
        print(X_test.mean())
        print(X_test.std())
        print('')
    for i in range(len(X_channels_train)):
        X_channels_train[i] = get_X_with_context_frames(X_channels_train[i])
        X_channels_test[i] = get_X_with_context_frames(X_channels_test[i])
        print(X_channels_train[i].shape)
        print(X_channels_test[i].shape)
    
    print('Reshaping data')
    img_rows, img_cols = (X_channels_train[0].shape[1], X_channels_train[0].shape[2])
    for i in range(len(X_channels_train)):
        # Theano is 3 times faster with channels_first vs. channels_last on MNIST, so this setting matters.
        # "image_data_format": "channels_first" @ %USERPROFILE%/.keras/keras.json
        if K.image_data_format() == 'channels_first':
            X_channels_train[i] = X_channels_train[i].reshape(X_channels_train[i].shape[0], 1, img_rows, img_cols)
            X_channels_test[i] = X_channels_test[i].reshape(X_channels_test[i].shape[0], 1, img_rows, img_cols)
        else:
            X_channels_train[i] = X_channels_train[i].reshape(X_channels_train[i].shape[0], img_rows, img_cols, 1)
            X_channels_test[i] = X_channels_test[i].reshape(X_channels_test[i].shape[0], img_rows, img_cols, 1)
        print(X_channels_train[i].shape)
        print(X_channels_test[i].shape)

    if K.image_data_format() == 'channels_first':
        input_shape = (len(X_channels_train), img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, len(X_channels_train))
    print(input_shape)
    
    print('Concatenating channels')
    X_train = np.concatenate(X_channels_train, axis=1)
    X_test = np.concatenate(X_channels_test, axis=1)
    print(X_train.shape)
    print(X_test.shape)
    
    return (X_train, y_train, y_actual_onset_only_train, ds_labels_flat_train,
            X_test, y_test, y_actual_onset_only_test, ds_labels_flat_test,
            input_shape)

def fit_predict(X_train, y_train, y_actual_onset_only_train, X_test, y_test, y_actual_onset_only_test, input_shape):
    clf = KerasClassifier(
        build_fn=create_model,
        batch_size=1024, epochs=500,
        validation_data=(X_test, y_test),
        callbacks=[EarlyStopping(monitor='loss', patience=5)],  # monitors training loss, not val_loss
        input_shape=input_shape
    )
    clf.fit(X_train, y_train, verbose=2)
    y_train_predicted = clf.predict(X_train).ravel()
    y_test_predicted = clf.predict(X_test).ravel()

    model = clf.model
    # for layer in model.layers:
        # print('layer config:')
        # print(layer.get_config())
        # print('input shape: ' + str(layer.input_shape))
        # print('output shape: ' + str(layer.output_shape))
    print('TRAIN')
    print(classification_report(y_train, y_train_predicted))
    print(onset_metric(y_train, y_actual_onset_only_train, y_train_predicted, n_tolerance_frames_plus_minus=2))
    print(onset_metric(y_train, y_actual_onset_only_train, y_train_predicted, n_tolerance_frames_plus_minus=5))
    print('TEST')
    print(classification_report(y_test, y_test_predicted))
    print(onset_metric(y_test, y_actual_onset_only_test, y_test_predicted, n_tolerance_frames_plus_minus=2))
    print(onset_metric(y_test, y_actual_onset_only_test, y_test_predicted, n_tolerance_frames_plus_minus=5))
    print('')
    
    return clf
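
A minimal toy example of how get_X_with_context_frames zero-pads at the file boundaries:

In [ ]:
# Toy input: 4 frames, 2 filterbank bands, context c=1.
X_toy = np.arange(8, dtype=float).reshape(4, 2)
X_ctx = get_X_with_context_frames(X_toy, c=1)
print(X_ctx.shape)  # (4, 3, 2): [previous frame, current frame, next frame]
print(X_ctx[0])     # first context row is all zeros (border_value): no previous frame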

In [4]:
# active_datasets = {1, 2, 3, 4}
# active_datasets = {2}
# active_datasets = {4}
active_datasets = {1, 2}
frame_rate_hz = 100
expected_sample_rate = 44100
subsampling_step = 1
X_parts, y_parts, y_actual_onset_only_parts, ds_labels = read_data(
    active_datasets, frame_rate_hz, expected_sample_rate, subsampling_step
)


D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping AR_Lick11_FN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping AR_Lick11_KN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping AR_Lick11_MN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:189: UserWarning: Skipping data\IDMT-SMT-GUITAR_V2\dataset2\audio\desktop.ini, not a .wav file.
  warn('Skipping ' + path_to_wav + ', not a .wav file.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping FS_Lick11_FN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping FS_Lick11_KN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping FS_Lick11_MN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping LP_Lick11_FN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping LP_Lick11_KN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
D:\Users\Michel\Documents\FH\module\8_IP6\git\onset_detection\read_data.py:187: UserWarning: Skipping LP_Lick11_MN.wav, no truth found.
  warn('Skipping ' + wav_file + ', no truth found.')
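
At frame_rate_hz = 100 each spectrogram frame advances by winstep = 1/100 = 10 ms, so the onset tolerances of ±2 and ±5 frames reported below correspond to ±20 ms and ±50 ms.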

Fit / predict


In [ ]:
X_parts_train, X_parts_test, y_parts_train, y_parts_test, y_actual_onset_only_parts_train, y_actual_onset_only_parts_test, ds_labels_train, ds_labels_test = train_test_split(
    X_parts, y_parts, y_actual_onset_only_parts, ds_labels, test_size=0.2, random_state=42
)
# Free the raw per-file arrays to save memory.
X_parts = None
y_parts = None
y_actual_onset_only_parts = None
ds_labels = None

(
    X_train, y_train, y_actual_onset_only_train, ds_labels_flat_train,
    X_test, y_test, y_actual_onset_only_test, ds_labels_flat_test,
    input_shape
) = prepare_data(
    X_parts_train, y_parts_train, y_actual_onset_only_parts_train, ds_labels_train,
    X_parts_test, y_parts_test, y_actual_onset_only_parts_test, ds_labels_test,
    frame_rate_hz, expected_sample_rate
)
X_parts_train = None
X_parts_test = None
y_parts_train = None
y_parts_test = None
y_actual_onset_only_parts_train = None
y_actual_onset_only_parts_test = None
ds_labels_train = None
ds_labels_test = None

clf = fit_predict(X_train, y_train, y_actual_onset_only_train, X_test, y_test, y_actual_onset_only_test, input_shape)
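
Since model_from_json is imported at the top, a minimal sketch (not executed here; file names are placeholders) of persisting the fitted model for later reuse:

In [ ]:
# Sketch: save the underlying Keras model as JSON architecture + HDF5 weights.
model = clf.model
with open('model.json', 'w') as f:
    f.write(model.to_json())
model.save_weights('model_weights.h5')

# Reload later with:
# with open('model.json') as f:
#     model = model_from_json(f.read())
# model.load_weights('model_weights.h5')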

CV on dataset 1 + 2


In [5]:
ds12 = [t for t
        in zip(X_parts, y_parts, y_actual_onset_only_parts, ds_labels) if t[3] == 1 or t[3] == 2]
# ds34 = [t for t
#         in zip(X_parts, y_parts, y_actual_onset_only_parts, ds_labels) if t[3] != 1 and t[3] != 2]
X_parts = None
y_parts = None
y_actual_onset_only_parts = None
ds_labels = None

k_fold = KFold(n_splits=5, shuffle=True, random_state=42)
clfs = []
for train_indices, test_indices in k_fold.split(ds12):
    X_parts_train = [t[0] for t in [ds12[i] for i in train_indices]]#  + [t[0] for t in ds34]
    y_parts_train = [t[1] for t in [ds12[i] for i in train_indices]]#  + [t[1] for t in ds34]
    y_actual_onset_only_parts_train = [t[2] for t in [ds12[i] for i in train_indices]]#  + [t[2] for t in ds34]
    ds_labels_train = [t[3] for t in [ds12[i] for i in train_indices]]#  + [t[3] for t in ds34]
    X_parts_test = [t[0] for t in [ds12[i] for i in test_indices]]
    y_parts_test = [t[1] for t in [ds12[i] for i in test_indices]]
    y_actual_onset_only_parts_test = [t[2] for t in [ds12[i] for i in test_indices]]
    ds_labels_test = [t[3] for t in [ds12[i] for i in test_indices]]
    
    (
        X_train, y_train, y_actual_onset_only_train, ds_labels_flat_train,
        X_test, y_test, y_actual_onset_only_test, ds_labels_flat_test,
        input_shape
    ) = prepare_data(
        X_parts_train, y_parts_train, y_actual_onset_only_parts_train, ds_labels_train,
        X_parts_test, y_parts_test, y_actual_onset_only_parts_test, ds_labels_test,
        frame_rate_hz, expected_sample_rate
    )
    X_parts_train = None
    X_parts_test = None
    y_parts_train = None
    y_parts_test = None
    y_actual_onset_only_parts_train = None
    y_actual_onset_only_parts_test = None
    ds_labels_train = None
    ds_labels_test = None

    clf = fit_predict(X_train, y_train, y_actual_onset_only_train, X_test, y_test, y_actual_onset_only_test, input_shape)
    clfs.append(clf)


Creating spectrograms
Spectrogram data infos:
(354959, 80)
(354959,)
10706
(354959,)
3803
(354959,)
1.78630207996
(86329, 80)
(86329,)
2159
(86329,)
775
(86329,)
1.76515423554
(354959, 80)
(86329, 80)
-12.4376983605
6.44601379577
-12.6196319939
6.65364390715

Standardizing (each band separately)
Data after standardizing:
(354959, 80)
(86329, 80)
-1.73592629135e-16
1.0
-0.0303297910017
1.03965938446

(354959, 15, 80)
(86329, 15, 80)
Reshaping data
(354959, 1, 15, 80)
(86329, 1, 15, 80)
(1, 15, 80)
Concatenating channels
(354959, 1, 15, 80)
(86329, 1, 15, 80)
Train on 354959 samples, validate on 86329 samples
Epoch 1/500
11s - loss: 0.0577 - acc: 0.9799 - val_loss: 0.0346 - val_acc: 0.9864
Epoch 2/500
10s - loss: 0.0366 - acc: 0.9851 - val_loss: 0.0315 - val_acc: 0.9876
Epoch 3/500
11s - loss: 0.0341 - acc: 0.9863 - val_loss: 0.0266 - val_acc: 0.9887
Epoch 4/500
11s - loss: 0.0318 - acc: 0.9870 - val_loss: 0.0251 - val_acc: 0.9891
Epoch 5/500
11s - loss: 0.0306 - acc: 0.9876 - val_loss: 0.0242 - val_acc: 0.9899
Epoch 6/500
11s - loss: 0.0297 - acc: 0.9879 - val_loss: 0.0249 - val_acc: 0.9900
Epoch 7/500
11s - loss: 0.0285 - acc: 0.9884 - val_loss: 0.0233 - val_acc: 0.9904
Epoch 8/500
11s - loss: 0.0277 - acc: 0.9886 - val_loss: 0.0233 - val_acc: 0.9905
Epoch 9/500
11s - loss: 0.0272 - acc: 0.9887 - val_loss: 0.0219 - val_acc: 0.9908
Epoch 10/500
11s - loss: 0.0267 - acc: 0.9891 - val_loss: 0.0221 - val_acc: 0.9907
Epoch 11/500
11s - loss: 0.0262 - acc: 0.9895 - val_loss: 0.0219 - val_acc: 0.9909
Epoch 12/500
11s - loss: 0.0256 - acc: 0.9896 - val_loss: 0.0216 - val_acc: 0.9913
Epoch 13/500
11s - loss: 0.0252 - acc: 0.9895 - val_loss: 0.0219 - val_acc: 0.9911
Epoch 14/500
11s - loss: 0.0251 - acc: 0.9897 - val_loss: 0.0210 - val_acc: 0.9914
Epoch 15/500
11s - loss: 0.0248 - acc: 0.9899 - val_loss: 0.0212 - val_acc: 0.9915
Epoch 16/500
11s - loss: 0.0239 - acc: 0.9902 - val_loss: 0.0215 - val_acc: 0.9908
Epoch 17/500
11s - loss: 0.0238 - acc: 0.9901 - val_loss: 0.0205 - val_acc: 0.9918
Epoch 18/500
11s - loss: 0.0235 - acc: 0.9904 - val_loss: 0.0206 - val_acc: 0.9916
Epoch 19/500
11s - loss: 0.0235 - acc: 0.9905 - val_loss: 0.0209 - val_acc: 0.9916
Epoch 20/500
11s - loss: 0.0230 - acc: 0.9904 - val_loss: 0.0203 - val_acc: 0.9917
Epoch 21/500
11s - loss: 0.0222 - acc: 0.9908 - val_loss: 0.0207 - val_acc: 0.9920
Epoch 22/500
11s - loss: 0.0223 - acc: 0.9907 - val_loss: 0.0213 - val_acc: 0.9921
Epoch 23/500
11s - loss: 0.0221 - acc: 0.9909 - val_loss: 0.0206 - val_acc: 0.9918
Epoch 24/500
11s - loss: 0.0216 - acc: 0.9909 - val_loss: 0.0203 - val_acc: 0.9919
Epoch 25/500
11s - loss: 0.0216 - acc: 0.9911 - val_loss: 0.0203 - val_acc: 0.9919
Epoch 26/500
11s - loss: 0.0211 - acc: 0.9912 - val_loss: 0.0204 - val_acc: 0.9919
Epoch 27/500
11s - loss: 0.0209 - acc: 0.9914 - val_loss: 0.0201 - val_acc: 0.9920
Epoch 28/500
11s - loss: 0.0205 - acc: 0.9915 - val_loss: 0.0202 - val_acc: 0.9920
Epoch 29/500
11s - loss: 0.0203 - acc: 0.9917 - val_loss: 0.0204 - val_acc: 0.9922
Epoch 30/500
11s - loss: 0.0203 - acc: 0.9916 - val_loss: 0.0204 - val_acc: 0.9920
Epoch 31/500
11s - loss: 0.0199 - acc: 0.9918 - val_loss: 0.0206 - val_acc: 0.9922
Epoch 32/500
11s - loss: 0.0198 - acc: 0.9918 - val_loss: 0.0205 - val_acc: 0.9920
Epoch 33/500
11s - loss: 0.0193 - acc: 0.9920 - val_loss: 0.0204 - val_acc: 0.9921
Epoch 34/500
11s - loss: 0.0192 - acc: 0.9921 - val_loss: 0.0207 - val_acc: 0.9918
Epoch 35/500
11s - loss: 0.0188 - acc: 0.9923 - val_loss: 0.0213 - val_acc: 0.9913
Epoch 36/500
11s - loss: 0.0185 - acc: 0.9922 - val_loss: 0.0211 - val_acc: 0.9919
Epoch 37/500
11s - loss: 0.0185 - acc: 0.9923 - val_loss: 0.0213 - val_acc: 0.9916
Epoch 38/500
11s - loss: 0.0180 - acc: 0.9927 - val_loss: 0.0209 - val_acc: 0.9916
Epoch 39/500
11s - loss: 0.0178 - acc: 0.9926 - val_loss: 0.0208 - val_acc: 0.9917
Epoch 40/500
11s - loss: 0.0175 - acc: 0.9928 - val_loss: 0.0208 - val_acc: 0.9915
Epoch 41/500
11s - loss: 0.0174 - acc: 0.9929 - val_loss: 0.0211 - val_acc: 0.9918
Epoch 42/500
11s - loss: 0.0172 - acc: 0.9929 - val_loss: 0.0214 - val_acc: 0.9919
Epoch 43/500
11s - loss: 0.0173 - acc: 0.9930 - val_loss: 0.0208 - val_acc: 0.9920
Epoch 44/500
11s - loss: 0.0168 - acc: 0.9932 - val_loss: 0.0211 - val_acc: 0.9919
Epoch 45/500
11s - loss: 0.0169 - acc: 0.9931 - val_loss: 0.0208 - val_acc: 0.9920
Epoch 46/500
11s - loss: 0.0164 - acc: 0.9932 - val_loss: 0.0219 - val_acc: 0.9917
Epoch 47/500
11s - loss: 0.0161 - acc: 0.9934 - val_loss: 0.0211 - val_acc: 0.9921
Epoch 48/500
11s - loss: 0.0158 - acc: 0.9935 - val_loss: 0.0220 - val_acc: 0.9918
Epoch 49/500
11s - loss: 0.0158 - acc: 0.9935 - val_loss: 0.0213 - val_acc: 0.9917
Epoch 50/500
11s - loss: 0.0155 - acc: 0.9936 - val_loss: 0.0221 - val_acc: 0.9913
Epoch 51/500
11s - loss: 0.0153 - acc: 0.9937 - val_loss: 0.0215 - val_acc: 0.9920
Epoch 52/500
11s - loss: 0.0148 - acc: 0.9939 - val_loss: 0.0221 - val_acc: 0.9915
Epoch 53/500
11s - loss: 0.0147 - acc: 0.9940 - val_loss: 0.0231 - val_acc: 0.9912
Epoch 54/500
11s - loss: 0.0144 - acc: 0.9940 - val_loss: 0.0223 - val_acc: 0.9917
Epoch 55/500
11s - loss: 0.0147 - acc: 0.9940 - val_loss: 0.0222 - val_acc: 0.9916
Epoch 56/500
11s - loss: 0.0142 - acc: 0.9942 - val_loss: 0.0228 - val_acc: 0.9914
Epoch 57/500
11s - loss: 0.0138 - acc: 0.9944 - val_loss: 0.0226 - val_acc: 0.9917
Epoch 58/500
11s - loss: 0.0140 - acc: 0.9943 - val_loss: 0.0230 - val_acc: 0.9916
Epoch 59/500
11s - loss: 0.0135 - acc: 0.9944 - val_loss: 0.0236 - val_acc: 0.9911
Epoch 60/500
11s - loss: 0.0137 - acc: 0.9943 - val_loss: 0.0225 - val_acc: 0.9913
Epoch 61/500
11s - loss: 0.0134 - acc: 0.9945 - val_loss: 0.0247 - val_acc: 0.9915
Epoch 62/500
11s - loss: 0.0131 - acc: 0.9946 - val_loss: 0.0229 - val_acc: 0.9916
Epoch 63/500
11s - loss: 0.0135 - acc: 0.9946 - val_loss: 0.0225 - val_acc: 0.9913
Epoch 64/500
11s - loss: 0.0133 - acc: 0.9946 - val_loss: 0.0233 - val_acc: 0.9914
Epoch 65/500
11s - loss: 0.0128 - acc: 0.9947 - val_loss: 0.0226 - val_acc: 0.9916
Epoch 66/500
11s - loss: 0.0128 - acc: 0.9947 - val_loss: 0.0226 - val_acc: 0.9918
Epoch 67/500
12s - loss: 0.0126 - acc: 0.9948 - val_loss: 0.0231 - val_acc: 0.9915
Epoch 68/500
11s - loss: 0.0122 - acc: 0.9952 - val_loss: 0.0237 - val_acc: 0.9916
Epoch 69/500
12s - loss: 0.0119 - acc: 0.9952 - val_loss: 0.0232 - val_acc: 0.9916
Epoch 70/500
11s - loss: 0.0122 - acc: 0.9951 - val_loss: 0.0258 - val_acc: 0.9915
Epoch 71/500
11s - loss: 0.0120 - acc: 0.9951 - val_loss: 0.0238 - val_acc: 0.9915
Epoch 72/500
11s - loss: 0.0119 - acc: 0.9952 - val_loss: 0.0239 - val_acc: 0.9914
Epoch 73/500
11s - loss: 0.0118 - acc: 0.9951 - val_loss: 0.0241 - val_acc: 0.9918
Epoch 74/500
11s - loss: 0.0115 - acc: 0.9954 - val_loss: 0.0234 - val_acc: 0.9916
Epoch 75/500
11s - loss: 0.0114 - acc: 0.9954 - val_loss: 0.0242 - val_acc: 0.9916
Epoch 76/500
11s - loss: 0.0113 - acc: 0.9955 - val_loss: 0.0255 - val_acc: 0.9911
Epoch 77/500
11s - loss: 0.0110 - acc: 0.9955 - val_loss: 0.0243 - val_acc: 0.9917
Epoch 78/500
11s - loss: 0.0112 - acc: 0.9955 - val_loss: 0.0248 - val_acc: 0.9919
Epoch 79/500
11s - loss: 0.0108 - acc: 0.9957 - val_loss: 0.0251 - val_acc: 0.9914
Epoch 80/500
11s - loss: 0.0109 - acc: 0.9957 - val_loss: 0.0252 - val_acc: 0.9916
Epoch 81/500
11s - loss: 0.0113 - acc: 0.9955 - val_loss: 0.0254 - val_acc: 0.9916
Epoch 82/500
11s - loss: 0.0111 - acc: 0.9956 - val_loss: 0.0250 - val_acc: 0.9914
Epoch 83/500
11s - loss: 0.0103 - acc: 0.9959 - val_loss: 0.0252 - val_acc: 0.9915
Epoch 84/500
11s - loss: 0.0105 - acc: 0.9958 - val_loss: 0.0266 - val_acc: 0.9910
Epoch 85/500
11s - loss: 0.0107 - acc: 0.9957 - val_loss: 0.0245 - val_acc: 0.9915
Epoch 86/500
11s - loss: 0.0108 - acc: 0.9956 - val_loss: 0.0247 - val_acc: 0.9916
Epoch 87/500
11s - loss: 0.0100 - acc: 0.9961 - val_loss: 0.0257 - val_acc: 0.9915
Epoch 88/500
11s - loss: 0.0102 - acc: 0.9961 - val_loss: 0.0247 - val_acc: 0.9916
Epoch 89/500
11s - loss: 0.0103 - acc: 0.9959 - val_loss: 0.0249 - val_acc: 0.9913
Epoch 90/500
11s - loss: 0.0101 - acc: 0.9960 - val_loss: 0.0253 - val_acc: 0.9913
Epoch 91/500
11s - loss: 0.0099 - acc: 0.9960 - val_loss: 0.0275 - val_acc: 0.9912
Epoch 92/500
11s - loss: 0.0098 - acc: 0.9960 - val_loss: 0.0266 - val_acc: 0.9917
Epoch 93/500
11s - loss: 0.0098 - acc: 0.9960 - val_loss: 0.0274 - val_acc: 0.9910
Epoch 94/500
11s - loss: 0.0096 - acc: 0.9963 - val_loss: 0.0261 - val_acc: 0.9913
Epoch 95/500
11s - loss: 0.0097 - acc: 0.9963 - val_loss: 0.0291 - val_acc: 0.9910
Epoch 96/500
11s - loss: 0.0096 - acc: 0.9961 - val_loss: 0.0275 - val_acc: 0.9912
Epoch 97/500
11s - loss: 0.0100 - acc: 0.9961 - val_loss: 0.0300 - val_acc: 0.9910
Epoch 98/500
11s - loss: 0.0095 - acc: 0.9961 - val_loss: 0.0270 - val_acc: 0.9914
Epoch 99/500
11s - loss: 0.0094 - acc: 0.9962 - val_loss: 0.0273 - val_acc: 0.9914
Epoch 100/500
11s - loss: 0.0095 - acc: 0.9964 - val_loss: 0.0263 - val_acc: 0.9917
Epoch 101/500
11s - loss: 0.0091 - acc: 0.9963 - val_loss: 0.0271 - val_acc: 0.9914
Epoch 102/500
11s - loss: 0.0092 - acc: 0.9963 - val_loss: 0.0269 - val_acc: 0.9914
Epoch 103/500
11s - loss: 0.0092 - acc: 0.9964 - val_loss: 0.0276 - val_acc: 0.9911
Epoch 104/500
11s - loss: 0.0090 - acc: 0.9964 - val_loss: 0.0266 - val_acc: 0.9915
Epoch 105/500
11s - loss: 0.0096 - acc: 0.9963 - val_loss: 0.0264 - val_acc: 0.9915
Epoch 106/500
11s - loss: 0.0089 - acc: 0.9965 - val_loss: 0.0257 - val_acc: 0.9916
Epoch 107/500
11s - loss: 0.0089 - acc: 0.9965 - val_loss: 0.0275 - val_acc: 0.9912
Epoch 108/500
11s - loss: 0.0088 - acc: 0.9965 - val_loss: 0.0270 - val_acc: 0.9913
Epoch 109/500
11s - loss: 0.0088 - acc: 0.9966 - val_loss: 0.0284 - val_acc: 0.9914
Epoch 110/500
11s - loss: 0.0088 - acc: 0.9965 - val_loss: 0.0276 - val_acc: 0.9913
Epoch 111/500
11s - loss: 0.0086 - acc: 0.9966 - val_loss: 0.0288 - val_acc: 0.9910
Epoch 112/500
11s - loss: 0.0089 - acc: 0.9965 - val_loss: 0.0272 - val_acc: 0.9910
Epoch 113/500
11s - loss: 0.0089 - acc: 0.9964 - val_loss: 0.0289 - val_acc: 0.9912
Epoch 114/500
11s - loss: 0.0087 - acc: 0.9966 - val_loss: 0.0298 - val_acc: 0.9914
Epoch 115/500
11s - loss: 0.0089 - acc: 0.9964 - val_loss: 0.0292 - val_acc: 0.9912
Epoch 116/500
11s - loss: 0.0083 - acc: 0.9967 - val_loss: 0.0305 - val_acc: 0.9911
Epoch 117/500
11s - loss: 0.0087 - acc: 0.9966 - val_loss: 0.0265 - val_acc: 0.9911
Epoch 118/500
11s - loss: 0.0085 - acc: 0.9967 - val_loss: 0.0280 - val_acc: 0.9910
Epoch 119/500
11s - loss: 0.0083 - acc: 0.9967 - val_loss: 0.0277 - val_acc: 0.9915
Epoch 120/500
11s - loss: 0.0087 - acc: 0.9966 - val_loss: 0.0281 - val_acc: 0.9913
Epoch 121/500
11s - loss: 0.0085 - acc: 0.9966 - val_loss: 0.0289 - val_acc: 0.9912
Epoch 122/500
11s - loss: 0.0082 - acc: 0.9968 - val_loss: 0.0288 - val_acc: 0.9910
Epoch 123/500
11s - loss: 0.0083 - acc: 0.9967 - val_loss: 0.0295 - val_acc: 0.9909
Epoch 124/500
11s - loss: 0.0085 - acc: 0.9966 - val_loss: 0.0279 - val_acc: 0.9910
Epoch 125/500
11s - loss: 0.0084 - acc: 0.9968 - val_loss: 0.0277 - val_acc: 0.9914
Epoch 126/500
11s - loss: 0.0084 - acc: 0.9967 - val_loss: 0.0286 - val_acc: 0.9913
Epoch 127/500
11s - loss: 0.0084 - acc: 0.9967 - val_loss: 0.0305 - val_acc: 0.9911
Epoch 128/500
11s - loss: 0.0080 - acc: 0.9969 - val_loss: 0.0292 - val_acc: 0.9913
Epoch 129/500
11s - loss: 0.0083 - acc: 0.9967 - val_loss: 0.0275 - val_acc: 0.9914
Epoch 130/500
11s - loss: 0.0084 - acc: 0.9967 - val_loss: 0.0280 - val_acc: 0.9916
Epoch 131/500
11s - loss: 0.0084 - acc: 0.9966 - val_loss: 0.0289 - val_acc: 0.9913
Epoch 132/500
11s - loss: 0.0081 - acc: 0.9968 - val_loss: 0.0269 - val_acc: 0.9912
Epoch 133/500
11s - loss: 0.0077 - acc: 0.9970 - val_loss: 0.0287 - val_acc: 0.9911
Epoch 134/500
11s - loss: 0.0079 - acc: 0.9970 - val_loss: 0.0302 - val_acc: 0.9911
Epoch 135/500
11s - loss: 0.0077 - acc: 0.9970 - val_loss: 0.0314 - val_acc: 0.9912
Epoch 136/500
11s - loss: 0.0080 - acc: 0.9968 - val_loss: 0.0288 - val_acc: 0.9915
Epoch 137/500
11s - loss: 0.0081 - acc: 0.9968 - val_loss: 0.0281 - val_acc: 0.9915
Epoch 138/500
11s - loss: 0.0080 - acc: 0.9970 - val_loss: 0.0287 - val_acc: 0.9909
Epoch 139/500
11s - loss: 0.0081 - acc: 0.9969 - val_loss: 0.0294 - val_acc: 0.9911
82944/86329 [===========================>..] - ETA: 0s
TRAIN
             precision    recall  f1-score   support

          0       1.00      1.00      1.00    344253
          1       1.00      1.00      1.00     10706

avg / total       1.00      1.00      1.00    354959

TP=3802, FN=1, FP=8
precision=0.997900262467, recall=0.9997370496976071, F1=0.998817811638

TP=3802, FN=1, FP=6
precision=0.998424369748, recall=0.9997370496976071, F1=0.999080278544

TEST
             precision    recall  f1-score   support

          0       1.00      1.00      1.00     84170
          1       0.82      0.82      0.82      2159

avg / total       0.99      0.99      0.99     86329

TP=758, FN=17, FP=128
precision=0.855530474041, recall=0.9780645161290322, F1=0.912703190849

TP=770, FN=5, FP=28
precision=0.964912280702, recall=0.9935483870967742, F1=0.979020979021


Creating spectrograms
Spectrogram data infos:
(367904, 80)
(367904,)
10501
(367904,)
3726
(367904,)
1.79598210403
(73384, 80)
(73384,)
2364
(73384,)
852
(73384,)
1.71289381882
(367904, 80)
(73384, 80)
-12.5323608389
6.46256944971
-12.1771435283
6.60344425027

Standardizing (each band separately)
Data after standardizing:
(367904, 80)
(73384, 80)
-1.22492450402e-16
1.0
0.0599141887665
1.02456516696

(367904, 15, 80)
(73384, 15, 80)
Reshaping data
(367904, 1, 15, 80)
(73384, 1, 15, 80)
(1, 15, 80)
Concatenating channels
(367904, 1, 15, 80)
(73384, 1, 15, 80)
Train on 367904 samples, validate on 73384 samples
Epoch 1/500
11s - loss: 0.0566 - acc: 0.9797 - val_loss: 0.0415 - val_acc: 0.9833
Epoch 2/500
11s - loss: 0.0351 - acc: 0.9857 - val_loss: 0.0376 - val_acc: 0.9846
Epoch 3/500
11s - loss: 0.0325 - acc: 0.9867 - val_loss: 0.0351 - val_acc: 0.9854
Epoch 4/500
11s - loss: 0.0311 - acc: 0.9873 - val_loss: 0.0338 - val_acc: 0.9860
Epoch 5/500
11s - loss: 0.0299 - acc: 0.9877 - val_loss: 0.0318 - val_acc: 0.9872
Epoch 6/500
11s - loss: 0.0288 - acc: 0.9883 - val_loss: 0.0311 - val_acc: 0.9873
Epoch 7/500
11s - loss: 0.0278 - acc: 0.9887 - val_loss: 0.0312 - val_acc: 0.9872
Epoch 8/500
11s - loss: 0.0271 - acc: 0.9890 - val_loss: 0.0292 - val_acc: 0.9882
Epoch 9/500
11s - loss: 0.0263 - acc: 0.9894 - val_loss: 0.0286 - val_acc: 0.9886
Epoch 10/500
11s - loss: 0.0260 - acc: 0.9896 - val_loss: 0.0281 - val_acc: 0.9886
Epoch 11/500
11s - loss: 0.0252 - acc: 0.9898 - val_loss: 0.0284 - val_acc: 0.9886
Epoch 12/500
11s - loss: 0.0248 - acc: 0.9899 - val_loss: 0.0306 - val_acc: 0.9876
Epoch 13/500
11s - loss: 0.0245 - acc: 0.9901 - val_loss: 0.0278 - val_acc: 0.9889
Epoch 14/500
11s - loss: 0.0240 - acc: 0.9903 - val_loss: 0.0277 - val_acc: 0.9890
Epoch 15/500
11s - loss: 0.0236 - acc: 0.9904 - val_loss: 0.0278 - val_acc: 0.9888
Epoch 16/500
11s - loss: 0.0233 - acc: 0.9905 - val_loss: 0.0276 - val_acc: 0.9891
Epoch 17/500
11s - loss: 0.0228 - acc: 0.9907 - val_loss: 0.0282 - val_acc: 0.9888
Epoch 18/500
11s - loss: 0.0225 - acc: 0.9908 - val_loss: 0.0267 - val_acc: 0.9892
Epoch 19/500
11s - loss: 0.0223 - acc: 0.9909 - val_loss: 0.0288 - val_acc: 0.9885
Epoch 20/500
11s - loss: 0.0220 - acc: 0.9913 - val_loss: 0.0268 - val_acc: 0.9894
Epoch 21/500
11s - loss: 0.0221 - acc: 0.9910 - val_loss: 0.0266 - val_acc: 0.9894
Epoch 22/500
11s - loss: 0.0211 - acc: 0.9915 - val_loss: 0.0277 - val_acc: 0.9891
Epoch 23/500
11s - loss: 0.0209 - acc: 0.9914 - val_loss: 0.0273 - val_acc: 0.9897
Epoch 24/500
11s - loss: 0.0206 - acc: 0.9917 - val_loss: 0.0276 - val_acc: 0.9897
Epoch 25/500
11s - loss: 0.0205 - acc: 0.9917 - val_loss: 0.0268 - val_acc: 0.9900
Epoch 26/500
11s - loss: 0.0201 - acc: 0.9918 - val_loss: 0.0264 - val_acc: 0.9896
Epoch 27/500
11s - loss: 0.0198 - acc: 0.9920 - val_loss: 0.0273 - val_acc: 0.9898
Epoch 28/500
11s - loss: 0.0195 - acc: 0.9920 - val_loss: 0.0271 - val_acc: 0.9896
Epoch 29/500
11s - loss: 0.0193 - acc: 0.9924 - val_loss: 0.0297 - val_acc: 0.9892
Epoch 30/500
11s - loss: 0.0189 - acc: 0.9923 - val_loss: 0.0271 - val_acc: 0.9897
Epoch 31/500
11s - loss: 0.0186 - acc: 0.9924 - val_loss: 0.0281 - val_acc: 0.9896
Epoch 32/500
11s - loss: 0.0183 - acc: 0.9925 - val_loss: 0.0273 - val_acc: 0.9899
Epoch 33/500
11s - loss: 0.0178 - acc: 0.9925 - val_loss: 0.0273 - val_acc: 0.9897
Epoch 34/500
11s - loss: 0.0176 - acc: 0.9930 - val_loss: 0.0280 - val_acc: 0.9897
Epoch 35/500
11s - loss: 0.0175 - acc: 0.9928 - val_loss: 0.0281 - val_acc: 0.9895
Epoch 36/500
11s - loss: 0.0170 - acc: 0.9932 - val_loss: 0.0286 - val_acc: 0.9899
Epoch 37/500
11s - loss: 0.0172 - acc: 0.9931 - val_loss: 0.0285 - val_acc: 0.9892
Epoch 38/500
11s - loss: 0.0168 - acc: 0.9933 - val_loss: 0.0291 - val_acc: 0.9894
Epoch 39/500
11s - loss: 0.0165 - acc: 0.9934 - val_loss: 0.0307 - val_acc: 0.9887
Epoch 40/500
11s - loss: 0.0164 - acc: 0.9935 - val_loss: 0.0287 - val_acc: 0.9899
Epoch 41/500
11s - loss: 0.0162 - acc: 0.9934 - val_loss: 0.0298 - val_acc: 0.9891
Epoch 42/500
11s - loss: 0.0158 - acc: 0.9938 - val_loss: 0.0284 - val_acc: 0.9895
Epoch 43/500
11s - loss: 0.0155 - acc: 0.9940 - val_loss: 0.0297 - val_acc: 0.9895
Epoch 44/500
11s - loss: 0.0153 - acc: 0.9939 - val_loss: 0.0282 - val_acc: 0.9897
Epoch 45/500
11s - loss: 0.0150 - acc: 0.9940 - val_loss: 0.0302 - val_acc: 0.9894
Epoch 46/500
11s - loss: 0.0146 - acc: 0.9941 - val_loss: 0.0297 - val_acc: 0.9896
Epoch 47/500
11s - loss: 0.0145 - acc: 0.9942 - val_loss: 0.0293 - val_acc: 0.9895
Epoch 48/500
11s - loss: 0.0143 - acc: 0.9942 - val_loss: 0.0304 - val_acc: 0.9894
Epoch 49/500
11s - loss: 0.0144 - acc: 0.9942 - val_loss: 0.0291 - val_acc: 0.9893
Epoch 50/500
11s - loss: 0.0142 - acc: 0.9943 - val_loss: 0.0297 - val_acc: 0.9897
Epoch 51/500
11s - loss: 0.0136 - acc: 0.9946 - val_loss: 0.0293 - val_acc: 0.9897
Epoch 52/500
11s - loss: 0.0135 - acc: 0.9945 - val_loss: 0.0321 - val_acc: 0.9897
Epoch 53/500
11s - loss: 0.0135 - acc: 0.9946 - val_loss: 0.0305 - val_acc: 0.9896
Epoch 54/500
11s - loss: 0.0131 - acc: 0.9947 - val_loss: 0.0315 - val_acc: 0.9890
Epoch 55/500
11s - loss: 0.0131 - acc: 0.9948 - val_loss: 0.0317 - val_acc: 0.9891
Epoch 56/500
11s - loss: 0.0129 - acc: 0.9947 - val_loss: 0.0310 - val_acc: 0.9893
Epoch 57/500
11s - loss: 0.0127 - acc: 0.9951 - val_loss: 0.0331 - val_acc: 0.9892
Epoch 58/500
11s - loss: 0.0126 - acc: 0.9950 - val_loss: 0.0329 - val_acc: 0.9895
Epoch 59/500
11s - loss: 0.0127 - acc: 0.9949 - val_loss: 0.0312 - val_acc: 0.9895
Epoch 60/500
11s - loss: 0.0122 - acc: 0.9951 - val_loss: 0.0313 - val_acc: 0.9892
Epoch 61/500
11s - loss: 0.0120 - acc: 0.9954 - val_loss: 0.0335 - val_acc: 0.9898
Epoch 62/500
11s - loss: 0.0119 - acc: 0.9953 - val_loss: 0.0331 - val_acc: 0.9893
Epoch 63/500
11s - loss: 0.0115 - acc: 0.9955 - val_loss: 0.0315 - val_acc: 0.9893
Epoch 64/500
11s - loss: 0.0113 - acc: 0.9955 - val_loss: 0.0352 - val_acc: 0.9895
Epoch 65/500
11s - loss: 0.0110 - acc: 0.9956 - val_loss: 0.0342 - val_acc: 0.9892
Epoch 66/500
11s - loss: 0.0114 - acc: 0.9956 - val_loss: 0.0322 - val_acc: 0.9890
Epoch 67/500
11s - loss: 0.0113 - acc: 0.9956 - val_loss: 0.0334 - val_acc: 0.9895
Epoch 68/500
11s - loss: 0.0114 - acc: 0.9955 - val_loss: 0.0327 - val_acc: 0.9892
Epoch 69/500
11s - loss: 0.0114 - acc: 0.9957 - val_loss: 0.0332 - val_acc: 0.9894
Epoch 70/500
11s - loss: 0.0108 - acc: 0.9958 - val_loss: 0.0347 - val_acc: 0.9890
Epoch 71/500
11s - loss: 0.0108 - acc: 0.9957 - val_loss: 0.0350 - val_acc: 0.9891
Epoch 72/500
11s - loss: 0.0106 - acc: 0.9959 - val_loss: 0.0344 - val_acc: 0.9897
Epoch 73/500
11s - loss: 0.0107 - acc: 0.9958 - val_loss: 0.0330 - val_acc: 0.9893
Epoch 74/500
11s - loss: 0.0103 - acc: 0.9960 - val_loss: 0.0337 - val_acc: 0.9894
Epoch 75/500
11s - loss: 0.0104 - acc: 0.9958 - val_loss: 0.0352 - val_acc: 0.9893
Epoch 76/500
11s - loss: 0.0102 - acc: 0.9959 - val_loss: 0.0348 - val_acc: 0.9892
Epoch 77/500
11s - loss: 0.0102 - acc: 0.9961 - val_loss: 0.0335 - val_acc: 0.9891
Epoch 78/500
11s - loss: 0.0100 - acc: 0.9961 - val_loss: 0.0369 - val_acc: 0.9891
Epoch 79/500
11s - loss: 0.0098 - acc: 0.9962 - val_loss: 0.0356 - val_acc: 0.9895
Epoch 80/500
11s - loss: 0.0101 - acc: 0.9962 - val_loss: 0.0367 - val_acc: 0.9895
Epoch 81/500
11s - loss: 0.0098 - acc: 0.9962 - val_loss: 0.0337 - val_acc: 0.9896
Epoch 82/500
11s - loss: 0.0095 - acc: 0.9965 - val_loss: 0.0350 - val_acc: 0.9895
Epoch 83/500
11s - loss: 0.0095 - acc: 0.9963 - val_loss: 0.0363 - val_acc: 0.9889
Epoch 84/500
11s - loss: 0.0096 - acc: 0.9964 - val_loss: 0.0348 - val_acc: 0.9894
Epoch 85/500
11s - loss: 0.0095 - acc: 0.9964 - val_loss: 0.0362 - val_acc: 0.9895
Epoch 86/500
11s - loss: 0.0095 - acc: 0.9963 - val_loss: 0.0350 - val_acc: 0.9892
Epoch 87/500
11s - loss: 0.0095 - acc: 0.9963 - val_loss: 0.0350 - val_acc: 0.9895
Epoch 88/500
11s - loss: 0.0092 - acc: 0.9966 - val_loss: 0.0370 - val_acc: 0.9892
Epoch 89/500
11s - loss: 0.0090 - acc: 0.9965 - val_loss: 0.0380 - val_acc: 0.9893
Epoch 90/500
11s - loss: 0.0095 - acc: 0.9965 - val_loss: 0.0369 - val_acc: 0.9893
Epoch 91/500
11s - loss: 0.0090 - acc: 0.9965 - val_loss: 0.0375 - val_acc: 0.9893
Epoch 92/500
11s - loss: 0.0089 - acc: 0.9966 - val_loss: 0.0379 - val_acc: 0.9894
Epoch 93/500
11s - loss: 0.0089 - acc: 0.9966 - val_loss: 0.0383 - val_acc: 0.9890
Epoch 94/500
11s - loss: 0.0088 - acc: 0.9966 - val_loss: 0.0393 - val_acc: 0.9894
Epoch 95/500
11s - loss: 0.0094 - acc: 0.9964 - val_loss: 0.0359 - val_acc: 0.9891
Epoch 96/500
11s - loss: 0.0088 - acc: 0.9966 - val_loss: 0.0374 - val_acc: 0.9892
Epoch 97/500
11s - loss: 0.0090 - acc: 0.9966 - val_loss: 0.0365 - val_acc: 0.9892
Epoch 98/500
11s - loss: 0.0086 - acc: 0.9967 - val_loss: 0.0391 - val_acc: 0.9892
Epoch 99/500
11s - loss: 0.0086 - acc: 0.9967 - val_loss: 0.0367 - val_acc: 0.9895
Epoch 100/500
11s - loss: 0.0089 - acc: 0.9966 - val_loss: 0.0385 - val_acc: 0.9888
Epoch 101/500
11s - loss: 0.0084 - acc: 0.9969 - val_loss: 0.0383 - val_acc: 0.9890
Epoch 102/500
11s - loss: 0.0085 - acc: 0.9967 - val_loss: 0.0384 - val_acc: 0.9894
Epoch 103/500
11s - loss: 0.0086 - acc: 0.9967 - val_loss: 0.0390 - val_acc: 0.9890
Epoch 104/500
11s - loss: 0.0086 - acc: 0.9968 - val_loss: 0.0383 - val_acc: 0.9887
Epoch 105/500
11s - loss: 0.0085 - acc: 0.9969 - val_loss: 0.0402 - val_acc: 0.9889
Epoch 106/500
11s - loss: 0.0081 - acc: 0.9969 - val_loss: 0.0419 - val_acc: 0.9889
Epoch 107/500
11s - loss: 0.0081 - acc: 0.9971 - val_loss: 0.0395 - val_acc: 0.9891
Epoch 108/500
11s - loss: 0.0082 - acc: 0.9970 - val_loss: 0.0409 - val_acc: 0.9890
Epoch 109/500
11s - loss: 0.0078 - acc: 0.9971 - val_loss: 0.0413 - val_acc: 0.9891
Epoch 110/500
11s - loss: 0.0085 - acc: 0.9967 - val_loss: 0.0381 - val_acc: 0.9891
Epoch 111/500
11s - loss: 0.0079 - acc: 0.9970 - val_loss: 0.0411 - val_acc: 0.9891
Epoch 112/500
11s - loss: 0.0081 - acc: 0.9968 - val_loss: 0.0384 - val_acc: 0.9892
Epoch 113/500
11s - loss: 0.0079 - acc: 0.9971 - val_loss: 0.0389 - val_acc: 0.9890
Epoch 114/500
11s - loss: 0.0078 - acc: 0.9972 - val_loss: 0.0389 - val_acc: 0.9895
Epoch 115/500
11s - loss: 0.0078 - acc: 0.9971 - val_loss: 0.0396 - val_acc: 0.9890
Epoch 116/500
11s - loss: 0.0079 - acc: 0.9971 - val_loss: 0.0392 - val_acc: 0.9894
Epoch 117/500
11s - loss: 0.0080 - acc: 0.9969 - val_loss: 0.0398 - val_acc: 0.9888
Epoch 118/500
11s - loss: 0.0080 - acc: 0.9970 - val_loss: 0.0391 - val_acc: 0.9889
Epoch 119/500
11s - loss: 0.0082 - acc: 0.9969 - val_loss: 0.0407 - val_acc: 0.9889
Epoch 120/500
11s - loss: 0.0075 - acc: 0.9971 - val_loss: 0.0435 - val_acc: 0.9892
Epoch 121/500
11s - loss: 0.0077 - acc: 0.9970 - val_loss: 0.0421 - val_acc: 0.9893
Epoch 122/500
11s - loss: 0.0078 - acc: 0.9971 - val_loss: 0.0409 - val_acc: 0.9893
Epoch 123/500
11s - loss: 0.0072 - acc: 0.9973 - val_loss: 0.0421 - val_acc: 0.9890
Epoch 124/500
11s - loss: 0.0077 - acc: 0.9971 - val_loss: 0.0414 - val_acc: 0.9890
Epoch 125/500
11s - loss: 0.0080 - acc: 0.9970 - val_loss: 0.0408 - val_acc: 0.9891
Epoch 126/500
11s - loss: 0.0079 - acc: 0.9971 - val_loss: 0.0401 - val_acc: 0.9891
Epoch 127/500
11s - loss: 0.0073 - acc: 0.9973 - val_loss: 0.0397 - val_acc: 0.9890
Epoch 128/500
11s - loss: 0.0074 - acc: 0.9972 - val_loss: 0.0430 - val_acc: 0.9893
Epoch 129/500
11s - loss: 0.0076 - acc: 0.9971 - val_loss: 0.0406 - val_acc: 0.9891
71680/73384 [============================>.] - ETA: 0s
TRAIN
             precision    recall  f1-score   support

          0       1.00      1.00      1.00    357403
          1       1.00      1.00      1.00     10501

avg / total       1.00      1.00      1.00    367904

TP=3724, FN=2, FP=6
precision=0.998391420912, recall=0.9994632313472893, F1=0.998927038627

TP=3724, FN=2, FP=2
precision=0.999463231347, recall=0.9994632313472893, F1=0.999463231347

TEST
             precision    recall  f1-score   support

          0       0.99      0.99      0.99     71020
          1       0.83      0.83      0.83      2364

avg / total       0.99      0.99      0.99     73384

TP=834, FN=18, FP=91
precision=0.901621621622, recall=0.9788732394366197, F1=0.938660664041

TP=841, FN=11, FP=22
precision=0.974507531866, recall=0.9870892018779343, F1=0.980758017493


Creating spectrograms
Spectrogram data infos:
(341470, 80)
(341470,)
9912
(341470,)
3557
(341470,)
1.77213225174
(99818, 80)
(99818,)
2953
(99818,)
1021
(99818,)
1.81648600453
(341470, 80)
(99818, 80)
-12.3365115231
6.51216540384
-12.9411989117
6.38052335841

Standardizing (each band separately)
Data after standardizing:
(341470, 80)
(99818, 80)
1.11865690908e-17
1.0
-0.101160463138
0.979084769637

(341470, 15, 80)
(99818, 15, 80)
Reshaping data
(341470, 1, 15, 80)
(99818, 1, 15, 80)
(1, 15, 80)
Concatenating channels
(341470, 1, 15, 80)
(99818, 1, 15, 80)
Train on 341470 samples, validate on 99818 samples
Epoch 1/500
10s - loss: 0.0577 - acc: 0.9790 - val_loss: 0.0328 - val_acc: 0.9864
Epoch 2/500
10s - loss: 0.0372 - acc: 0.9851 - val_loss: 0.0306 - val_acc: 0.9879
Epoch 3/500
10s - loss: 0.0345 - acc: 0.9860 - val_loss: 0.0290 - val_acc: 0.9885
Epoch 4/500
10s - loss: 0.0322 - acc: 0.9867 - val_loss: 0.0281 - val_acc: 0.9890
Epoch 5/500
10s - loss: 0.0308 - acc: 0.9874 - val_loss: 0.0270 - val_acc: 0.9892
Epoch 6/500
11s - loss: 0.0293 - acc: 0.9880 - val_loss: 0.0264 - val_acc: 0.9892
Epoch 7/500
11s - loss: 0.0290 - acc: 0.9882 - val_loss: 0.0258 - val_acc: 0.9898
Epoch 8/500
10s - loss: 0.0278 - acc: 0.9886 - val_loss: 0.0275 - val_acc: 0.9892
Epoch 9/500
11s - loss: 0.0272 - acc: 0.9889 - val_loss: 0.0257 - val_acc: 0.9898
Epoch 10/500
10s - loss: 0.0266 - acc: 0.9890 - val_loss: 0.0245 - val_acc: 0.9902
Epoch 11/500
10s - loss: 0.0265 - acc: 0.9893 - val_loss: 0.0245 - val_acc: 0.9906
Epoch 12/500
10s - loss: 0.0259 - acc: 0.9894 - val_loss: 0.0250 - val_acc: 0.9905
Epoch 13/500
10s - loss: 0.0256 - acc: 0.9895 - val_loss: 0.0250 - val_acc: 0.9901
Epoch 14/500
10s - loss: 0.0247 - acc: 0.9898 - val_loss: 0.0241 - val_acc: 0.9908
Epoch 15/500
10s - loss: 0.0245 - acc: 0.9899 - val_loss: 0.0242 - val_acc: 0.9900
Epoch 16/500
10s - loss: 0.0241 - acc: 0.9900 - val_loss: 0.0246 - val_acc: 0.9900
Epoch 17/500
10s - loss: 0.0239 - acc: 0.9902 - val_loss: 0.0235 - val_acc: 0.9907
Epoch 18/500
11s - loss: 0.0236 - acc: 0.9902 - val_loss: 0.0238 - val_acc: 0.9907
Epoch 19/500
10s - loss: 0.0229 - acc: 0.9905 - val_loss: 0.0235 - val_acc: 0.9908
Epoch 20/500
11s - loss: 0.0228 - acc: 0.9907 - val_loss: 0.0236 - val_acc: 0.9909
Epoch 21/500
11s - loss: 0.0223 - acc: 0.9909 - val_loss: 0.0243 - val_acc: 0.9908
Epoch 22/500
10s - loss: 0.0223 - acc: 0.9909 - val_loss: 0.0234 - val_acc: 0.9910
Epoch 23/500
10s - loss: 0.0221 - acc: 0.9910 - val_loss: 0.0239 - val_acc: 0.9906
Epoch 24/500
10s - loss: 0.0218 - acc: 0.9911 - val_loss: 0.0237 - val_acc: 0.9910
Epoch 25/500
10s - loss: 0.0210 - acc: 0.9912 - val_loss: 0.0239 - val_acc: 0.9907
Epoch 26/500
10s - loss: 0.0211 - acc: 0.9912 - val_loss: 0.0236 - val_acc: 0.9909
Epoch 27/500
10s - loss: 0.0207 - acc: 0.9914 - val_loss: 0.0237 - val_acc: 0.9907
Epoch 28/500
10s - loss: 0.0207 - acc: 0.9916 - val_loss: 0.0241 - val_acc: 0.9908
Epoch 29/500
10s - loss: 0.0201 - acc: 0.9918 - val_loss: 0.0241 - val_acc: 0.9908
Epoch 30/500
10s - loss: 0.0196 - acc: 0.9920 - val_loss: 0.0240 - val_acc: 0.9904
Epoch 31/500
11s - loss: 0.0196 - acc: 0.9918 - val_loss: 0.0240 - val_acc: 0.9909
Epoch 32/500
11s - loss: 0.0192 - acc: 0.9921 - val_loss: 0.0239 - val_acc: 0.9908
Epoch 33/500
10s - loss: 0.0190 - acc: 0.9921 - val_loss: 0.0247 - val_acc: 0.9902
Epoch 34/500
11s - loss: 0.0189 - acc: 0.9922 - val_loss: 0.0250 - val_acc: 0.9907
Epoch 35/500
10s - loss: 0.0185 - acc: 0.9924 - val_loss: 0.0249 - val_acc: 0.9907
Epoch 36/500
10s - loss: 0.0184 - acc: 0.9925 - val_loss: 0.0253 - val_acc: 0.9904
Epoch 37/500
10s - loss: 0.0180 - acc: 0.9926 - val_loss: 0.0246 - val_acc: 0.9906
Epoch 38/500
10s - loss: 0.0177 - acc: 0.9928 - val_loss: 0.0244 - val_acc: 0.9906
Epoch 39/500
10s - loss: 0.0175 - acc: 0.9930 - val_loss: 0.0253 - val_acc: 0.9907
Epoch 40/500
10s - loss: 0.0175 - acc: 0.9927 - val_loss: 0.0253 - val_acc: 0.9906
Epoch 41/500
10s - loss: 0.0170 - acc: 0.9931 - val_loss: 0.0260 - val_acc: 0.9907
Epoch 42/500
10s - loss: 0.0166 - acc: 0.9932 - val_loss: 0.0260 - val_acc: 0.9902
Epoch 43/500
10s - loss: 0.0164 - acc: 0.9933 - val_loss: 0.0264 - val_acc: 0.9902
Epoch 44/500
10s - loss: 0.0162 - acc: 0.9935 - val_loss: 0.0262 - val_acc: 0.9905
Epoch 45/500
10s - loss: 0.0158 - acc: 0.9935 - val_loss: 0.0261 - val_acc: 0.9906
Epoch 46/500
10s - loss: 0.0158 - acc: 0.9938 - val_loss: 0.0259 - val_acc: 0.9906
Epoch 47/500
10s - loss: 0.0156 - acc: 0.9939 - val_loss: 0.0258 - val_acc: 0.9908
Epoch 48/500
10s - loss: 0.0150 - acc: 0.9940 - val_loss: 0.0269 - val_acc: 0.9904
Epoch 49/500
10s - loss: 0.0150 - acc: 0.9939 - val_loss: 0.0273 - val_acc: 0.9903
Epoch 50/500
10s - loss: 0.0149 - acc: 0.9941 - val_loss: 0.0271 - val_acc: 0.9905
Epoch 51/500
10s - loss: 0.0147 - acc: 0.9941 - val_loss: 0.0273 - val_acc: 0.9902
Epoch 52/500
10s - loss: 0.0142 - acc: 0.9943 - val_loss: 0.0270 - val_acc: 0.9905
Epoch 53/500
10s - loss: 0.0142 - acc: 0.9943 - val_loss: 0.0277 - val_acc: 0.9904
Epoch 54/500
11s - loss: 0.0138 - acc: 0.9943 - val_loss: 0.0280 - val_acc: 0.9905
Epoch 55/500
10s - loss: 0.0136 - acc: 0.9946 - val_loss: 0.0283 - val_acc: 0.9904
Epoch 56/500
10s - loss: 0.0140 - acc: 0.9944 - val_loss: 0.0280 - val_acc: 0.9904
Epoch 57/500
10s - loss: 0.0134 - acc: 0.9946 - val_loss: 0.0270 - val_acc: 0.9905
Epoch 58/500
10s - loss: 0.0132 - acc: 0.9948 - val_loss: 0.0285 - val_acc: 0.9903
Epoch 59/500
10s - loss: 0.0129 - acc: 0.9949 - val_loss: 0.0288 - val_acc: 0.9901
Epoch 60/500
10s - loss: 0.0131 - acc: 0.9949 - val_loss: 0.0282 - val_acc: 0.9907
Epoch 61/500
10s - loss: 0.0127 - acc: 0.9950 - val_loss: 0.0284 - val_acc: 0.9906
Epoch 62/500
10s - loss: 0.0124 - acc: 0.9951 - val_loss: 0.0292 - val_acc: 0.9903
Epoch 63/500
10s - loss: 0.0126 - acc: 0.9952 - val_loss: 0.0285 - val_acc: 0.9901
Epoch 64/500
10s - loss: 0.0123 - acc: 0.9952 - val_loss: 0.0296 - val_acc: 0.9899
Epoch 65/500
10s - loss: 0.0122 - acc: 0.9951 - val_loss: 0.0294 - val_acc: 0.9901
Epoch 66/500
10s - loss: 0.0118 - acc: 0.9954 - val_loss: 0.0296 - val_acc: 0.9904
Epoch 67/500
10s - loss: 0.0117 - acc: 0.9953 - val_loss: 0.0300 - val_acc: 0.9902
Epoch 68/500
10s - loss: 0.0119 - acc: 0.9952 - val_loss: 0.0318 - val_acc: 0.9904
Epoch 69/500
10s - loss: 0.0117 - acc: 0.9955 - val_loss: 0.0299 - val_acc: 0.9906
Epoch 70/500
10s - loss: 0.0112 - acc: 0.9957 - val_loss: 0.0316 - val_acc: 0.9906
Epoch 71/500
10s - loss: 0.0113 - acc: 0.9956 - val_loss: 0.0303 - val_acc: 0.9903
Epoch 72/500
10s - loss: 0.0111 - acc: 0.9958 - val_loss: 0.0315 - val_acc: 0.9904
Epoch 73/500
10s - loss: 0.0112 - acc: 0.9956 - val_loss: 0.0302 - val_acc: 0.9905
Epoch 74/500
10s - loss: 0.0107 - acc: 0.9958 - val_loss: 0.0325 - val_acc: 0.9902
Epoch 75/500
10s - loss: 0.0109 - acc: 0.9957 - val_loss: 0.0311 - val_acc: 0.9902
Epoch 76/500
10s - loss: 0.0106 - acc: 0.9959 - val_loss: 0.0326 - val_acc: 0.9900
Epoch 77/500
10s - loss: 0.0107 - acc: 0.9958 - val_loss: 0.0322 - val_acc: 0.9902
Epoch 78/500
10s - loss: 0.0101 - acc: 0.9960 - val_loss: 0.0315 - val_acc: 0.9901
Epoch 79/500
10s - loss: 0.0107 - acc: 0.9958 - val_loss: 0.0320 - val_acc: 0.9900
Epoch 80/500
10s - loss: 0.0108 - acc: 0.9958 - val_loss: 0.0311 - val_acc: 0.9904
Epoch 81/500
10s - loss: 0.0103 - acc: 0.9960 - val_loss: 0.0320 - val_acc: 0.9901
Epoch 82/500
10s - loss: 0.0100 - acc: 0.9961 - val_loss: 0.0318 - val_acc: 0.9903
Epoch 83/500
10s - loss: 0.0102 - acc: 0.9960 - val_loss: 0.0335 - val_acc: 0.9901
Epoch 84/500
10s - loss: 0.0100 - acc: 0.9961 - val_loss: 0.0326 - val_acc: 0.9900
Epoch 85/500
10s - loss: 0.0101 - acc: 0.9960 - val_loss: 0.0332 - val_acc: 0.9902
Epoch 86/500
10s - loss: 0.0096 - acc: 0.9964 - val_loss: 0.0323 - val_acc: 0.9902
Epoch 87/500
10s - loss: 0.0099 - acc: 0.9962 - val_loss: 0.0313 - val_acc: 0.9904
Epoch 88/500
11s - loss: 0.0097 - acc: 0.9962 - val_loss: 0.0324 - val_acc: 0.9901
Epoch 89/500
10s - loss: 0.0099 - acc: 0.9963 - val_loss: 0.0331 - val_acc: 0.9900
Epoch 90/500
11s - loss: 0.0093 - acc: 0.9965 - val_loss: 0.0334 - val_acc: 0.9900
Epoch 91/500
10s - loss: 0.0096 - acc: 0.9963 - val_loss: 0.0329 - val_acc: 0.9904
Epoch 92/500
11s - loss: 0.0094 - acc: 0.9963 - val_loss: 0.0330 - val_acc: 0.9905
Epoch 93/500
10s - loss: 0.0095 - acc: 0.9962 - val_loss: 0.0332 - val_acc: 0.9900
Epoch 94/500
11s - loss: 0.0091 - acc: 0.9964 - val_loss: 0.0337 - val_acc: 0.9901
Epoch 95/500
11s - loss: 0.0089 - acc: 0.9965 - val_loss: 0.0346 - val_acc: 0.9903
Epoch 96/500
11s - loss: 0.0091 - acc: 0.9965 - val_loss: 0.0344 - val_acc: 0.9902
Epoch 97/500
11s - loss: 0.0087 - acc: 0.9967 - val_loss: 0.0367 - val_acc: 0.9898
Epoch 98/500
11s - loss: 0.0089 - acc: 0.9966 - val_loss: 0.0340 - val_acc: 0.9900
Epoch 99/500
10s - loss: 0.0088 - acc: 0.9966 - val_loss: 0.0343 - val_acc: 0.9900
Epoch 100/500
10s - loss: 0.0085 - acc: 0.9967 - val_loss: 0.0341 - val_acc: 0.9901
Epoch 101/500
11s - loss: 0.0084 - acc: 0.9969 - val_loss: 0.0340 - val_acc: 0.9900
Epoch 102/500
11s - loss: 0.0082 - acc: 0.9969 - val_loss: 0.0354 - val_acc: 0.9900
Epoch 103/500
11s - loss: 0.0084 - acc: 0.9967 - val_loss: 0.0356 - val_acc: 0.9899
Epoch 104/500
11s - loss: 0.0088 - acc: 0.9967 - val_loss: 0.0346 - val_acc: 0.9901
Epoch 105/500
10s - loss: 0.0090 - acc: 0.9965 - val_loss: 0.0348 - val_acc: 0.9900
Epoch 106/500
11s - loss: 0.0087 - acc: 0.9967 - val_loss: 0.0343 - val_acc: 0.9900
Epoch 107/500
10s - loss: 0.0088 - acc: 0.9967 - val_loss: 0.0336 - val_acc: 0.9901
Epoch 108/500
10s - loss: 0.0088 - acc: 0.9967 - val_loss: 0.0340 - val_acc: 0.9901
99328/99818 [============================>.] - ETA: 0s
TRAIN
             precision    recall  f1-score   support

          0       1.00      1.00      1.00    331558
          1       0.99      1.00      0.99      9912

avg / total       1.00      1.00      1.00    341470

TP=3556, FN=1, FP=5
precision=0.998595900028, recall=0.9997188642114141, F1=0.999157066592

TP=3556, FN=1, FP=2
precision=0.999437886453, recall=0.9997188642114141, F1=0.999578355587

TEST
             precision    recall  f1-score   support

          0       0.99      0.99      0.99     96865
          1       0.83      0.83      0.83      2953

avg / total       0.99      0.99      0.99     99818

TP=1008, FN=13, FP=121
precision=0.8928255093, recall=0.9872673849167483, F1=0.937674418605

TP=1012, FN=9, FP=39
precision=0.962892483349, recall=0.9911851126346719, F1=0.976833976834
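
Each cross-validation fold stops well short of the configured 500 epochs (108, 99 and 138 epochs in the runs shown here), which indicates an early-stopping callback watching the validation metrics. A sketch of such a setup; the monitored quantity and the patience value are assumptions, since the fit() call itself is not shown in this part of the log:

from keras.callbacks import EarlyStopping

# assumed configuration, passed to fit() via callbacks=[early_stopping]
early_stopping = EarlyStopping(monitor='val_loss', patience=20)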


Creating spectrograms
Spectrogram data info:
(348735, 80)
(348735,)
9970
(348735,)
3544
(348735,)
1.77385694008
(92553, 80)
(92553,)
2895
(92553,)
1034
(92553,)
1.81346903936
(348735, 80)
(92553, 80)
-12.6041554337
6.44664030397
-11.9801955377
6.6162526049

Standardizing (each band separately)
Data after standardizing:
(348735, 80)
(92553, 80)
-1.89013516025e-16
1.0
0.103668529158
1.02231702734

(348735, 15, 80)
(92553, 15, 80)
Reshaping data
(348735, 1, 15, 80)
(92553, 1, 15, 80)
(1, 15, 80)
Concatenating channels
(348735, 1, 15, 80)
(92553, 1, 15, 80)
Train on 348735 samples, validate on 92553 samples
Epoch 1/500
11s - loss: 0.0512 - acc: 0.9818 - val_loss: 0.0415 - val_acc: 0.9828
Epoch 2/500
11s - loss: 0.0341 - acc: 0.9862 - val_loss: 0.0374 - val_acc: 0.9844
Epoch 3/500
11s - loss: 0.0319 - acc: 0.9870 - val_loss: 0.0355 - val_acc: 0.9851
Epoch 4/500
11s - loss: 0.0306 - acc: 0.9876 - val_loss: 0.0369 - val_acc: 0.9845
Epoch 5/500
11s - loss: 0.0290 - acc: 0.9884 - val_loss: 0.0345 - val_acc: 0.9858
Epoch 6/500
11s - loss: 0.0281 - acc: 0.9888 - val_loss: 0.0331 - val_acc: 0.9863
Epoch 7/500
11s - loss: 0.0273 - acc: 0.9892 - val_loss: 0.0334 - val_acc: 0.9860
Epoch 8/500
11s - loss: 0.0267 - acc: 0.9893 - val_loss: 0.0323 - val_acc: 0.9864
Epoch 9/500
11s - loss: 0.0256 - acc: 0.9898 - val_loss: 0.0330 - val_acc: 0.9861
Epoch 10/500
11s - loss: 0.0250 - acc: 0.9900 - val_loss: 0.0311 - val_acc: 0.9872
Epoch 11/500
11s - loss: 0.0246 - acc: 0.9903 - val_loss: 0.0312 - val_acc: 0.9871
Epoch 12/500
11s - loss: 0.0243 - acc: 0.9902 - val_loss: 0.0309 - val_acc: 0.9868
Epoch 13/500
11s - loss: 0.0238 - acc: 0.9905 - val_loss: 0.0305 - val_acc: 0.9873
Epoch 14/500
11s - loss: 0.0233 - acc: 0.9907 - val_loss: 0.0331 - val_acc: 0.9867
Epoch 15/500
11s - loss: 0.0234 - acc: 0.9907 - val_loss: 0.0322 - val_acc: 0.9870
Epoch 16/500
11s - loss: 0.0228 - acc: 0.9909 - val_loss: 0.0316 - val_acc: 0.9872
Epoch 17/500
11s - loss: 0.0223 - acc: 0.9911 - val_loss: 0.0325 - val_acc: 0.9870
Epoch 18/500
11s - loss: 0.0220 - acc: 0.9911 - val_loss: 0.0306 - val_acc: 0.9874
Epoch 19/500
11s - loss: 0.0219 - acc: 0.9913 - val_loss: 0.0308 - val_acc: 0.9874
Epoch 20/500
11s - loss: 0.0211 - acc: 0.9915 - val_loss: 0.0313 - val_acc: 0.9875
Epoch 21/500
11s - loss: 0.0211 - acc: 0.9915 - val_loss: 0.0301 - val_acc: 0.9877
Epoch 22/500
11s - loss: 0.0209 - acc: 0.9916 - val_loss: 0.0299 - val_acc: 0.9878
Epoch 23/500
11s - loss: 0.0204 - acc: 0.9918 - val_loss: 0.0311 - val_acc: 0.9877
Epoch 24/500
11s - loss: 0.0204 - acc: 0.9918 - val_loss: 0.0303 - val_acc: 0.9880
Epoch 25/500
11s - loss: 0.0198 - acc: 0.9920 - val_loss: 0.0303 - val_acc: 0.9877
Epoch 26/500
11s - loss: 0.0197 - acc: 0.9919 - val_loss: 0.0315 - val_acc: 0.9878
Epoch 27/500
11s - loss: 0.0197 - acc: 0.9921 - val_loss: 0.0318 - val_acc: 0.9877
Epoch 28/500
11s - loss: 0.0191 - acc: 0.9923 - val_loss: 0.0304 - val_acc: 0.9881
Epoch 29/500
11s - loss: 0.0192 - acc: 0.9922 - val_loss: 0.0309 - val_acc: 0.9879
Epoch 30/500
11s - loss: 0.0188 - acc: 0.9923 - val_loss: 0.0309 - val_acc: 0.9877
Epoch 31/500
11s - loss: 0.0186 - acc: 0.9926 - val_loss: 0.0316 - val_acc: 0.9880
Epoch 32/500
11s - loss: 0.0183 - acc: 0.9927 - val_loss: 0.0309 - val_acc: 0.9882
Epoch 33/500
11s - loss: 0.0181 - acc: 0.9927 - val_loss: 0.0320 - val_acc: 0.9883
Epoch 34/500
11s - loss: 0.0175 - acc: 0.9929 - val_loss: 0.0321 - val_acc: 0.9882
Epoch 35/500
11s - loss: 0.0176 - acc: 0.9929 - val_loss: 0.0310 - val_acc: 0.9881
Epoch 36/500
11s - loss: 0.0174 - acc: 0.9929 - val_loss: 0.0322 - val_acc: 0.9877
Epoch 37/500
11s - loss: 0.0171 - acc: 0.9931 - val_loss: 0.0336 - val_acc: 0.9879
Epoch 38/500
11s - loss: 0.0172 - acc: 0.9930 - val_loss: 0.0315 - val_acc: 0.9880
Epoch 39/500
11s - loss: 0.0169 - acc: 0.9931 - val_loss: 0.0325 - val_acc: 0.9882
Epoch 40/500
11s - loss: 0.0166 - acc: 0.9933 - val_loss: 0.0313 - val_acc: 0.9882
Epoch 41/500
11s - loss: 0.0162 - acc: 0.9935 - val_loss: 0.0313 - val_acc: 0.9882
Epoch 42/500
11s - loss: 0.0158 - acc: 0.9936 - val_loss: 0.0323 - val_acc: 0.9884
Epoch 43/500
11s - loss: 0.0160 - acc: 0.9937 - val_loss: 0.0330 - val_acc: 0.9878
Epoch 44/500
11s - loss: 0.0156 - acc: 0.9937 - val_loss: 0.0327 - val_acc: 0.9881
Epoch 45/500
11s - loss: 0.0154 - acc: 0.9937 - val_loss: 0.0333 - val_acc: 0.9880
Epoch 46/500
11s - loss: 0.0151 - acc: 0.9939 - val_loss: 0.0330 - val_acc: 0.9882
Epoch 47/500
11s - loss: 0.0150 - acc: 0.9940 - val_loss: 0.0325 - val_acc: 0.9883
Epoch 48/500
11s - loss: 0.0149 - acc: 0.9939 - val_loss: 0.0343 - val_acc: 0.9878
Epoch 49/500
11s - loss: 0.0145 - acc: 0.9941 - val_loss: 0.0329 - val_acc: 0.9881
Epoch 50/500
11s - loss: 0.0145 - acc: 0.9942 - val_loss: 0.0347 - val_acc: 0.9880
Epoch 51/500
11s - loss: 0.0144 - acc: 0.9943 - val_loss: 0.0363 - val_acc: 0.9879
Epoch 52/500
11s - loss: 0.0141 - acc: 0.9943 - val_loss: 0.0353 - val_acc: 0.9879
Epoch 53/500
11s - loss: 0.0136 - acc: 0.9944 - val_loss: 0.0368 - val_acc: 0.9879
Epoch 54/500
11s - loss: 0.0139 - acc: 0.9944 - val_loss: 0.0361 - val_acc: 0.9881
Epoch 55/500
11s - loss: 0.0137 - acc: 0.9945 - val_loss: 0.0356 - val_acc: 0.9880
Epoch 56/500
11s - loss: 0.0133 - acc: 0.9945 - val_loss: 0.0351 - val_acc: 0.9876
Epoch 57/500
11s - loss: 0.0130 - acc: 0.9948 - val_loss: 0.0366 - val_acc: 0.9879
Epoch 58/500
11s - loss: 0.0135 - acc: 0.9946 - val_loss: 0.0356 - val_acc: 0.9877
Epoch 59/500
11s - loss: 0.0129 - acc: 0.9949 - val_loss: 0.0377 - val_acc: 0.9880
Epoch 60/500
11s - loss: 0.0127 - acc: 0.9950 - val_loss: 0.0364 - val_acc: 0.9876
Epoch 61/500
11s - loss: 0.0126 - acc: 0.9949 - val_loss: 0.0378 - val_acc: 0.9879
Epoch 62/500
11s - loss: 0.0126 - acc: 0.9950 - val_loss: 0.0360 - val_acc: 0.9876
Epoch 63/500
11s - loss: 0.0125 - acc: 0.9949 - val_loss: 0.0406 - val_acc: 0.9878
Epoch 64/500
11s - loss: 0.0125 - acc: 0.9950 - val_loss: 0.0366 - val_acc: 0.9880
Epoch 65/500
11s - loss: 0.0122 - acc: 0.9952 - val_loss: 0.0376 - val_acc: 0.9880
Epoch 66/500
11s - loss: 0.0118 - acc: 0.9952 - val_loss: 0.0382 - val_acc: 0.9879
Epoch 67/500
11s - loss: 0.0121 - acc: 0.9952 - val_loss: 0.0395 - val_acc: 0.9878
Epoch 68/500
11s - loss: 0.0117 - acc: 0.9952 - val_loss: 0.0397 - val_acc: 0.9877
Epoch 69/500
11s - loss: 0.0115 - acc: 0.9955 - val_loss: 0.0373 - val_acc: 0.9877
Epoch 70/500
11s - loss: 0.0116 - acc: 0.9953 - val_loss: 0.0390 - val_acc: 0.9875
Epoch 71/500
11s - loss: 0.0118 - acc: 0.9955 - val_loss: 0.0388 - val_acc: 0.9874
Epoch 72/500
11s - loss: 0.0111 - acc: 0.9956 - val_loss: 0.0401 - val_acc: 0.9876
Epoch 73/500
11s - loss: 0.0111 - acc: 0.9955 - val_loss: 0.0391 - val_acc: 0.9878
Epoch 74/500
11s - loss: 0.0109 - acc: 0.9957 - val_loss: 0.0402 - val_acc: 0.9878
Epoch 75/500
11s - loss: 0.0110 - acc: 0.9956 - val_loss: 0.0363 - val_acc: 0.9878
Epoch 76/500
11s - loss: 0.0109 - acc: 0.9957 - val_loss: 0.0411 - val_acc: 0.9874
Epoch 77/500
11s - loss: 0.0108 - acc: 0.9956 - val_loss: 0.0387 - val_acc: 0.9879
Epoch 78/500
11s - loss: 0.0107 - acc: 0.9958 - val_loss: 0.0415 - val_acc: 0.9874
Epoch 79/500
11s - loss: 0.0106 - acc: 0.9957 - val_loss: 0.0410 - val_acc: 0.9875
Epoch 80/500
11s - loss: 0.0105 - acc: 0.9958 - val_loss: 0.0426 - val_acc: 0.9875
Epoch 81/500
11s - loss: 0.0106 - acc: 0.9959 - val_loss: 0.0432 - val_acc: 0.9876
Epoch 82/500
11s - loss: 0.0101 - acc: 0.9959 - val_loss: 0.0413 - val_acc: 0.9877
Epoch 83/500
11s - loss: 0.0102 - acc: 0.9960 - val_loss: 0.0414 - val_acc: 0.9880
Epoch 84/500
11s - loss: 0.0103 - acc: 0.9960 - val_loss: 0.0412 - val_acc: 0.9878
Epoch 85/500
11s - loss: 0.0101 - acc: 0.9960 - val_loss: 0.0424 - val_acc: 0.9875
Epoch 86/500
11s - loss: 0.0099 - acc: 0.9962 - val_loss: 0.0398 - val_acc: 0.9880
Epoch 87/500
11s - loss: 0.0099 - acc: 0.9961 - val_loss: 0.0419 - val_acc: 0.9878
Epoch 88/500
11s - loss: 0.0096 - acc: 0.9962 - val_loss: 0.0407 - val_acc: 0.9879
Epoch 89/500
11s - loss: 0.0097 - acc: 0.9961 - val_loss: 0.0401 - val_acc: 0.9879
Epoch 90/500
11s - loss: 0.0097 - acc: 0.9961 - val_loss: 0.0422 - val_acc: 0.9874
Epoch 91/500
11s - loss: 0.0095 - acc: 0.9963 - val_loss: 0.0422 - val_acc: 0.9875
Epoch 92/500
11s - loss: 0.0094 - acc: 0.9963 - val_loss: 0.0422 - val_acc: 0.9879
Epoch 93/500
11s - loss: 0.0089 - acc: 0.9964 - val_loss: 0.0413 - val_acc: 0.9880
Epoch 94/500
11s - loss: 0.0092 - acc: 0.9965 - val_loss: 0.0486 - val_acc: 0.9874
Epoch 95/500
11s - loss: 0.0093 - acc: 0.9963 - val_loss: 0.0443 - val_acc: 0.9874
Epoch 96/500
11s - loss: 0.0093 - acc: 0.9964 - val_loss: 0.0463 - val_acc: 0.9874
Epoch 97/500
11s - loss: 0.0092 - acc: 0.9965 - val_loss: 0.0430 - val_acc: 0.9877
Epoch 98/500
11s - loss: 0.0090 - acc: 0.9965 - val_loss: 0.0459 - val_acc: 0.9876
Epoch 99/500
11s - loss: 0.0092 - acc: 0.9963 - val_loss: 0.0452 - val_acc: 0.9877
348735/348735 [==============================] - 4s     
91136/92553 [============================>.] - ETA: 0s
TRAIN
             precision    recall  f1-score   support

          0       1.00      1.00      1.00    338765
          1       0.99      0.99      0.99      9970

avg / total       1.00      1.00      1.00    348735

TP=3543, FN=1, FP=10
precision=0.997185477062, recall=0.9997178329571106, F1=0.998450049317

TP=3543, FN=1, FP=5
precision=0.998590755355, recall=0.9997178329571106, F1=0.999153976311

TEST
             precision    recall  f1-score   support

          0       0.99      1.00      0.99     89658
          1       0.84      0.75      0.79      2895

avg / total       0.99      0.99      0.99     92553

TP=979, FN=55, FP=102
precision=0.905642923219, recall=0.9468085106382979, F1=0.925768321513

TP=1000, FN=34, FP=15
precision=0.985221674877, recall=0.9671179883945842, F1=0.976085895559


Creating spectrograms
Spectrogram data info:
(352084, 80)
(352084,)
10371
(352084,)
3682
(352084,)
1.7815152066
(89204, 80)
(89204,)
2494
(89204,)
896
(89204,)
1.78472938433
(352084, 80)
(89204, 80)
-12.4504815375
6.56822501202
-12.5633137652
6.15803152318

Standardizing (each band separately)
Data after standardizing:
(352084, 80)
(89204, 80)
1.10527614297e-16
1.0
-0.0174125625548
0.931817443078

(352084, 15, 80)
(89204, 15, 80)
Reshaping data
(352084, 1, 15, 80)
(89204, 1, 15, 80)
(1, 15, 80)
Concatenating channels
(352084, 1, 15, 80)
(89204, 1, 15, 80)
Train on 352084 samples, validate on 89204 samples
Epoch 1/500
11s - loss: 0.0581 - acc: 0.9784 - val_loss: 0.0379 - val_acc: 0.9854
Epoch 2/500
11s - loss: 0.0375 - acc: 0.9850 - val_loss: 0.0335 - val_acc: 0.9863
Epoch 3/500
11s - loss: 0.0344 - acc: 0.9860 - val_loss: 0.0315 - val_acc: 0.9871
Epoch 4/500
11s - loss: 0.0330 - acc: 0.9865 - val_loss: 0.0354 - val_acc: 0.9863
Epoch 5/500
11s - loss: 0.0315 - acc: 0.9872 - val_loss: 0.0305 - val_acc: 0.9882
Epoch 6/500
11s - loss: 0.0302 - acc: 0.9877 - val_loss: 0.0288 - val_acc: 0.9881
Epoch 7/500
11s - loss: 0.0291 - acc: 0.9881 - val_loss: 0.0315 - val_acc: 0.9881
Epoch 8/500
11s - loss: 0.0287 - acc: 0.9884 - val_loss: 0.0274 - val_acc: 0.9892
Epoch 9/500
11s - loss: 0.0274 - acc: 0.9890 - val_loss: 0.0261 - val_acc: 0.9895
Epoch 10/500
11s - loss: 0.0269 - acc: 0.9892 - val_loss: 0.0299 - val_acc: 0.9889
Epoch 11/500
11s - loss: 0.0265 - acc: 0.9895 - val_loss: 0.0261 - val_acc: 0.9896
Epoch 12/500
11s - loss: 0.0260 - acc: 0.9895 - val_loss: 0.0261 - val_acc: 0.9896
Epoch 13/500
11s - loss: 0.0253 - acc: 0.9898 - val_loss: 0.0255 - val_acc: 0.9897
Epoch 14/500
11s - loss: 0.0251 - acc: 0.9899 - val_loss: 0.0259 - val_acc: 0.9898
Epoch 15/500
11s - loss: 0.0247 - acc: 0.9901 - val_loss: 0.0277 - val_acc: 0.9897
Epoch 16/500
11s - loss: 0.0242 - acc: 0.9903 - val_loss: 0.0259 - val_acc: 0.9897
Epoch 17/500
11s - loss: 0.0237 - acc: 0.9905 - val_loss: 0.0267 - val_acc: 0.9899
Epoch 18/500
11s - loss: 0.0234 - acc: 0.9906 - val_loss: 0.0254 - val_acc: 0.9902
Epoch 19/500
11s - loss: 0.0230 - acc: 0.9907 - val_loss: 0.0245 - val_acc: 0.9901
Epoch 20/500
11s - loss: 0.0229 - acc: 0.9908 - val_loss: 0.0287 - val_acc: 0.9896
Epoch 21/500
11s - loss: 0.0227 - acc: 0.9908 - val_loss: 0.0244 - val_acc: 0.9904
Epoch 22/500
11s - loss: 0.0223 - acc: 0.9908 - val_loss: 0.0270 - val_acc: 0.9902
Epoch 23/500
11s - loss: 0.0219 - acc: 0.9910 - val_loss: 0.0253 - val_acc: 0.9904
Epoch 24/500
11s - loss: 0.0218 - acc: 0.9913 - val_loss: 0.0262 - val_acc: 0.9900
Epoch 25/500
11s - loss: 0.0211 - acc: 0.9913 - val_loss: 0.0273 - val_acc: 0.9903
Epoch 26/500
11s - loss: 0.0209 - acc: 0.9914 - val_loss: 0.0268 - val_acc: 0.9902
Epoch 27/500
11s - loss: 0.0208 - acc: 0.9915 - val_loss: 0.0267 - val_acc: 0.9903
Epoch 28/500
11s - loss: 0.0206 - acc: 0.9915 - val_loss: 0.0269 - val_acc: 0.9901
Epoch 29/500
11s - loss: 0.0201 - acc: 0.9918 - val_loss: 0.0260 - val_acc: 0.9903
Epoch 30/500
11s - loss: 0.0196 - acc: 0.9919 - val_loss: 0.0264 - val_acc: 0.9905
Epoch 31/500
11s - loss: 0.0197 - acc: 0.9920 - val_loss: 0.0282 - val_acc: 0.9903
Epoch 32/500
11s - loss: 0.0195 - acc: 0.9919 - val_loss: 0.0269 - val_acc: 0.9903
Epoch 33/500
11s - loss: 0.0192 - acc: 0.9921 - val_loss: 0.0281 - val_acc: 0.9904
Epoch 34/500
11s - loss: 0.0189 - acc: 0.9922 - val_loss: 0.0261 - val_acc: 0.9905
Epoch 35/500
11s - loss: 0.0187 - acc: 0.9924 - val_loss: 0.0257 - val_acc: 0.9906
Epoch 36/500
11s - loss: 0.0185 - acc: 0.9925 - val_loss: 0.0250 - val_acc: 0.9906
Epoch 37/500
11s - loss: 0.0184 - acc: 0.9926 - val_loss: 0.0272 - val_acc: 0.9906
Epoch 38/500
11s - loss: 0.0177 - acc: 0.9929 - val_loss: 0.0258 - val_acc: 0.9906
Epoch 39/500
11s - loss: 0.0176 - acc: 0.9928 - val_loss: 0.0261 - val_acc: 0.9905
Epoch 40/500
11s - loss: 0.0172 - acc: 0.9930 - val_loss: 0.0283 - val_acc: 0.9903
Epoch 41/500
11s - loss: 0.0172 - acc: 0.9930 - val_loss: 0.0284 - val_acc: 0.9903
Epoch 42/500
11s - loss: 0.0171 - acc: 0.9929 - val_loss: 0.0281 - val_acc: 0.9906
Epoch 43/500
11s - loss: 0.0171 - acc: 0.9931 - val_loss: 0.0282 - val_acc: 0.9907
Epoch 44/500
11s - loss: 0.0160 - acc: 0.9933 - val_loss: 0.0261 - val_acc: 0.9906
Epoch 45/500
11s - loss: 0.0163 - acc: 0.9934 - val_loss: 0.0283 - val_acc: 0.9906
Epoch 46/500
11s - loss: 0.0164 - acc: 0.9933 - val_loss: 0.0278 - val_acc: 0.9904
Epoch 47/500
11s - loss: 0.0155 - acc: 0.9935 - val_loss: 0.0275 - val_acc: 0.9907
Epoch 48/500
11s - loss: 0.0159 - acc: 0.9936 - val_loss: 0.0304 - val_acc: 0.9905
Epoch 49/500
11s - loss: 0.0154 - acc: 0.9936 - val_loss: 0.0271 - val_acc: 0.9906
Epoch 50/500
11s - loss: 0.0154 - acc: 0.9938 - val_loss: 0.0298 - val_acc: 0.9904
Epoch 51/500
11s - loss: 0.0152 - acc: 0.9938 - val_loss: 0.0282 - val_acc: 0.9905
Epoch 52/500
11s - loss: 0.0146 - acc: 0.9939 - val_loss: 0.0306 - val_acc: 0.9903
Epoch 53/500
11s - loss: 0.0147 - acc: 0.9939 - val_loss: 0.0302 - val_acc: 0.9903
Epoch 54/500
11s - loss: 0.0146 - acc: 0.9941 - val_loss: 0.0303 - val_acc: 0.9905
Epoch 55/500
11s - loss: 0.0145 - acc: 0.9941 - val_loss: 0.0290 - val_acc: 0.9903
Epoch 56/500
11s - loss: 0.0143 - acc: 0.9941 - val_loss: 0.0272 - val_acc: 0.9906
Epoch 57/500
11s - loss: 0.0137 - acc: 0.9944 - val_loss: 0.0310 - val_acc: 0.9904
Epoch 58/500
11s - loss: 0.0136 - acc: 0.9943 - val_loss: 0.0286 - val_acc: 0.9906
Epoch 59/500
11s - loss: 0.0134 - acc: 0.9945 - val_loss: 0.0285 - val_acc: 0.9905
Epoch 60/500
11s - loss: 0.0137 - acc: 0.9943 - val_loss: 0.0315 - val_acc: 0.9904
Epoch 61/500
11s - loss: 0.0134 - acc: 0.9945 - val_loss: 0.0301 - val_acc: 0.9905
Epoch 62/500
11s - loss: 0.0131 - acc: 0.9946 - val_loss: 0.0286 - val_acc: 0.9907
Epoch 63/500
11s - loss: 0.0128 - acc: 0.9948 - val_loss: 0.0321 - val_acc: 0.9905
Epoch 64/500
11s - loss: 0.0126 - acc: 0.9948 - val_loss: 0.0314 - val_acc: 0.9904
Epoch 65/500
11s - loss: 0.0126 - acc: 0.9950 - val_loss: 0.0336 - val_acc: 0.9905
Epoch 66/500
11s - loss: 0.0127 - acc: 0.9949 - val_loss: 0.0293 - val_acc: 0.9905
Epoch 67/500
11s - loss: 0.0127 - acc: 0.9948 - val_loss: 0.0327 - val_acc: 0.9903
Epoch 68/500
11s - loss: 0.0123 - acc: 0.9949 - val_loss: 0.0337 - val_acc: 0.9907
Epoch 69/500
11s - loss: 0.0120 - acc: 0.9950 - val_loss: 0.0318 - val_acc: 0.9904
Epoch 70/500
11s - loss: 0.0120 - acc: 0.9950 - val_loss: 0.0316 - val_acc: 0.9906
Epoch 71/500
11s - loss: 0.0117 - acc: 0.9954 - val_loss: 0.0303 - val_acc: 0.9904
Epoch 72/500
11s - loss: 0.0119 - acc: 0.9952 - val_loss: 0.0329 - val_acc: 0.9906
Epoch 73/500
11s - loss: 0.0116 - acc: 0.9953 - val_loss: 0.0302 - val_acc: 0.9906
Epoch 74/500
11s - loss: 0.0117 - acc: 0.9952 - val_loss: 0.0334 - val_acc: 0.9904
Epoch 75/500
11s - loss: 0.0114 - acc: 0.9954 - val_loss: 0.0341 - val_acc: 0.9903
Epoch 76/500
11s - loss: 0.0113 - acc: 0.9955 - val_loss: 0.0325 - val_acc: 0.9904
Epoch 77/500
11s - loss: 0.0112 - acc: 0.9955 - val_loss: 0.0324 - val_acc: 0.9905
Epoch 78/500
11s - loss: 0.0111 - acc: 0.9957 - val_loss: 0.0368 - val_acc: 0.9903
Epoch 79/500
11s - loss: 0.0114 - acc: 0.9954 - val_loss: 0.0340 - val_acc: 0.9901
Epoch 80/500
11s - loss: 0.0109 - acc: 0.9956 - val_loss: 0.0336 - val_acc: 0.9905
Epoch 81/500
11s - loss: 0.0105 - acc: 0.9958 - val_loss: 0.0379 - val_acc: 0.9900
Epoch 82/500
11s - loss: 0.0104 - acc: 0.9959 - val_loss: 0.0356 - val_acc: 0.9902
Epoch 83/500
11s - loss: 0.0103 - acc: 0.9958 - val_loss: 0.0348 - val_acc: 0.9904
Epoch 84/500
11s - loss: 0.0108 - acc: 0.9958 - val_loss: 0.0333 - val_acc: 0.9903
Epoch 85/500
11s - loss: 0.0102 - acc: 0.9960 - val_loss: 0.0358 - val_acc: 0.9902
Epoch 86/500
11s - loss: 0.0109 - acc: 0.9956 - val_loss: 0.0370 - val_acc: 0.9903
Epoch 87/500
11s - loss: 0.0100 - acc: 0.9960 - val_loss: 0.0369 - val_acc: 0.9902
Epoch 88/500
11s - loss: 0.0103 - acc: 0.9959 - val_loss: 0.0396 - val_acc: 0.9900
Epoch 89/500
11s - loss: 0.0104 - acc: 0.9959 - val_loss: 0.0335 - val_acc: 0.9903
Epoch 90/500
11s - loss: 0.0102 - acc: 0.9959 - val_loss: 0.0344 - val_acc: 0.9902
Epoch 91/500
11s - loss: 0.0098 - acc: 0.9960 - val_loss: 0.0367 - val_acc: 0.9905
Epoch 92/500
11s - loss: 0.0102 - acc: 0.9960 - val_loss: 0.0380 - val_acc: 0.9904
Epoch 93/500
11s - loss: 0.0097 - acc: 0.9960 - val_loss: 0.0333 - val_acc: 0.9902
Epoch 94/500
11s - loss: 0.0096 - acc: 0.9962 - val_loss: 0.0377 - val_acc: 0.9901
Epoch 95/500
11s - loss: 0.0094 - acc: 0.9962 - val_loss: 0.0358 - val_acc: 0.9902
Epoch 96/500
11s - loss: 0.0097 - acc: 0.9961 - val_loss: 0.0377 - val_acc: 0.9900
Epoch 97/500
11s - loss: 0.0096 - acc: 0.9963 - val_loss: 0.0369 - val_acc: 0.9903
Epoch 98/500
11s - loss: 0.0097 - acc: 0.9961 - val_loss: 0.0332 - val_acc: 0.9903
Epoch 99/500
11s - loss: 0.0097 - acc: 0.9962 - val_loss: 0.0336 - val_acc: 0.9902
Epoch 100/500
11s - loss: 0.0096 - acc: 0.9962 - val_loss: 0.0352 - val_acc: 0.9904
Epoch 101/500
11s - loss: 0.0092 - acc: 0.9964 - val_loss: 0.0354 - val_acc: 0.9906
Epoch 102/500
11s - loss: 0.0090 - acc: 0.9964 - val_loss: 0.0378 - val_acc: 0.9899
Epoch 103/500
11s - loss: 0.0089 - acc: 0.9964 - val_loss: 0.0369 - val_acc: 0.9903
Epoch 104/500
11s - loss: 0.0091 - acc: 0.9964 - val_loss: 0.0369 - val_acc: 0.9901
Epoch 105/500
11s - loss: 0.0089 - acc: 0.9965 - val_loss: 0.0362 - val_acc: 0.9901
Epoch 106/500
11s - loss: 0.0087 - acc: 0.9966 - val_loss: 0.0383 - val_acc: 0.9902
Epoch 107/500
11s - loss: 0.0091 - acc: 0.9965 - val_loss: 0.0371 - val_acc: 0.9902
Epoch 108/500
11s - loss: 0.0086 - acc: 0.9966 - val_loss: 0.0358 - val_acc: 0.9901
Epoch 109/500
11s - loss: 0.0092 - acc: 0.9964 - val_loss: 0.0418 - val_acc: 0.9901
Epoch 110/500
11s - loss: 0.0092 - acc: 0.9964 - val_loss: 0.0358 - val_acc: 0.9902
Epoch 111/500
11s - loss: 0.0088 - acc: 0.9965 - val_loss: 0.0409 - val_acc: 0.9900
Epoch 112/500
11s - loss: 0.0086 - acc: 0.9967 - val_loss: 0.0379 - val_acc: 0.9901
Epoch 113/500
11s - loss: 0.0085 - acc: 0.9967 - val_loss: 0.0374 - val_acc: 0.9902
Epoch 114/500
11s - loss: 0.0085 - acc: 0.9967 - val_loss: 0.0395 - val_acc: 0.9902
Epoch 115/500
11s - loss: 0.0087 - acc: 0.9967 - val_loss: 0.0402 - val_acc: 0.9900
Epoch 116/500
11s - loss: 0.0088 - acc: 0.9965 - val_loss: 0.0412 - val_acc: 0.9900
Epoch 117/500
11s - loss: 0.0086 - acc: 0.9967 - val_loss: 0.0388 - val_acc: 0.9902
Epoch 118/500
11s - loss: 0.0082 - acc: 0.9968 - val_loss: 0.0389 - val_acc: 0.9903
Epoch 119/500
11s - loss: 0.0083 - acc: 0.9967 - val_loss: 0.0377 - val_acc: 0.9904
Epoch 120/500
11s - loss: 0.0081 - acc: 0.9969 - val_loss: 0.0374 - val_acc: 0.9903
Epoch 121/500
11s - loss: 0.0081 - acc: 0.9968 - val_loss: 0.0367 - val_acc: 0.9900
Epoch 122/500
11s - loss: 0.0084 - acc: 0.9968 - val_loss: 0.0376 - val_acc: 0.9902
Epoch 123/500
11s - loss: 0.0080 - acc: 0.9968 - val_loss: 0.0420 - val_acc: 0.9900
Epoch 124/500
11s - loss: 0.0082 - acc: 0.9968 - val_loss: 0.0386 - val_acc: 0.9902
Epoch 125/500
11s - loss: 0.0080 - acc: 0.9969 - val_loss: 0.0422 - val_acc: 0.9902
Epoch 126/500
11s - loss: 0.0083 - acc: 0.9969 - val_loss: 0.0393 - val_acc: 0.9901
Epoch 127/500
11s - loss: 0.0080 - acc: 0.9969 - val_loss: 0.0412 - val_acc: 0.9900
Epoch 128/500
11s - loss: 0.0082 - acc: 0.9969 - val_loss: 0.0399 - val_acc: 0.9902
Epoch 129/500
11s - loss: 0.0078 - acc: 0.9969 - val_loss: 0.0435 - val_acc: 0.9898
Epoch 130/500
10s - loss: 0.0077 - acc: 0.9970 - val_loss: 0.0401 - val_acc: 0.9902
Epoch 131/500
11s - loss: 0.0080 - acc: 0.9970 - val_loss: 0.0382 - val_acc: 0.9903
Epoch 132/500
11s - loss: 0.0075 - acc: 0.9972 - val_loss: 0.0367 - val_acc: 0.9901
Epoch 133/500
11s - loss: 0.0077 - acc: 0.9969 - val_loss: 0.0404 - val_acc: 0.9904
Epoch 134/500
11s - loss: 0.0077 - acc: 0.9969 - val_loss: 0.0388 - val_acc: 0.9903
Epoch 135/500
11s - loss: 0.0081 - acc: 0.9968 - val_loss: 0.0383 - val_acc: 0.9901
Epoch 136/500
11s - loss: 0.0080 - acc: 0.9969 - val_loss: 0.0391 - val_acc: 0.9904
Epoch 137/500
11s - loss: 0.0075 - acc: 0.9971 - val_loss: 0.0395 - val_acc: 0.9903
Epoch 138/500
11s - loss: 0.0079 - acc: 0.9970 - val_loss: 0.0425 - val_acc: 0.9900
87040/89204 [============================>.] - ETA: 0s
TRAIN
             precision    recall  f1-score   support

          0       1.00      1.00      1.00    341713
          1       1.00      1.00      1.00     10371

avg / total       1.00      1.00      1.00    352084

TP=3682, FN=0, FP=9
precision=0.997561636413, recall=1.0, F1=0.998779329988

TP=3682, FN=0, FP=6
precision=0.998373101952, recall=1.0, F1=0.999185888738

TEST
             precision    recall  f1-score   support

          0       0.99      1.00      0.99     86710
          1       0.84      0.79      0.82      2494

avg / total       0.99      0.99      0.99     89204

TP=855, FN=41, FP=87
precision=0.907643312102, recall=0.9542410714285714, F1=0.930359085963

TP=873, FN=23, FP=24
precision=0.973244147157, recall=0.9743303571428571, F1=0.973786949247
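
Each fold above runs the same preprocessing pipeline: 80-band filterbank frames, per-band standardization with statistics fitted on the training fold only (which is why the test-fold mean and standard deviation land near, but not exactly at, 0 and 1), windowing into 15-frame excerpts, and reshaping to (N, 1, 15, 80) for the CNN. A minimal sketch of the standardization step, assuming X_train and X_test are (n_frames, 80) arrays:

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()                # one mean/std per column, i.e. per frequency band
X_train = scaler.fit_transform(X_train)  # training fold: mean ~0, std ~1 per band
X_test = scaler.transform(X_test)        # reuses train statistics, so test stats are only approximately 0/1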


Save / load


In [18]:
def save_model(model, path_to_json='model.json', path_to_weights='weights.hdf5'):
    with open(path_to_json, 'w') as f:
        f.write(model.to_json())
    model.save_weights(path_to_weights)

def load_model(path_to_json='model.json', path_to_weights='weights.hdf5'):
    with open(path_to_json) as f:
        model = model_from_json(f.read())
    model.load_weights(path_to_weights)
    # to_json() stores only the architecture, so the loaded model must be recompiled
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    
    return model

# currently impossible without custom mods: https://github.com/fchollet/keras/issues/4274
# def load_model_sklearn(path_to_json='model.json', path_to_weights='weights.hdf5'):
#     clf = KerasClassifier(
#         build_fn=load_model,
#         batch_size=1024, epochs=500,
#         path_to_json=path_to_json, path_to_weights=path_to_weights,
#     )
#     
#     return clf
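
As an aside, Keras also supports a single-file round trip: model.save() writes architecture, weights and optimizer state into one HDF5 file, and keras.models.load_model restores it without a manual compile step. A sketch (the path is a placeholder, and note the name clash with the load_model defined above):

from keras.models import load_model as keras_load_model

model.save('models/example.hdf5')                         # architecture + weights + optimizer state
restored_model = keras_load_model('models/example.hdf5')  # ready to use, no compile() needed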

In [19]:
save_model(model, path_to_json='models/20170423_1_channel_ds1-4.json', path_to_weights='models/20170423_1_channel_ds1-4.hdf5')

In [20]:
loaded_model = load_model(path_to_json='models/20170423_1_channel_ds1-4.json', path_to_weights='models/20170423_1_channel_ds1-4.hdf5')

In [21]:
print(loaded_model.evaluate(X_test, y_test, batch_size=1024))  # prints [loss, accuracy]


230400/231210 [============================>.] - ETA: 0s
[0.052954115933474773, 0.98060205009722867]

In [22]:
y_test_predicted = loaded_model.predict_classes(X_test, batch_size=1024).ravel()  # (N, 1) -> (N,)
print(classification_report(y_test, y_test_predicted))
print(onset_metric(y_test, y_actual_onset_only_test, y_test_predicted, n_tolerance_frames_plus_minus=2))
print(onset_metric(y_test, y_actual_onset_only_test, y_test_predicted, n_tolerance_frames_plus_minus=5))


230400/231210 [============================>.] - ETA: 0s
             precision    recall  f1-score   support

          0       0.99      0.99      0.99    217041
          1       0.84      0.85      0.84     14169

avg / total       0.98      0.98      0.98    231210

TP=4730, FN=117, FP=530
precision=0.899239543726, recall=0.9758613575407469, F1=0.935984960918

TP=4760, FN=87, FP=143
precision=0.970834183153, recall=0.9820507530431195, F1=0.97641025641
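
The two TP/FN/FP blocks after each classification report are onset_metric evaluated with tolerance windows of +/-2 and +/-5 frames. The actual implementation lives in onset_detection.metrics; the following hypothetical helper only sketches the general idea behind such a tolerant count:

import numpy as np

def tolerant_onset_counts(y_onsets, y_pred, tol=2):
    """Count onset hits/misses within a +/- tol frame window.

    y_onsets: 1-D 0/1 array marking ground-truth onset frames.
    y_pred:   1-D 0/1 array of per-frame predictions.
    Returns (TP, FN, FP), where FP counts predicted positive frames
    that are not within tol frames of any actual onset.
    """
    onset_idx = np.flatnonzero(y_onsets)
    pred_idx = np.flatnonzero(y_pred)
    tp = sum(np.any(np.abs(pred_idx - i) <= tol) for i in onset_idx)
    fn = len(onset_idx) - tp
    fp = sum(np.all(np.abs(onset_idx - j) > tol) for j in pred_idx)
    return tp, fn, fp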

Find offset of onset detection labeling per dataset

The ground-truth labels are shifted by -5 to +5 frames against the fixed predictions; the shift that scores best reveals any systematic labeling offset in a dataset's annotations (a helper for picking that shift automatically is sketched after the outputs below).

In [22]:
for name, X, y, y_actual_onset_only in [
    ('ds1', X_ds1, y_ds1, y_actual_onset_only_ds1),
    ('ds2', X_ds2, y_ds2, y_actual_onset_only_ds2),
    ('ds3', X_ds3, y_ds3, y_actual_onset_only_ds3),
]:
    print(name)
    y_predicted = loaded_model.predict_classes(X, batch_size=1024).ravel()
    for shift_by in range(-5, 6):
        print(shift_by)
        # shift the ground truth by shift_by frames (positive = later), zero-padding the ends
        y_shifted = shift(y, shift_by, cval=0)
        y_actual_onset_only_shifted = shift(y_actual_onset_only, shift_by, cval=0)
        print(classification_report(y_shifted, y_predicted))
        print(onset_metric(y_shifted, y_actual_onset_only_shifted, y_predicted, n_tolerance_frames_plus_minus=2))
        print(onset_metric(y_shifted, y_actual_onset_only_shifted, y_predicted, n_tolerance_frames_plus_minus=5))
    print('')


ds1
95232/96128 [============================>.] - ETA: 0s
-5
             precision    recall  f1-score   support

          0       0.98      0.99      0.99     94123
          1       0.27      0.21      0.24      2005

avg / total       0.97      0.97      0.97     96128

TP=366, FN=423, FP=1096
precision=0.250341997264, recall=0.46387832699619774, F1=0.325188804976

TP=776, FN=13, FP=217
precision=0.781470292044, recall=0.9835234474017744, F1=0.870931537598

-4
             precision    recall  f1-score   support

          0       0.98      0.99      0.99     94123
          1       0.30      0.24      0.26      2005

avg / total       0.97      0.97      0.97     96128

TP=546, FN=243, FP=901
precision=0.377332411887, recall=0.6920152091254753, F1=0.488372093023

TP=780, FN=9, FP=53
precision=0.93637454982, recall=0.9885931558935361, F1=0.961775585697

-3
             precision    recall  f1-score   support

          0       0.99      0.99      0.99     94123
          1       0.40      0.33      0.36      2005

avg / total       0.97      0.98      0.97     96128

TP=741, FN=48, FP=558
precision=0.570438799076, recall=0.9391634980988594, F1=0.709770114943

TP=784, FN=5, FP=30
precision=0.963144963145, recall=0.9936628643852978, F1=0.978165938865

-2
             precision    recall  f1-score   support

          0       0.99      0.99      0.99     94123
          1       0.61      0.49      0.55      2005

avg / total       0.98      0.98      0.98     96128

TP=767, FN=22, FP=225
precision=0.773185483871, recall=0.9721166032953105, F1=0.861313868613

TP=786, FN=3, FP=23
precision=0.971569839308, recall=0.9961977186311787, F1=0.983729662078

-1
             precision    recall  f1-score   support

          0       0.99      1.00      1.00     94123
          1       0.84      0.67      0.74      2005

avg / total       0.99      0.99      0.99     96128

TP=766, FN=23, FP=60
precision=0.927360774818, recall=0.9708491761723701, F1=0.948606811146

TP=787, FN=2, FP=19
precision=0.976426799007, recall=0.9974651457541192, F1=0.986833855799

0
             precision    recall  f1-score   support

          0       0.99      1.00      1.00     94123
          1       0.84      0.68      0.75      2005

avg / total       0.99      0.99      0.99     96128

TP=756, FN=33, FP=39
precision=0.950943396226, recall=0.9581749049429658, F1=0.954545454545

TP=785, FN=4, FP=16
precision=0.980024968789, recall=0.9949302915082383, F1=0.987421383648

1
             precision    recall  f1-score   support

          0       0.99      0.99      0.99     94123
          1       0.63      0.50      0.56      2005

avg / total       0.98      0.98      0.98     96128

TP=734, FN=55, FP=204
precision=0.782515991471, recall=0.9302915082382763, F1=0.85002895194

TP=784, FN=5, FP=14
precision=0.982456140351, recall=0.9936628643852978, F1=0.988027725268

2
             precision    recall  f1-score   support

          0       0.99      0.99      0.99     94123
          1       0.38      0.30      0.34      2005

avg / total       0.97      0.98      0.97     96128

TP=700, FN=89, FP=561
precision=0.555114988105, recall=0.8871989860583016, F1=0.682926829268

TP=779, FN=10, FP=15
precision=0.981108312343, recall=0.9873257287705957, F1=0.984207201516

3
             precision    recall  f1-score   support

          0       0.98      0.99      0.99     94123
          1       0.26      0.21      0.23      2005

avg / total       0.97      0.97      0.97     96128

TP=497, FN=292, FP=947
precision=0.344182825485, recall=0.6299112801013942, F1=0.445141065831

TP=766, FN=23, FP=17
precision=0.978288633461, recall=0.9708491761723701, F1=0.974554707379

4
             precision    recall  f1-score   support

          0       0.98      0.99      0.98     94123
          1       0.24      0.19      0.21      2005

avg / total       0.97      0.97      0.97     96128

TP=322, FN=467, FP=1140
precision=0.22024623803, recall=0.40811153358681873, F1=0.286095068858

TP=740, FN=49, FP=189
precision=0.796555435953, recall=0.9378960709759189, F1=0.861466821886

5
             precision    recall  f1-score   support

          0       0.98      0.99      0.98     94123
          1       0.22      0.17      0.19      2005

avg / total       0.97      0.97      0.97     96128

TP=272, FN=517, FP=1191
precision=0.185919343814, recall=0.34474017743979724, F1=0.241563055062

TP=706, FN=83, FP=548
precision=0.562998405104, recall=0.8948035487959443, F1=0.691140479687


ds2
345088/345160 [============================>.] - ETA: 0s
-5
             precision    recall  f1-score   support

          0       0.97      0.97      0.97    334300
          1       0.01      0.01      0.01     10860

avg / total       0.94      0.94      0.94    345160

TP=70, FN=3719, FP=10154
precision=0.00684663536776, recall=0.018474531538664556, F1=0.00999072290016

TP=466, FN=3323, FP=9682
precision=0.0459203783997, recall=0.12298759567168119, F1=0.0668723541652

-4
             precision    recall  f1-score   support

          0       0.97      0.97      0.97    334300
          1       0.01      0.01      0.01     10860

avg / total       0.94      0.94      0.94    345160

TP=100, FN=3689, FP=10113
precision=0.00979144227945, recall=0.026392187912377935, F1=0.0142836737609

TP=1321, FN=2468, FP=8557
precision=0.1337315246, recall=0.34864080232251254, F1=0.193312358235

-3
             precision    recall  f1-score   support

          0       0.97      0.97      0.97    334300
          1       0.01      0.01      0.01     10860

avg / total       0.94      0.94      0.94    345160

TP=174, FN=3615, FP=10026
precision=0.0170588235294, recall=0.04592240696753761, F1=0.0248766888269

TP=2877, FN=912, FP=6050
precision=0.322280721407, recall=0.7593032462391133, F1=0.452500786411

-2
             precision    recall  f1-score   support

          0       0.97      0.97      0.97    334300
          1       0.02      0.02      0.02     10860

avg / total       0.94      0.94      0.94    345160

TP=456, FN=3333, FP=9701
precision=0.0448951462046, recall=0.12034837688044339, F1=0.0653950953678

TP=3555, FN=234, FP=3244
precision=0.522871010443, recall=0.9382422802850356, F1=0.671514922554

-1
             precision    recall  f1-score   support

          0       0.97      0.97      0.97    334300
          1       0.05      0.05      0.05     10860

avg / total       0.94      0.94      0.94    345160

TP=1315, FN=2474, FP=8575
precision=0.132962588473, recall=0.34705727104776984, F1=0.192265516485

TP=3681, FN=108, FP=1188
precision=0.756007393715, recall=0.9714964370546318, F1=0.850311850312

0
             precision    recall  f1-score   support

          0       0.97      0.97      0.97    334300
          1       0.16      0.15      0.16     10860

avg / total       0.95      0.95      0.95    345160

TP=2870, FN=919, FP=6066
precision=0.321172784244, recall=0.7574557930852468, F1=0.451080550098

TP=3726, FN=63, FP=497
precision=0.882311153209, recall=0.9833729216152018, F1=0.930104842736

1
             precision    recall  f1-score   support

          0       0.98      0.98      0.98    334300
          1       0.40      0.38      0.39     10860

avg / total       0.96      0.96      0.96    345160

TP=3547, FN=242, FP=3260
precision=0.5210812399, recall=0.9361309052520453, F1=0.669497923745

TP=3748, FN=41, FP=318
precision=0.921790457452, recall=0.9891792029559251, F1=0.954296626353

2
             precision    recall  f1-score   support

          0       0.99      0.99      0.99    334300
          1       0.65      0.61      0.63     10860

avg / total       0.98      0.98      0.98    345160

TP=3654, FN=135, FP=1243
precision=0.746171125179, recall=0.9643705463182898, F1=0.841353902832

TP=3753, FN=36, FP=229
precision=0.942491210447, recall=0.9904988123515439, F1=0.965898854716

3
             precision    recall  f1-score   support

          0       0.99      0.99      0.99    334300
          1       0.75      0.71      0.73     10860

avg / total       0.98      0.98      0.98    345160

TP=3670, FN=119, FP=822
precision=0.817008014248, recall=0.9685932963842703, F1=0.886366380872

TP=3752, FN=37, FP=188
precision=0.952284263959, recall=0.9902348904724202, F1=0.970888860137

4
             precision    recall  f1-score   support

          0       0.99      0.99      0.99    334300
          1       0.57      0.54      0.56     10860

avg / total       0.97      0.97      0.97    345160

TP=3608, FN=181, FP=1692
precision=0.680754716981, recall=0.952230139878596, F1=0.793926724612

TP=3751, FN=38, FP=174
precision=0.955668789809, recall=0.9899709685932964, F1=0.972517500648

5
             precision    recall  f1-score   support

          0       0.98      0.98      0.98    334300
          1       0.31      0.30      0.30     10860

avg / total       0.96      0.96      0.96    345160

TP=3285, FN=504, FP=4093
precision=0.445242613174, recall=0.8669833729216152, F1=0.588340646548

TP=3733, FN=56, FP=207
precision=0.947461928934, recall=0.9852203747690683, F1=0.965972312071


ds3
5120/5526 [==========================>...] - ETA: 0s
-5
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.02      0.02      0.02       135

avg / total       0.95      0.95      0.95      5526

TP=5, FN=41, FP=128
precision=0.0375939849624, recall=0.10869565217391304, F1=0.0558659217877

TP=40, FN=6, FP=58
precision=0.408163265306, recall=0.8695652173913043, F1=0.555555555556

-4
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.04      0.04      0.04       135

avg / total       0.95      0.95      0.95      5526

TP=10, FN=36, FP=121
precision=0.0763358778626, recall=0.21739130434782608, F1=0.112994350282

TP=45, FN=1, FP=27
precision=0.625, recall=0.9782608695652174, F1=0.762711864407

-3
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.10      0.10      0.10       135

avg / total       0.96      0.96      0.96      5526

TP=26, FN=20, FP=96
precision=0.213114754098, recall=0.5652173913043478, F1=0.309523809524

TP=45, FN=1, FP=12
precision=0.789473684211, recall=0.9782608695652174, F1=0.873786407767

-2
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.28      0.28      0.28       135

avg / total       0.96      0.96      0.96      5526

TP=39, FN=7, FP=61
precision=0.39, recall=0.8478260869565217, F1=0.534246575342

TP=46, FN=0, FP=4
precision=0.92, recall=1.0, F1=0.958333333333

-1
             precision    recall  f1-score   support

          0       0.99      0.99      0.99      5391
          1       0.53      0.53      0.53       135

avg / total       0.98      0.98      0.98      5526

TP=44, FN=2, FP=29
precision=0.602739726027, recall=0.9565217391304348, F1=0.739495798319

TP=45, FN=1, FP=3
precision=0.9375, recall=0.9782608695652174, F1=0.957446808511

0
             precision    recall  f1-score   support

          0       0.99      0.99      0.99      5391
          1       0.70      0.70      0.70       135

avg / total       0.99      0.99      0.99      5526

TP=44, FN=2, FP=15
precision=0.745762711864, recall=0.9565217391304348, F1=0.838095238095

TP=45, FN=1, FP=2
precision=0.957446808511, recall=0.9782608695652174, F1=0.967741935484

1
             precision    recall  f1-score   support

          0       0.99      0.99      0.99      5391
          1       0.64      0.64      0.64       135

avg / total       0.98      0.98      0.98      5526

TP=45, FN=1, FP=16
precision=0.737704918033, recall=0.9782608695652174, F1=0.841121495327

TP=45, FN=1, FP=3
precision=0.9375, recall=0.9782608695652174, F1=0.957446808511

2
             precision    recall  f1-score   support

          0       0.99      0.99      0.99      5391
          1       0.44      0.44      0.44       135

avg / total       0.97      0.97      0.97      5526

TP=43, FN=3, FP=40
precision=0.518072289157, recall=0.9347826086956522, F1=0.666666666667

TP=45, FN=1, FP=3
precision=0.9375, recall=0.9782608695652174, F1=0.957446808511

3
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.20      0.20      0.20       135

avg / total       0.96      0.96      0.96      5526

TP=35, FN=11, FP=74
precision=0.321100917431, recall=0.7608695652173914, F1=0.451612903226

TP=45, FN=1, FP=4
precision=0.918367346939, recall=0.9782608695652174, F1=0.947368421053

4
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.08      0.08      0.08       135

avg / total       0.96      0.96      0.96      5526

TP=20, FN=26, FP=106
precision=0.15873015873, recall=0.43478260869565216, F1=0.232558139535

TP=45, FN=1, FP=12
precision=0.789473684211, recall=0.9782608695652174, F1=0.873786407767

5
             precision    recall  f1-score   support

          0       0.98      0.98      0.98      5391
          1       0.02      0.02      0.02       135

avg / total       0.95      0.95      0.95      5526

TP=10, FN=36, FP=122
precision=0.0757575757576, recall=0.21739130434782608, F1=0.112359550562

TP=43, FN=3, FP=36
precision=0.544303797468, recall=0.9347826086956522, F1=0.688
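
Instead of reading the eleven reports per dataset by eye, the best-scoring shift can also be picked programmatically; a small hypothetical helper using scikit-learn's f1_score:

from scipy.ndimage.interpolation import shift
from sklearn.metrics import f1_score

def best_label_shift(y, y_predicted, shifts=range(-5, 6)):
    """Return the label shift that best aligns the ground truth with the predictions."""
    return max(shifts, key=lambda s: f1_score(shift(y, s, cval=0), y_predicted))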


Visualization


In [23]:
print(loaded_model.layers[0].get_weights()[0].shape)


(7, 3, 1, 10)
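
The kernel tensor of the first Conv2D layer is stored as (rows, cols, input channels, filters), i.e. ten 7x3 single-channel filters. The squeeze-and-loop transposition in In [25] below can equivalently be written as a single np.transpose call; an equivalent sketch:

import numpy as np

W = loaded_model.layers[0].get_weights()[0]                    # (7, 3, 1, 10)
W_filters_transposed = np.transpose(np.squeeze(W), (2, 0, 1))  # -> (10, 7, 3)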

In [24]:
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy.ma as ma

def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None):
    """Wrapper around pl.imshow"""
    if cmap is None:
        cmap = cm.jet
    if vmin is None:
        vmin = data.min()
    if vmax is None:
        vmax = data.max()
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    im = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap)
    plt.colorbar(im, cax=cax)

def make_mosaic(imgs, nrows, ncols, border=1):
    """
    Given a set of images with all the same shape, makes a
    mosaic with nrows and ncols
    """
    nimgs = imgs.shape[0]
    imshape = imgs.shape[1:]
    
    mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,
                            ncols * imshape[1] + (ncols - 1) * border),
                            dtype=np.float32)
    
    paddedh = imshape[0] + border
    paddedw = imshape[1] + border
    for i in range(nimgs):
        row = int(np.floor(i / ncols))
        col = i % ncols
        
        mosaic[row * paddedh:row * paddedh + imshape[0],
               col * paddedw:col * paddedw + imshape[1]] = imgs[i]
    return mosaic

In [25]:
# Visualize weights
W = loaded_model.layers[0].get_weights()[0]
# (7, 3, 1, 10) -> (10, 3, 7)
W = np.squeeze(W).T
# (10, 3, 7) -> (10, 7, 3)
W_filters_transposed = np.empty((W.shape[0], W.shape[2], W.shape[1]))
for i, array in enumerate(W):
    W_filters_transposed[i] = array.T
print(W_filters_transposed.shape)

plt.figure(figsize=(15, 15))
plt.title('conv1 weights')
nice_imshow(plt.gca(), make_mosaic(W_filters_transposed, 2, 5), cmap=cm.binary)
# nice_imshow(plt.gca(), make_mosaic(W_filters_transposed, 4, 4), cmap=cm.binary)


(10, 7, 3)