In [51]:
from preprocess import *
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, LSTM
from keras.utils import to_categorical
import wandb
from wandb.keras import WandbCallback
import matplotlib.pyplot as plt
In [26]:
wandb.init()
config = wandb.config
config.max_len = 11   # MFCC frames kept per clip
config.buckets = 20   # number of MFCC coefficients (n_mfcc)
# Save data to array file first
save_data_to_array(max_len=config.max_len, n_mfcc=config.buckets)
labels = ["bed", "happy", "cat"]
W&B Run: https://app.wandb.ai/l2k2/ml-class-videos_cnn-audio/runs/n5fqsruy
Call `%%wandb` in the cell containing your training loop to display live results.
Saving vectors of label - 'bed': 100%|██████████| 1713/1713 [00:12<00:00, 140.67it/s]
Saving vectors of label - 'happy': 100%|██████████| 1742/1742 [00:13<00:00, 125.22it/s]
Saving vectors of label - 'cat': 100%|██████████| 1733/1733 [00:13<00:00, 131.24it/s]
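For reference, save_data_to_array in preprocess.py computes one MFCC matrix per clip, pads or truncates it to max_len frames, and saves a .npy file per label. A minimal sketch of that MFCC step, assuming librosa and not verbatim from the repo:

# Hypothetical sketch -- the actual preprocess.py may differ
import librosa
import numpy as np

def wav2mfcc(file_path, max_len=11, n_mfcc=20):
    # Load the clip and compute an (n_mfcc, frames) MFCC matrix
    wave, sr = librosa.load(file_path, mono=True, sr=None)
    mfcc = librosa.feature.mfcc(y=wave, sr=sr, n_mfcc=n_mfcc)
    if mfcc.shape[1] < max_len:
        # Pad short clips with zeros on the right
        mfcc = np.pad(mfcc, ((0, 0), (0, max_len - mfcc.shape[1])), mode='constant')
    else:
        # Truncate long clips to max_len frames
        mfcc = mfcc[:, :max_len]
    return mfcc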
In [5]:
# Load train set and test set
X_train, X_test, y_train, y_test = get_train_test()
In [32]:
# Input dimensions and training config
channels = 1            # single channel: treat each MFCC matrix as a grayscale image
config.epochs = 50
config.batch_size = 100
num_classes = 3         # bed, happy, cat
X_train = X_train.reshape(X_train.shape[0], config.buckets, config.max_len, channels)
X_test = X_test.reshape(X_test.shape[0], config.buckets, config.max_len, channels)
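The trailing channels axis puts each sample in the (height, width, channels) layout a Conv2D stack would expect, with MFCC coefficients as rows and time frames as columns. A quick sanity check (the training run below reports 3112 training samples):

print(X_train.shape)  # (3112, 20, 11, 1) -> (samples, buckets, max_len, channels)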
In [37]:
plt.imshow(X_train[100, :, :, 0])  # visualize one MFCC matrix as an image
print(y_train[100])                # the integer class index, stored as a float
0.0
In [29]:
y_train_hot = to_categorical(y_train)
y_test_hot = to_categorical(y_test)
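to_categorical converts the integer class indices into the one-hot rows that categorical_crossentropy expects. For example:

print(to_categorical([0, 1, 2]))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]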
In [54]:
# Drop the channels axis again: Flatten only needs the 2D MFCC matrix per sample
X_train = X_train.reshape(X_train.shape[0], config.buckets, config.max_len)
X_test = X_test.reshape(X_test.shape[0], config.buckets, config.max_len)

# Baseline: a single softmax layer over the flattened MFCCs (multinomial logistic regression)
model = Sequential()
model.add(Flatten(input_shape=(config.buckets, config.max_len)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])
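This baseline is tiny: flattening the 20×11 MFCC matrix gives 220 inputs, so the Dense layer holds 220 × 3 + 3 = 663 parameters. A quick check:

model.summary()  # Flatten -> (None, 220); Dense -> (None, 3), 663 params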
In [55]:
wandb.init()
model.fit(X_train, y_train_hot,
          epochs=config.epochs,
          validation_data=(X_test, y_test_hot),
          callbacks=[WandbCallback(data_type="image", labels=labels)])
# Note: config.batch_size is set above but never passed to fit(), so Keras uses its default of 32
W&B Run: https://app.wandb.ai/l2k2/ml-class-videos_cnn-audio/runs/uaom4fuw
Call `%%wandb` in the cell containing your training loop to display live results.
Train on 3112 samples, validate on 2076 samples
Epoch 1/50
3112/3112 [==============================] - 1s 252us/step - loss: 9.6424 - acc: 0.3817 - val_loss: 8.9929 - val_acc: 0.4253
Epoch 2/50
3112/3112 [==============================] - 0s 149us/step - loss: 8.8110 - acc: 0.4341 - val_loss: 8.2306 - val_acc: 0.4653
Epoch 3/50
3112/3112 [==============================] - 0s 152us/step - loss: 7.8625 - acc: 0.4820 - val_loss: 6.7555 - val_acc: 0.5313
Epoch 4/50
3112/3112 [==============================] - 0s 150us/step - loss: 5.1861 - acc: 0.6144 - val_loss: 4.2210 - val_acc: 0.6696
Epoch 5/50
3112/3112 [==============================] - 0s 144us/step - loss: 3.5821 - acc: 0.7208 - val_loss: 3.6203 - val_acc: 0.7163
Epoch 6/50
3112/3112 [==============================] - 1s 213us/step - loss: 2.9745 - acc: 0.7561 - val_loss: 3.2614 - val_acc: 0.7447
Epoch 7/50
3112/3112 [==============================] - 0s 136us/step - loss: 2.7470 - acc: 0.7789 - val_loss: 3.3918 - val_acc: 0.7331
Epoch 8/50
3112/3112 [==============================] - 0s 143us/step - loss: 2.5837 - acc: 0.7895 - val_loss: 2.9279 - val_acc: 0.7693
Epoch 9/50
3112/3112 [==============================] - 0s 136us/step - loss: 2.2898 - acc: 0.8040 - val_loss: 2.8437 - val_acc: 0.7799
Epoch 10/50
3112/3112 [==============================] - 0s 145us/step - loss: 2.2676 - acc: 0.8107 - val_loss: 2.8002 - val_acc: 0.7722
Epoch 11/50
3112/3112 [==============================] - 0s 139us/step - loss: 2.1765 - acc: 0.8217 - val_loss: 2.7590 - val_acc: 0.7876
Epoch 12/50
3112/3112 [==============================] - 0s 139us/step - loss: 2.0598 - acc: 0.8258 - val_loss: 2.7624 - val_acc: 0.7837
Epoch 13/50
3112/3112 [==============================] - 0s 138us/step - loss: 2.0148 - acc: 0.8268 - val_loss: 2.7273 - val_acc: 0.7808
Epoch 14/50
3112/3112 [==============================] - 0s 136us/step - loss: 1.9404 - acc: 0.8393 - val_loss: 2.8900 - val_acc: 0.7659
Epoch 15/50
3112/3112 [==============================] - 0s 152us/step - loss: 1.9122 - acc: 0.8287 - val_loss: 2.7173 - val_acc: 0.7832
Epoch 16/50
3112/3112 [==============================] - 0s 138us/step - loss: 1.8676 - acc: 0.8445 - val_loss: 2.7524 - val_acc: 0.7775
Epoch 17/50
3112/3112 [==============================] - 0s 143us/step - loss: 2.0001 - acc: 0.8242 - val_loss: 2.6138 - val_acc: 0.7982
Epoch 18/50
3112/3112 [==============================] - 0s 136us/step - loss: 1.7973 - acc: 0.8480 - val_loss: 2.6646 - val_acc: 0.7823
Epoch 19/50
3112/3112 [==============================] - 0s 144us/step - loss: 1.7860 - acc: 0.8461 - val_loss: 2.6840 - val_acc: 0.7828
Epoch 20/50
3112/3112 [==============================] - 0s 136us/step - loss: 1.7758 - acc: 0.8442 - val_loss: 2.6632 - val_acc: 0.7784
Epoch 21/50
3112/3112 [==============================] - 0s 143us/step - loss: 1.6855 - acc: 0.8570 - val_loss: 2.6435 - val_acc: 0.7847
Epoch 22/50
3112/3112 [==============================] - 0s 139us/step - loss: 1.6973 - acc: 0.8464 - val_loss: 2.6579 - val_acc: 0.7842
Epoch 23/50
3112/3112 [==============================] - 0s 145us/step - loss: 1.7248 - acc: 0.8403 - val_loss: 2.6522 - val_acc: 0.7832
Epoch 24/50
3112/3112 [==============================] - 0s 138us/step - loss: 1.6680 - acc: 0.8551 - val_loss: 2.9123 - val_acc: 0.7664
Epoch 25/50
3112/3112 [==============================] - 0s 134us/step - loss: 1.6825 - acc: 0.8544 - val_loss: 2.7090 - val_acc: 0.7856
Epoch 26/50
3112/3112 [==============================] - 0s 152us/step - loss: 1.7272 - acc: 0.8445 - val_loss: 2.6379 - val_acc: 0.7861
Epoch 27/50
3112/3112 [==============================] - 0s 140us/step - loss: 1.6018 - acc: 0.8599 - val_loss: 2.6723 - val_acc: 0.7861
Epoch 28/50
3112/3112 [==============================] - 0s 147us/step - loss: 1.5737 - acc: 0.8583 - val_loss: 2.8168 - val_acc: 0.7726
Epoch 29/50
3112/3112 [==============================] - 0s 135us/step - loss: 1.6332 - acc: 0.8499 - val_loss: 2.6335 - val_acc: 0.7871
Epoch 30/50
3112/3112 [==============================] - 0s 144us/step - loss: 1.6197 - acc: 0.8541 - val_loss: 2.7046 - val_acc: 0.7813
Epoch 31/50
3112/3112 [==============================] - 0s 137us/step - loss: 1.5724 - acc: 0.8583 - val_loss: 2.6458 - val_acc: 0.7847
Epoch 32/50
3112/3112 [==============================] - 0s 147us/step - loss: 1.5384 - acc: 0.8573 - val_loss: 2.7548 - val_acc: 0.7784
Epoch 33/50
3112/3112 [==============================] - 0s 138us/step - loss: 1.5416 - acc: 0.8560 - val_loss: 2.6942 - val_acc: 0.7808
Epoch 34/50
3112/3112 [==============================] - 0s 145us/step - loss: 1.5780 - acc: 0.8538 - val_loss: 2.7261 - val_acc: 0.7755
Epoch 35/50
3112/3112 [==============================] - 0s 134us/step - loss: 1.5478 - acc: 0.8528 - val_loss: 2.6358 - val_acc: 0.7832
Epoch 36/50
3112/3112 [==============================] - 0s 135us/step - loss: 1.6300 - acc: 0.8477 - val_loss: 2.6163 - val_acc: 0.7871
Epoch 37/50
3112/3112 [==============================] - 0s 152us/step - loss: 1.5446 - acc: 0.8541 - val_loss: 2.6238 - val_acc: 0.7871
Epoch 38/50
3112/3112 [==============================] - 0s 137us/step - loss: 1.4815 - acc: 0.8631 - val_loss: 2.6222 - val_acc: 0.7876
Epoch 39/50
3112/3112 [==============================] - 0s 143us/step - loss: 1.5307 - acc: 0.8596 - val_loss: 2.6622 - val_acc: 0.7842
Epoch 40/50
3112/3112 [==============================] - 0s 134us/step - loss: 1.4757 - acc: 0.8615 - val_loss: 2.6584 - val_acc: 0.7775
Epoch 41/50
3112/3112 [==============================] - 0s 142us/step - loss: 1.4912 - acc: 0.8625 - val_loss: 2.7109 - val_acc: 0.7803
Epoch 42/50
3112/3112 [==============================] - 0s 138us/step - loss: 1.3912 - acc: 0.8676 - val_loss: 2.6428 - val_acc: 0.7823
Epoch 43/50
3112/3112 [==============================] - 0s 142us/step - loss: 1.4650 - acc: 0.8650 - val_loss: 2.7254 - val_acc: 0.7823
Epoch 44/50
3112/3112 [==============================] - 0s 136us/step - loss: 1.4426 - acc: 0.8602 - val_loss: 2.6579 - val_acc: 0.7861
Epoch 45/50
3112/3112 [==============================] - 0s 136us/step - loss: 1.3862 - acc: 0.8689 - val_loss: 2.7029 - val_acc: 0.7823
Epoch 46/50
3112/3112 [==============================] - 0s 147us/step - loss: 1.3805 - acc: 0.8638 - val_loss: 2.7139 - val_acc: 0.7770
Epoch 47/50
3112/3112 [==============================] - 0s 135us/step - loss: 1.4554 - acc: 0.8573 - val_loss: 2.6637 - val_acc: 0.7866
Epoch 48/50
3112/3112 [==============================] - 0s 152us/step - loss: 1.3979 - acc: 0.8625 - val_loss: 2.8353 - val_acc: 0.7635
Epoch 49/50
3112/3112 [==============================] - 0s 138us/step - loss: 1.4165 - acc: 0.8605 - val_loss: 2.6396 - val_acc: 0.7852
Epoch 50/50
3112/3112 [==============================] - 0s 146us/step - loss: 1.3335 - acc: 0.8689 - val_loss: 2.6142 - val_acc: 0.7837
Out[55]:
<keras.callbacks.History at 0x7f89a361dc50>
In [53]:
# build model
model = Sequential()
model.add(LSTM(16, input_shape=(config.buckets, config.max_len, channels), activation="sigmoid"))
model.add(Dense(1, activation='sigmoid'))
model.add(Dense(num_classes, activation='softmax'))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-53-6ac7b4c7af2c> in <module>
1 # build model
2 model = Sequential()
----> 3 model.add(LSTM(16, input_shape=(config.buckets, config.max_len, channels), activation="sigmoid"))
4 model.add(Dense(1, activation='sigmoid'))
5 model.add(Dense(num_classes, activation='softmax'))
/usr/local/lib/python3.6/dist-packages/keras/engine/sequential.py in add(self, layer)
163 # and create the node connecting the current layer
164 # to the input layer we just created.
--> 165 layer(x)
166 set_inputs = True
167 else:
/usr/local/lib/python3.6/dist-packages/keras/layers/recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
530
531 if initial_state is None and constants is None:
--> 532 return super(RNN, self).__call__(inputs, **kwargs)
533
534 # If any of `initial_state` or `constants` are specified and are Keras
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in __call__(self, inputs, **kwargs)
412 # Raise exceptions in case the input is not compatible
413 # with the input_spec specified in the layer constructor.
--> 414 self.assert_input_compatibility(inputs)
415
416 # Collect input shapes to build layer.
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in assert_input_compatibility(self, inputs)
309 self.name + ': expected ndim=' +
310 str(spec.ndim) + ', found ndim=' +
--> 311 str(K.ndim(x)))
312 if spec.max_ndim is not None:
313 ndim = K.ndim(x)
ValueError: Input 0 is incompatible with layer lstm_2: expected ndim=3, found ndim=4
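The traceback points at the extra channels axis: a Keras LSTM expects 3D input of shape (batch, timesteps, features), but X_train and input_shape were left in the 4D Conv2D layout. A working variant (a sketch; the cell that produced the successful run below isn't shown) reshapes back to 3D, drops channels from input_shape, and removes the Dense(1) bottleneck, which would force everything through a single unit before the softmax:

# Treat each clip as a sequence: timesteps = buckets, features = max_len
X_train = X_train.reshape(X_train.shape[0], config.buckets, config.max_len)
X_test = X_test.reshape(X_test.shape[0], config.buckets, config.max_len)

model = Sequential()
model.add(LSTM(16, input_shape=(config.buckets, config.max_len)))  # 3D input, no channels axis
model.add(Dense(num_classes, activation='softmax'))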
In [48]:
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])
In [49]:
wandb.init()
model.fit(X_train, y_train_hot,
          epochs=config.epochs,
          validation_data=(X_test, y_test_hot),
          callbacks=[WandbCallback(data_type="image", labels=labels)])
W&B Run: https://app.wandb.ai/l2k2/ml-class-videos_cnn-audio/runs/dtwf65g1
Call `%%wandb` in the cell containing your training loop to display live results.
Train on 3112 samples, validate on 2076 samples
Epoch 1/50
3112/3112 [==============================] - 1s 407us/step - loss: 1.9623 - acc: 0.4762 - val_loss: 0.7156 - val_acc: 0.6946
Epoch 2/50
3112/3112 [==============================] - 1s 241us/step - loss: 0.7342 - acc: 0.6655 - val_loss: 0.5495 - val_acc: 0.7905
Epoch 3/50
3112/3112 [==============================] - 1s 236us/step - loss: 0.6346 - acc: 0.7265 - val_loss: 0.4503 - val_acc: 0.8295
Epoch 4/50
3112/3112 [==============================] - 1s 238us/step - loss: 0.5327 - acc: 0.7847 - val_loss: 0.3875 - val_acc: 0.8521
Epoch 5/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.4546 - acc: 0.8194 - val_loss: 0.3664 - val_acc: 0.8545
Epoch 6/50
3112/3112 [==============================] - 1s 229us/step - loss: 0.4234 - acc: 0.8316 - val_loss: 0.3349 - val_acc: 0.8801
Epoch 7/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.3969 - acc: 0.8451 - val_loss: 0.3191 - val_acc: 0.8844
Epoch 8/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.3583 - acc: 0.8670 - val_loss: 0.2884 - val_acc: 0.8863
Epoch 9/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.3428 - acc: 0.8663 - val_loss: 0.2585 - val_acc: 0.9017
Epoch 10/50
3112/3112 [==============================] - 1s 227us/step - loss: 0.3109 - acc: 0.8753 - val_loss: 0.2971 - val_acc: 0.8868
Epoch 11/50
3112/3112 [==============================] - 1s 228us/step - loss: 0.3099 - acc: 0.8869 - val_loss: 0.2573 - val_acc: 0.9032
Epoch 12/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.2891 - acc: 0.8856 - val_loss: 0.2547 - val_acc: 0.9037
Epoch 13/50
3112/3112 [==============================] - 1s 223us/step - loss: 0.2641 - acc: 0.8997 - val_loss: 0.2315 - val_acc: 0.9128
Epoch 14/50
3112/3112 [==============================] - 1s 231us/step - loss: 0.2698 - acc: 0.8962 - val_loss: 0.2482 - val_acc: 0.9056
Epoch 15/50
3112/3112 [==============================] - 1s 229us/step - loss: 0.2616 - acc: 0.9026 - val_loss: 0.2451 - val_acc: 0.9051
Epoch 16/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.2394 - acc: 0.9100 - val_loss: 0.2324 - val_acc: 0.9099
Epoch 17/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.2264 - acc: 0.9075 - val_loss: 0.2356 - val_acc: 0.9090
Epoch 18/50
3112/3112 [==============================] - 1s 239us/step - loss: 0.2157 - acc: 0.9216 - val_loss: 0.2173 - val_acc: 0.9157
Epoch 19/50
3112/3112 [==============================] - 1s 224us/step - loss: 0.2293 - acc: 0.9139 - val_loss: 0.2387 - val_acc: 0.9022
Epoch 20/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.2352 - acc: 0.9148 - val_loss: 0.2359 - val_acc: 0.9128
Epoch 21/50
3112/3112 [==============================] - 1s 229us/step - loss: 0.2068 - acc: 0.9229 - val_loss: 0.2154 - val_acc: 0.9249
Epoch 22/50
3112/3112 [==============================] - 1s 228us/step - loss: 0.1956 - acc: 0.9267 - val_loss: 0.2237 - val_acc: 0.9258
Epoch 23/50
3112/3112 [==============================] - 1s 228us/step - loss: 0.1971 - acc: 0.9242 - val_loss: 0.3137 - val_acc: 0.8935
Epoch 24/50
3112/3112 [==============================] - 1s 223us/step - loss: 0.2352 - acc: 0.9136 - val_loss: 0.2334 - val_acc: 0.9176
Epoch 25/50
3112/3112 [==============================] - 1s 233us/step - loss: 0.1856 - acc: 0.9316 - val_loss: 0.2142 - val_acc: 0.9152
Epoch 26/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.1832 - acc: 0.9312 - val_loss: 0.2065 - val_acc: 0.9292
Epoch 27/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.1804 - acc: 0.9328 - val_loss: 0.2450 - val_acc: 0.9157
Epoch 28/50
3112/3112 [==============================] - 1s 224us/step - loss: 0.1728 - acc: 0.9354 - val_loss: 0.2183 - val_acc: 0.9196
Epoch 29/50
3112/3112 [==============================] - 1s 236us/step - loss: 0.1798 - acc: 0.9393 - val_loss: 0.2097 - val_acc: 0.9234
Epoch 30/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.1751 - acc: 0.9338 - val_loss: 0.2001 - val_acc: 0.9292
Epoch 31/50
3112/3112 [==============================] - 1s 229us/step - loss: 0.1656 - acc: 0.9418 - val_loss: 0.2071 - val_acc: 0.9200
Epoch 32/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.1761 - acc: 0.9354 - val_loss: 0.2080 - val_acc: 0.9282
Epoch 33/50
3112/3112 [==============================] - 1s 231us/step - loss: 0.1466 - acc: 0.9499 - val_loss: 0.2136 - val_acc: 0.9277
Epoch 34/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.1671 - acc: 0.9409 - val_loss: 0.2059 - val_acc: 0.9277
Epoch 35/50
3112/3112 [==============================] - 1s 221us/step - loss: 0.1617 - acc: 0.9418 - val_loss: 0.2105 - val_acc: 0.9273
Epoch 36/50
3112/3112 [==============================] - 1s 230us/step - loss: 0.1632 - acc: 0.9415 - val_loss: 0.2111 - val_acc: 0.9297
Epoch 37/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.1933 - acc: 0.9351 - val_loss: 0.2138 - val_acc: 0.9215
Epoch 38/50
3112/3112 [==============================] - 1s 222us/step - loss: 0.1430 - acc: 0.9467 - val_loss: 0.1988 - val_acc: 0.9321
Epoch 39/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.1409 - acc: 0.9470 - val_loss: 0.2236 - val_acc: 0.9263
Epoch 40/50
3112/3112 [==============================] - 1s 231us/step - loss: 0.1309 - acc: 0.9553 - val_loss: 0.2243 - val_acc: 0.9292
Epoch 41/50
3112/3112 [==============================] - 1s 226us/step - loss: 0.1385 - acc: 0.9492 - val_loss: 0.2892 - val_acc: 0.9090
Epoch 42/50
3112/3112 [==============================] - 1s 225us/step - loss: 0.1637 - acc: 0.9389 - val_loss: 0.2295 - val_acc: 0.9306
Epoch 43/50
3112/3112 [==============================] - 1s 231us/step - loss: 0.1329 - acc: 0.9492 - val_loss: 0.2125 - val_acc: 0.9229
Epoch 44/50
3112/3112 [==============================] - 1s 233us/step - loss: 0.1269 - acc: 0.9528 - val_loss: 0.2149 - val_acc: 0.9258
Epoch 45/50
3112/3112 [==============================] - 1s 227us/step - loss: 0.1230 - acc: 0.9569 - val_loss: 0.2008 - val_acc: 0.9297
Epoch 46/50
3112/3112 [==============================] - 1s 222us/step - loss: 0.1400 - acc: 0.9454 - val_loss: 0.2512 - val_acc: 0.9210
Epoch 47/50
3112/3112 [==============================] - 1s 227us/step - loss: 0.1363 - acc: 0.9486 - val_loss: 0.2346 - val_acc: 0.9215
Epoch 48/50
3112/3112 [==============================] - 1s 227us/step - loss: 0.1239 - acc: 0.9550 - val_loss: 0.2463 - val_acc: 0.9147
Epoch 49/50
3112/3112 [==============================] - 1s 236us/step - loss: 0.1379 - acc: 0.9499 - val_loss: 0.2148 - val_acc: 0.9306
Epoch 50/50
3112/3112 [==============================] - 1s 221us/step - loss: 0.1204 - acc: 0.9528 - val_loss: 0.2149 - val_acc: 0.9311
Out[49]:
<keras.callbacks.History at 0x7f89a8f7da20>
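At roughly 0.93 validation accuracy, the LSTM cuts the error of the flatten-and-softmax baseline (about 0.78) by more than half. To pull the final held-out metrics directly:

# Evaluate the trained model on the held-out set
loss, acc = model.evaluate(X_test, y_test_hot, verbose=0)
print("test loss: %.4f, test accuracy: %.4f" % (loss, acc))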