Kaggle Dogbreeds


In [1]:
%reload_ext autoreload
%autoreload 2
%matplotlib inline

In [2]:
from fastai.imports import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *

from fastai.plots import *

In [3]:
PATH = "data/dogbreeds/"
TRAIN = "train/"; VALID = "valid/"; TEST = "test/"; SUBM = "subm/"; RSLT = "results/"

In [4]:
sz = 224
bs = 64
# arch = resnext101_64
arch = resnext50
# arch = resnet34
# arch = resnet50

Explanation of the following code, get_cv_idxs(..):

def get_cv_idxs(n, cv_idx=4, val_pct=0.2, seed=42):
1    np.random.seed(seed)
2    n_val = int(val_pct*n)
3    idx_start = cv_idx*n_val
4    idxs = np.random.permutation(n)
5    return idxs[idx_start:idx_start+n_val]

The function takes n, the total number of items; cv_idx, the cross-validation fold index (default 4); val_pct, the validation fraction (default 20%); and seed, the random seed (default: the meaning of life).

  • Line 1 seeds the NumPy random number generator, so the split is reproducible.
  • Line 2 sets n_val, the number of validation items, to floor(val_pct × n).
  • Line 3 sets idx_start, the starting index of the validation slice, to cv_idx × n_val.
  • Line 4 sets idxs to a random permutation of the integers [0, n).
  • Line 5 returns the n_val-wide slice of idxs starting at idx_start.

I don't know yet what the significance of cv_idx is.
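
A quick check (a minimal sketch reusing the definition above; n=100 and the fold count of 5 are just illustrative) suggests cv_idx simply selects which contiguous block of the shuffled indices becomes the validation set, i.e. which cross-validation fold is held out:

import numpy as np

def get_cv_idxs(n, cv_idx=4, val_pct=0.2, seed=42):
    np.random.seed(seed)
    n_val = int(val_pct*n)
    idx_start = cv_idx*n_val
    idxs = np.random.permutation(n)
    return idxs[idx_start:idx_start+n_val]

# The seed is fixed, so every call shuffles identically; cv_idx values 0..4 then
# carve the same permutation into five disjoint 20%-sized validation folds.
folds = [set(get_cv_idxs(100, cv_idx=i)) for i in range(5)]
print([len(f) for f in folds])            # [20, 20, 20, 20, 20]
print(all(not (folds[i] & folds[j])       # True: the folds are disjoint
          for i in range(5) for j in range(i+1, 5)))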


In [5]:
# labels.csv has one row per training image (plus a header), so its line count gives n
# Pass n into get_cv_idxs(n) to pick the validation indices
label_csv = f'{PATH}labels.csv'
n = len(list(open(label_csv)))-1
val_idxs = get_cv_idxs(n)

In [6]:
# NOTE: tfms_from_model's 2nd positional argument is the image size; passing bs here
# loads the images at 64x64 (see learn.data.sz below), so sz was presumably intended
tfms = tfms_from_model(arch, bs, aug_tfms=transforms_side_on, max_zoom=1.1)
data = ImageClassifierData.from_csv(path=PATH, folder='train', csv_fname=label_csv,
                                    bs=bs, tfms=tfms, val_idxs=val_idxs, suffix='.jpg')

In [7]:
learn = ConvLearner.pretrained(arch, data=data, precompute=False)

In [8]:
data.trn_dl.dataset.fnames[:10]


Out[8]:
array(['train/000bec180eb18c7604dcecc8fe0dba07.jpg', 'train/001cdf01b096e06d78e9e5112d419397.jpg',
       'train/00214f311d5d2247d5dfe4fe24b2303d.jpg', 'train/00290d3e1fdd27226ba27a8ce248ce85.jpg',
       'train/002a283a315af96eaea0e28e7163b21b.jpg', 'train/003df8b8a8b05244b1d920bb6cf451f9.jpg',
       'train/004396df1acd0f1247b740ca2b14616e.jpg', 'train/00693b8bc2470375cc744a6391d397ec.jpg',
       'train/0075dc49dab4024d12fafe67074d8a81.jpg', 'train/00792e341f3c6eb33663e415d0715370.jpg'],
      dtype='<U42')

In [9]:
list(zip(data.classes))[:10]


Out[9]:
[('affenpinscher',),
 ('afghan_hound',),
 ('african_hunting_dog',),
 ('airedale',),
 ('american_staffordshire_terrier',),
 ('appenzeller',),
 ('australian_terrier',),
 ('basenji',),
 ('basset',),
 ('beagle',)]

In [10]:
lrf = learn.lr_find()
learn.sched.plot()



In [11]:
# 4 one-epoch SGDR cycles; printed rows are [epoch, training loss, validation loss, accuracy]
learn.fit(lrs=1e-1, n_cycle=4, cycle_len=1)


[ 0.       3.24623  2.64947  0.33226]                       
[ 1.       2.86636  2.51915  0.36123]                       
[ 2.       2.68073  2.46338  0.36751]                       
[ 3.       2.54761  2.43362  0.3748 ]                       


In [12]:
# cycle_mult=2 doubles each successive cycle: lengths of 1, 2, and 4 epochs (7 epochs total)
learn.fit(lrs=1e-1, n_cycle=3, cycle_len=1, cycle_mult=2)


[ 0.       2.45535  2.40095  0.38698]                       
[ 1.       2.48999  2.43022  0.36895]                       
[ 2.       2.17565  2.39381  0.38024]                       
[ 3.       2.41535  2.46541  0.36797]                       
[ 4.       2.26767  2.42203  0.3832 ]                       
[ 5.       2.08735  2.38037  0.3944 ]                       
[ 6.       1.95737  2.36502  0.39199]                       


In [14]:
lr = 1e-1
# Differential learning rates per layer group: early layers lr/100, middle lr/10, head lr
lrs = np.array([lr/1e2,lr/1e1,lr])
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)


[ 0.       3.10943  2.71889  0.30973]                       
[ 1.       2.71625  2.44698  0.37227]                       
[ 2.       1.94874  2.17585  0.42663]                       
[ 3.       2.30389  2.49217  0.35957]                       
[ 4.       1.85108  2.24783  0.42129]                       
[ 5.       1.2944   2.10016  0.46585]                       
[ 6.       0.94755  2.06791  0.48057]                        


In [18]:
# Pre-resize the images (smaller side to 300px), caching the copies under 'tmp/' to speed up loading
learn.data.resize(300, 'tmp')


Out[18]:
<fastai.dataset.ImageClassifierData at 0x7f152dd3e710>

In [19]:
learn.data.sz


Out[19]:
64
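
That 64 is suspicious: the second positional argument of tfms_from_model is the target image size, and cell In [6] passed bs (which is 64) there, so the images are being loaded at 64x64 rather than at sz=224. A minimal sketch of what was presumably intended (same variables as above; the learner would then be rebuilt on the new data object):

tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)  # image size sz, not bs
data = ImageClassifierData.from_csv(path=PATH, folder='train', csv_fname=label_csv,
                                    bs=bs, tfms=tfms, val_idxs=val_idxs, suffix='.jpg')
learn = ConvLearner.pretrained(arch, data=data, precompute=False)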

In [20]:
# Re-freeze every layer group except the final (head) group
learn.freeze()

In [21]:
lrf = learn.lr_find()
learn.sched.plot()



In [22]:
lr = 5e-3
learn.fit(lr, n_cycle=1, cycle_len=1)


[ 0.       0.74586  2.04658  0.4791 ]                        


In [23]:



Out[23]:
64

In [24]:
fn = PATH + data.trn_ds.fnames[0]; fn


Out[24]:
'data/dogbreeds/train/000bec180eb18c7604dcecc8fe0dba07.jpg'

In [25]:
img = PIL.Image.open(fn); img


Out[25]:

In [26]:
img.size


Out[26]:
(500, 375)

In [27]:
# Map each training image filename to its (width, height) in pixels
size_d = {k: PIL.Image.open(PATH+k).size for k in data.trn_ds.fnames}

In [28]:
# PIL's .size is (width, height), so row_sz actually holds widths and col_sz holds heights
row_sz, col_sz = list(zip(*size_d.values()))

In [29]:
row_sz = np.array(row_sz); col_sz = np.array(col_sz)

In [30]:
row_sz[:5]


Out[30]:
array([500, 500, 400, 500, 231])

In [32]:
plt.hist(row_sz);



In [33]:
plt.hist(row_sz[row_sz < 1000])


Out[33]:
(array([  148.,   600.,  1307.,  1205.,  4581.,   122.,    78.,    62.,    15.,     7.]),
 array([  97. ,  186.3,  275.6,  364.9,  454.2,  543.5,  632.8,  722.1,  811.4,  900.7,  990. ]),
 <a list of 10 Patch objects>)

In [34]:
plt.hist(col_sz);



In [35]:
plt.hist(col_sz[col_sz < 1000])


Out[35]:
(array([  243.,   721.,  2218.,  2940.,  1837.,    95.,    29.,    29.,     8.,     8.]),
 array([ 102. ,  190.2,  278.4,  366.6,  454.8,  543. ,  631.2,  719.4,  807.6,  895.8,  984. ]),
 <a list of 10 Patch objects>)

In [36]:
len(data.trn_ds)


Out[36]:
8178

In [10]:
# # NOTE: THIS IS W/ bs=128
# lrf = learn.lr_find()
# learn.sched.plot()


[   0.         4.60545  112.17168    0.16882]             


In [11]:
lr = 2e-2

In [11]:
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)


[ 0.       4.77925  4.62485  0.03411]                       
[ 1.       4.57017  4.53765  0.03996]                       
[ 2.       4.46189  4.5175   0.04435]                       
[ 3.       4.51572  4.54183  0.04142]                       
[ 4.       4.40379  4.47268  0.04776]                       
[ 5.       4.31973  4.45794  0.04435]                       
[ 6.       4.23797  4.46152  0.04532]                       


In [12]:
lrs = np.array([lr/9,lr/3,lr])

In [13]:
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)


[ 0.       4.72217  4.69463  0.01852]                       
[ 1.       4.68598  4.62722  0.01511]                       
[ 2.       4.60267  4.60278  0.02729]                       
[ 3.       4.68185  4.62183  0.02583]                       
[ 4.       4.60635  4.55476  0.02388]                       
[ 5.       4.56302  4.52383  0.02875]                       
[ 6.       4.5056   4.52512  0.03216]                       


In [14]:
learn.save(f'{sz}_dogbreeds_RN50_00')

In [ ]: