In [1]:
keep_prob = 0.5    # probability that each weight survives dropout
do_dropout = True  # dropout is applied only during training
In [2]:
import torch
import copy
w1 = torch.randn(4, 4)  # weights of some layer
w = copy.deepcopy(w1)
w
Out[2]:
In [3]:
def dropout_strict(w, keep_prob):
    """Inverted dropout ensuring that the share of kept neurons is strictly keep_prob.

    Args:
        w (torch.Tensor): weights before dropout (modified in place)
        keep_prob (float): keep probability
    """
    # keep exactly k = round(n * keep_prob) entries per row
    k = round(w.shape[1] * keep_prob)
    # draw random scores and keep the top-k positions in each row
    _, indices = torch.topk(torch.randn(w.shape[0], w.shape[1]), k)
    keep = torch.zeros_like(w).scatter_(dim=1, index=indices, value=1.0)
    w *= keep
    # rescale survivors by 1/keep_prob so the expected value is unchanged
    # ("inverted" dropout: no rescaling needed at test time)
    w /= keep_prob
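A sanity check (my addition, assuming the cells above have run): each position lands in the top-k of the i.i.d. random scores with probability k/n = keep_prob, so after the 1/keep_prob rescaling the operation is unbiased and averaging many dropped-out copies of w1 should recover w1. The names acc, n_trials, and wc are illustrative.
In [ ]:
# sketch: verify E[dropout_strict(w)] ≈ w
acc = torch.zeros_like(w1)
n_trials = 10_000
for _ in range(n_trials):
    wc = w1.clone()  # dropout_strict mutates its argument in place
    dropout_strict(wc, keep_prob)
    acc += wc
print((acc / n_trials - w1).abs().max())  # should be near 0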
In [4]:
if do_dropout:
    dropout_strict(w, keep_prob)
print(w)
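A quick check (my addition): with keep_prob = 0.5 and 4 columns, every row of w should now have exactly round(4 * 0.5) = 2 nonzero entries.
In [ ]:
# torch.randn weights are almost surely nonzero, so zeros come from the mask
(w != 0).sum(dim=1)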
In [23]:
import numpy as np
import copy
w1 = np.random.randn(4, 4)  # weights of some layer
w = copy.deepcopy(w1)
w
Out[23]:
In [24]:
def dropout_loose(w, keep_prob):
    """A simple implementation of inverted dropout.

    Args:
        w (np.ndarray): weights subject to dropout (modified in place)
        keep_prob (float): keep probability
    """
    # keep each entry independently with probability keep_prob
    keep = np.random.rand(w.shape[0], w.shape[1]) < keep_prob
    w *= keep
    # rescale survivors so the expected value is unchanged
    w /= keep_prob
In [25]:
if do_dropout:
    dropout_loose(w, keep_prob)
print(w)
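Unlike the strict version, the element-wise Bernoulli mask only matches keep_prob on average. A minimal sketch (my addition) of how much the realized keep fraction fluctuates for a 4x4 layer:
In [ ]:
# realized keep fraction of a fresh 4x4 mask, over many trials
rates = [(np.random.rand(4, 4) < keep_prob).mean() for _ in range(10_000)]
print(np.mean(rates), np.std(rates))  # mean ≈ 0.5, std ≈ 0.125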