In [1]:
import torch

In [2]:
torch.backends.cudnn.enabled = False

In [3]:
def initialize():
    global a, b, c
    # three fully connected layers with PyTorch's default (random) init
    a = torch.nn.Linear(1, 10)
    b = torch.nn.Linear(10, 10)
    c = torch.nn.Linear(10, 1)

x = torch.tensor([1.])

def forward(x):
    # 1 -> 10 -> 10 -> 1 network, ReLU after the two hidden layers
    return c(b(a(x).relu()).relu())

def backprop(y):
    # treat the raw output y as the loss: one manual SGD step with lr = 0.01
    y.backward()
    for layer in [c, b, a]:
        for param in layer.parameters():
            param.data -= .01 * param.grad
            param.grad *= 0  # zero the gradients for the next step

def train():
    y = forward(x)
    backprop(y)
    return y

Random weights and biases

Each call to train() shrinks the output of the final neuron c, which has 10 inputs (a fan-in of 10 weights).

There is no need to demonstrate that the weights are asymmetric here; with the default random initialization, that is the usual case.
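If one did want to confirm it, a one-line check would do (a sketch, not one of the executed cells; it assumes the cells above have been run):

initialize()
print(c.weight.std(dim=1))  # nonzero spread per row: the default init is already asymmetric
print(c.bias)               # the bias is random as well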


In [4]:
initialize()
print(train())
print(train())


tensor([-0.3623], grad_fn=<ThAddBackward>)
tensor([-0.3764], grad_fn=<ThAddBackward>)

Random biases, no randomness in weights

With random biases, we can still break the symmetry in the weights.
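Before training, one can already see why the symmetry will break. With all-ones weights but random biases, the ten activations feeding into c differ, so the gradient on c's weights differs element by element, while the rows of b's weight gradient typically still match. A sketch (not an executed cell; it repeats the setup of In [5] just below):

initialize()
for layer in [a, b, c]:
    layer.weight.data = torch.ones_like(layer.weight)  # weights all ones, biases random
forward(x).backward()
print(c.weight.grad)                        # elements differ thanks to the random biases upstream
print(b.weight.grad[0] - b.weight.grad[1])  # rows are typically still (near-)identical before the first update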


In [5]:
initialize()
for layer in [a, b, c]:
    layer.weight.data = torch.ones_like(layer.weight)  # all-ones weights; the biases keep their random init

In [6]:
print(c.weight)


Parameter containing:
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]], requires_grad=True)

In [7]:
train(), print(c.weight)


Parameter containing:
tensor([[0.9021, 0.8972, 0.9009, 0.9020, 0.8984, 0.8989, 0.9003, 0.9026, 0.8991,
         0.8999]], requires_grad=True)
Out[7]:
(tensor([99.6101], grad_fn=<ThAddBackward>), None)

In [8]:
train(), print(c.weight)


Parameter containing:
tensor([[0.8254, 0.8157, 0.8230, 0.8253, 0.8181, 0.8192, 0.8219, 0.8266, 0.8196,
         0.8210]], requires_grad=True)
Out[8]:
(tensor([70.4516], grad_fn=<ThAddBackward>), None)

In [9]:
train(), print(c.weight)


Parameter containing:
tensor([[0.7640, 0.7494, 0.7604, 0.7638, 0.7530, 0.7546, 0.7587, 0.7657, 0.7553,
         0.7574]], requires_grad=True)
Out[9]:
(tensor([51.7601], grad_fn=<ThAddBackward>), None)

After three training steps, b's fan-in weight vectors are almost identical across neurons, but the elements within a single neuron's fan-in already differ.
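One way to quantify this claim (a sketch, not an executed cell): compare the spread of b.weight within each row to its spread down each column.

print(b.weight.std(dim=1))  # within one neuron's fan-in: clearly nonzero
print(b.weight.std(dim=0))  # across neurons: still close to zero
# rerunning these two lines after In [11] below shows the across-neuron spread growing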


In [10]:
b.weight #there's a small amount of symmetry breaking, but the fan-ins are pretty similar


Out[10]:
Parameter containing:
tensor([[0.9588, 0.9966, 0.9575, 0.9558, 0.9942, 0.9811, 0.9962, 0.9648, 0.9706,
         0.9973],
        [0.9590, 0.9966, 0.9577, 0.9560, 0.9942, 0.9811, 0.9962, 0.9650, 0.9707,
         0.9973],
        [0.9589, 0.9966, 0.9576, 0.9558, 0.9942, 0.9811, 0.9962, 0.9649, 0.9706,
         0.9973],
        [0.9588, 0.9966, 0.9575, 0.9558, 0.9942, 0.9811, 0.9962, 0.9648, 0.9706,
         0.9973],
        [0.9590, 0.9966, 0.9577, 0.9559, 0.9942, 0.9811, 0.9962, 0.9650, 0.9707,
         0.9973],
        [0.9589, 0.9966, 0.9577, 0.9559, 0.9942, 0.9811, 0.9962, 0.9650, 0.9707,
         0.9973],
        [0.9589, 0.9966, 0.9576, 0.9558, 0.9942, 0.9811, 0.9962, 0.9649, 0.9707,
         0.9973],
        [0.9588, 0.9966, 0.9575, 0.9557, 0.9942, 0.9810, 0.9962, 0.9648, 0.9706,
         0.9973],
        [0.9589, 0.9966, 0.9577, 0.9559, 0.9942, 0.9811, 0.9962, 0.9649, 0.9707,
         0.9973],
        [0.9589, 0.9966, 0.9576, 0.9559, 0.9942, 0.9811, 0.9962, 0.9649, 0.9707,
         0.9973]], requires_grad=True)

After 25 more epochs, the fan-in vectors differ across neurons as well as within each neuron: the asymmetry that the random biases injected into b's activations has spread into c's weights and, through the backward pass, back into b's rows.


In [11]:
for _ in range(25):
    train()
b.weight #now the fan-ins are pretty different


Out[11]:
Parameter containing:
tensor([[0.9193, 0.9966, 0.9151, 0.9091, 0.9942, 0.9768, 0.9962, 0.9387, 0.9545,
         0.9973],
        [0.9212, 0.9966, 0.9168, 0.9102, 0.9942, 0.9770, 0.9962, 0.9399, 0.9552,
         0.9973],
        [0.9197, 0.9966, 0.9152, 0.9090, 0.9942, 0.9769, 0.9962, 0.9390, 0.9547,
         0.9973],
        [0.9193, 0.9966, 0.9151, 0.9091, 0.9942, 0.9768, 0.9962, 0.9387, 0.9546,
         0.9973],
        [0.9207, 0.9966, 0.9163, 0.9096, 0.9942, 0.9770, 0.9962, 0.9396, 0.9550,
         0.9973],
        [0.9205, 0.9966, 0.9161, 0.9094, 0.9942, 0.9770, 0.9962, 0.9394, 0.9550,
         0.9973],
        [0.9199, 0.9966, 0.9155, 0.9092, 0.9942, 0.9769, 0.9962, 0.9391, 0.9548,
         0.9973],
        [0.9191, 0.9966, 0.9148, 0.9088, 0.9942, 0.9768, 0.9962, 0.9386, 0.9545,
         0.9973],
        [0.9204, 0.9966, 0.9160, 0.9094, 0.9942, 0.9769, 0.9962, 0.9394, 0.9549,
         0.9973],
        [0.9201, 0.9966, 0.9157, 0.9095, 0.9942, 0.9769, 0.9962, 0.9392, 0.9548,
         0.9973]], requires_grad=True)

No randomness in biases or weights

With no randomness in either the weights or the biases, the symmetry cannot be broken.
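The reason shows up in the very first gradient: with identical weights and identical biases, every neuron in a layer computes the same activation and receives the same error signal, so its gradient, and therefore its update, is the same. A minimal check (a sketch, not an executed cell; it repeats the setup of In [12] just below):

initialize()
for layer in [a, b, c]:
    layer.weight.data = torch.ones_like(layer.weight)
    layer.bias.data = torch.ones_like(layer.bias)
forward(x).backward()
print((b.weight.grad == b.weight.grad[0]).all())  # expected: tensor(True), every row of the gradient is identical
print((a.weight.grad == a.weight.grad[0]).all())  # same story for layer a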


In [12]:
initialize()
for layer in [a, b, c]:
    layer.weight.data = torch.ones_like(layer.weight)
    layer.bias.data = torch.ones_like(layer.bias)  # all-ones weights and biases: a perfectly symmetric start

In [13]:
print(c.weight, c.bias)


Parameter containing:
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]], requires_grad=True) Parameter containing:
tensor([1.], requires_grad=True)

In [14]:
train(), print(c.weight, c.bias)


Parameter containing:
tensor([[0.7900, 0.7900, 0.7900, 0.7900, 0.7900, 0.7900, 0.7900, 0.7900, 0.7900,
         0.7900]], requires_grad=True) Parameter containing:
tensor([0.9900], requires_grad=True)
Out[14]:
(tensor([211.], grad_fn=<ThAddBackward>), None)

In [15]:
train(), print(c.weight, c.bias)


Parameter containing:
tensor([[0.6037, 0.6037, 0.6037, 0.6037, 0.6037, 0.6037, 0.6037, 0.6037, 0.6037,
         0.6037]], requires_grad=True) Parameter containing:
tensor([0.9800], requires_grad=True)
Out[15]:
(tensor([148.1670], grad_fn=<ThAddBackward>), None)

In [16]:
train(), print(c.weight, c.bias)


Parameter containing:
tensor([[0.4350, 0.4350, 0.4350, 0.4350, 0.4350, 0.4350, 0.4350, 0.4350, 0.4350,
         0.4350]], requires_grad=True) Parameter containing:
tensor([0.9700], requires_grad=True)
Out[16]:
(tensor([102.8286], grad_fn=<ThAddBackward>), None)

The fan-in vectors are identical across neurons, and the elements within each neuron's fan-in are identical as well.
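A one-line check of the claim (a sketch, not an executed cell): every entry of b.weight should equal every other entry.

print((b.weight == b.weight[0, 0]).all())  # expected: tensor(True)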


In [17]:
b.weight


Out[17]:
Parameter containing:
tensor([[0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558],
        [0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558, 0.9558,
         0.9558]], requires_grad=True)

After 25 more epochs the weights have changed substantially, yet they are still identical across neurons and within each fan-in: identical parameters receive identical gradients, so every update preserves the symmetry.


In [18]:
for _ in range(25):
    train()
b.weight


Out[18]:
Parameter containing:
tensor([[25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077],
        [25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077, 25.1077,
         25.1077, 25.1077]], requires_grad=True)