In [1]:
import torch
import torch.nn.functional as F
from torch.autograd import Variable  # no-op wrapper since PyTorch 0.4; kept for compatibility
In [34]:
class generator(torch.nn.Module):
    """Maps a (batch, L, E) source embedding to a (batch, L/4, E) output via
    three stride-2 convolutions over the sequence axis and a linear layer."""

    def __init__(self, input_shape):
        super(generator, self).__init__()
        k_size = (9, 1)
        # integer division so the padding is an int pair, e.g. (4, 0)
        pad_size = ((k_size[0] - 1) // 2, (k_size[1] - 1) // 2)
        fd = [128, 256, 512]
        # each conv halves the sequence length (stride 2 on the first axis)
        self.conv1 = torch.nn.Conv2d(
            1, fd[0], kernel_size=k_size, stride=(2, 1), padding=pad_size)
        torch.nn.init.uniform_(self.conv1.weight, -1.0, 1.0)
        torch.nn.init.constant_(self.conv1.bias, 0.0)
        self.conv2 = torch.nn.Conv2d(
            fd[0], fd[1], kernel_size=k_size, stride=(2, 1), padding=pad_size)
        torch.nn.init.uniform_(self.conv2.weight, -1.0, 1.0)
        torch.nn.init.constant_(self.conv2.bias, 0.0)
        self.conv3 = torch.nn.Conv2d(
            fd[1], fd[2], kernel_size=k_size, stride=(2, 1), padding=pad_size)
        torch.nn.init.uniform_(self.conv3.weight, -1.0, 1.0)
        torch.nn.init.constant_(self.conv3.bias, 0.0)
        # after three stride-2 convs the sequence axis is input_shape[0] // 8
        self.out_ = torch.nn.Linear(fd[2] * input_shape[0] // 8, input_shape[0] // 4)

    def forward(self, src_emb):
        g1 = self.conv1(src_emb.unsqueeze(1))       # (B, 1, L, E) -> (B, 128, L/2, E)
        g1 = self.conv2(g1)                         # -> (B, 256, L/4, E)
        g1 = self.conv3(g1)                         # -> (B, 512, L/8, E)
        g1 = g1.view(g1.size(0), -1, g1.size(-1))   # (B, 512*L/8, E)
        g1 = g1.transpose(1, 2)                     # (B, E, 512*L/8)
        g1 = self.out_(g1)                          # project to (B, E, L/4)
        g1 = g1.transpose(1, 2)                     # (B, L/4, E)
        return g1
class discriminator(torch.nn.Module):
    """Scores a (batch, L, E) embedding with a single full-width convolution
    followed by a linear layer and a sigmoid."""

    def __init__(self, input_shape):
        super(discriminator, self).__init__()
        # the kernel spans the full embedding dimension, so the width collapses to 1
        k_size = (5, input_shape[1])
        pad_size = ((k_size[0] - 1) // 2, 0)
        fd = 128
        self.conv1 = torch.nn.Conv2d(
            1, fd, kernel_size=k_size, stride=(1, 1), padding=pad_size)
        torch.nn.init.uniform_(self.conv1.weight, -1.0, 1.0)
        torch.nn.init.constant_(self.conv1.bias, 0.0)
        self.out_ = torch.nn.Linear(fd * input_shape[0], 1)

    def forward(self, trg_emb):
        d1 = self.conv1(trg_emb.unsqueeze(1))     # (B, 1, L, E) -> (B, 128, L, 1)
        d1 = self.out_(d1.view(d1.size(0), -1))   # flatten to (B, 128*L) -> (B, 1)
        d1 = torch.sigmoid(d1)                    # probability that the input is real
        return d1
# smoke test: the generator maps (32, 400, 128) -> (32, 100, 128) and the
# discriminator maps that to a (32, 1) probability (requires a CUDA device)
src_emb = Variable(torch.rand(32, 400, 128)).cuda()
model_G = generator((400, 1)).cuda()
model_D = discriminator((100, 128)).cuda()
gg = model_G(src_emb)
dd = model_D(gg)
print(dd)
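The notebook stops after this forward-pass check. For context, the cell below sketches what a single adversarial update could look like, assuming standard BCE GAN losses and Adam optimizers; real_trg, the labels, and the learning rate are illustrative placeholders, not part of the original.
In [ ]:
# Minimal sketch of one GAN training step (assumed, not from the original):
# real_trg stands in for a batch of genuine (32, 100, 128) target embeddings.
bce = torch.nn.BCELoss()
opt_G = torch.optim.Adam(model_G.parameters(), lr=1e-4)
opt_D = torch.optim.Adam(model_D.parameters(), lr=1e-4)

real_trg = Variable(torch.rand(32, 100, 128)).cuda()  # placeholder for real data
real_lbl = torch.ones(32, 1).cuda()
fake_lbl = torch.zeros(32, 1).cuda()

# discriminator step: push real toward 1, generated toward 0 (detach freezes G)
opt_D.zero_grad()
d_loss = (bce(model_D(real_trg), real_lbl)
          + bce(model_D(model_G(src_emb).detach()), fake_lbl))
d_loss.backward()
opt_D.step()

# generator step: fool D into predicting 1 on generated samples
opt_G.zero_grad()
g_loss = bce(model_D(model_G(src_emb)), real_lbl)
g_loss.backward()
opt_G.step()
print(d_loss.item(), g_loss.item())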