CGAN with a Projection Discriminator
In this model, the discriminator does not use the conditional information by simply concatenating it with the feature vector. Instead, it adopts a projection-based approach that better respects the role the conditional information plays in the underlying probabilistic model.
The discriminator design is motivated by a probabilistic modeling assumption: the distribution of the conditional variable y given x is either discrete or a unimodal continuous distribution. This assumption holds in many practical applications, including class-conditional image generation and super-resolution. Under this assumption, the discriminator can be structured around an inner product between an embedded condition vector y and the feature vector.
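Concretely, in the notation of the projection-discriminator paper, the discriminator output is the sum of an unconditional real/fake term and an inner-product (projection) term:

    f(x, y) = \psi(\phi(x)) + y^{\top} V \phi(x)

where \phi is the shared convolutional feature extractor, \psi is a linear layer producing the unconditional logit, V is the embedding matrix for the condition y, and y is typically a one-hot class label. In the implementation below, \phi corresponds to self.ls, \psi to l_logit, and V to the weights of l_projection.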
Code implementation
import torch.nn as nn

# Note: _get_norm_fn_2d, _get_weight_norm_fn, and torchlib.Reshape are helper
# utilities from the accompanying repository.
class DiscriminatorPCGAN(nn.Module):

    def __init__(self, x_dim, c_dim, dim=96, norm='none', weight_norm='spectral_norm'):
        super(DiscriminatorPCGAN, self).__init__()

        norm_fn = _get_norm_fn_2d(norm)
        weight_norm_fn = _get_weight_norm_fn(weight_norm)

        def conv_norm_lrelu(in_dim, out_dim, kernel_size=3, stride=1, padding=1):
            return nn.Sequential(
                weight_norm_fn(nn.Conv2d(in_dim, out_dim, kernel_size, stride, padding)),
                norm_fn(out_dim),
                nn.LeakyReLU(0.2))

        self.ls = nn.Sequential(  # (N, x_dim, 32, 32)
            conv_norm_lrelu(x_dim, dim),
            conv_norm_lrelu(dim, dim),
            conv_norm_lrelu(dim, dim, stride=2),  # (N, dim, 16, 16)
            conv_norm_lrelu(dim, dim * 2),
            conv_norm_lrelu(dim * 2, dim * 2),
            conv_norm_lrelu(dim * 2, dim * 2, stride=2),  # (N, dim*2, 8, 8)
            conv_norm_lrelu(dim * 2, dim * 2, kernel_size=3, stride=1, padding=0),
            conv_norm_lrelu(dim * 2, dim * 2, kernel_size=1, stride=1, padding=0),
            conv_norm_lrelu(dim * 2, dim * 2, kernel_size=1, stride=1, padding=0),  # (N, dim*2, 6, 6)
            nn.AvgPool2d(kernel_size=6),       # (N, dim*2, 1, 1)
            torchlib.Reshape(-1, dim * 2))     # (N, dim*2)

        self.l_logit = weight_norm_fn(nn.Linear(dim * 2, 1))            # psi: unconditional logit, (N, 1)
        self.l_projection = weight_norm_fn(nn.Linear(dim * 2, c_dim))   # V: condition embedding, (N, c_dim)

    def forward(self, x, c):
        # x: (N, x_dim, 32, 32), c: (N, c_dim) one-hot condition
        feat = self.ls(x)
        logit = self.l_logit(feat)
        # Projection term: linear embedding of the features, combined with the
        # condition vector via an inner product (averaged over c_dim here).
        embed = (self.l_projection(feat) * c).mean(1, keepdim=True)
        logit += embed
        return logit
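For a quick smoke test, the repository helpers can be replaced with minimal stand-ins when this class is pasted into a single script. The definitions of _get_norm_fn_2d, _get_weight_norm_fn, Reshape, and the torchlib shim below are assumptions made for illustration only, not the repository's actual implementations:

import types
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm, weight_norm

# Assumed stand-ins for the repository helpers, just enough to run the class above.
def _get_norm_fn_2d(norm):
    # 'none' -> identity; otherwise the usual 2D normalization layers
    return {'none': nn.Identity,
            'batch_norm': nn.BatchNorm2d,
            'instance_norm': nn.InstanceNorm2d}[norm]

def _get_weight_norm_fn(wn):
    # 'spectral_norm' wraps the layer with spectral normalization; 'none' is a no-op
    return {'none': lambda m: m,
            'spectral_norm': spectral_norm,
            'weight_norm': weight_norm}[wn]

class Reshape(nn.Module):
    def __init__(self, *shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(*self.shape)

# Expose the stand-in under the name the class above expects.
torchlib = types.SimpleNamespace(Reshape=Reshape)

# Smoke test: 32x32 RGB images with 10 one-hot classes.
D = DiscriminatorPCGAN(x_dim=3, c_dim=10)
x = torch.randn(8, 3, 32, 32)
c = F.one_hot(torch.randint(0, 10, (8,)), num_classes=10).float()
print(D(x, c).shape)  # torch.Size([8, 1])

Each sample gets a single scalar logit; the conditional information enters only through the projection term added in forward.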
CGAN reference article