【强基固本】Introduction to Deep Learning with PyTorch 4.1: Several Autoencoders and Their PyTorch Implementations
"强基固本，行稳致远" (build a strong foundation to go far): scientific research rests on solid theory, and AI in particular depends on mathematics, physics, neuroscience and other basic disciplines. To keep pace with the field, we run the "强基固本" column, which explains fundamental knowledge in AI to support your research and study, consolidate your theoretical foundations, and strengthen your capacity for original work.
Source: https://zhuanlan.zhihu.com/p/418352070
02 Vanilla (fully connected) autoencoder
# import packages
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
%matplotlib inline
torch.manual_seed(1)    # for reproducibility
# Hyper Parameters
EPOCH = 5
BATCH_SIZE = 128
LR = 0.005             # learning rate
DOWNLOAD_MNIST = False
N_TEST_IMG = 6
train_data = torchvision.datasets.MNIST(
    root='../data/',
    train=True,                                      # use the training split
    transform=torchvision.transforms.ToTensor(),     # convert PIL images to tensors scaled to [0, 1]
    download=DOWNLOAD_MNIST,                         # download if not found locally
)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28*28, 100),
            nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(100, 28*28),
            nn.Sigmoid(),        # squash outputs into (0, 1) to match the pixel range
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded
autoencoder = AutoEncoder()
print(autoencoder)
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        b_x = x.view(-1, 28*28)    # batch x, shape (batch, 28*28)
        b_y = x.view(-1, 28*28)    # batch y (reconstruction target), shape (batch, 28*28)
        b_label = y                # batch label (unused; the autoencoder is unsupervised)

        encoded, decoded = autoencoder(b_x)

        loss = loss_func(decoded, b_y)    # mean squared error between reconstruction and input
        optimizer.zero_grad()             # clear gradients for this training step
        loss.backward()                   # backpropagation, compute gradients
        optimizer.step()                  # apply gradients

        if step % 500 == 0:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())
# take a few images to visualise reconstructions
view_data = train_data.data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor) / 255.

# plot original images (first row) and their reconstructions (second row)
_, decoded_data = autoencoder(view_data)

f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
for i in range(N_TEST_IMG):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[0][i].set_xticks(()); a[0][i].set_yticks(())
for i in range(N_TEST_IMG):
    a[1][i].clear()
    a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[1][i].set_xticks(()); a[1][i].set_yticks(())
plt.show()
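As a quick usage sketch (not part of the original article), the trained encoder can also be used on its own to map images to 100-dimensional codes. The variable names below (sample, codes, recon) are illustrative and assume the autoencoder and train_data defined above:

# illustrative sketch: extract 100-d latent codes for a handful of images
with torch.no_grad():
    sample = train_data.data[:8].view(-1, 28*28).type(torch.FloatTensor) / 255.
    codes, recon = autoencoder(sample)
print(codes.shape)   # torch.Size([8, 100]) -- one 100-d code per image
print(recon.shape)   # torch.Size([8, 784])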
03 Sparse autoencoder (L1 penalty on the hidden activations)
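The forward pass in the class below calls an L1Penality autograd Function that is not defined in this excerpt. The following is a minimal sketch of such a function, assuming the straight-through style commonly used in PyTorch sparse-autoencoder examples: the forward pass is the identity, and the backward pass adds the L1 subgradient, scaled by l1weight, to the upstream gradient so the activations are pushed toward zero.

class L1Penality(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, l1weight):
        ctx.save_for_backward(input)
        ctx.l1weight = l1weight
        return input    # identity in the forward pass

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        # subgradient of l1weight * |input|, added to the upstream gradient
        grad_input = input.clone().sign().mul(ctx.l1weight)
        grad_input += grad_output
        return grad_input, None    # no gradient for l1weight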
class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28*28, 400),
            nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(400, 28*28),
            nn.Sigmoid(),        # squash outputs into (0, 1) to match the pixel range
        )

    def forward(self, x):
        encoded = self.encoder(x)
        encoded = L1Penality.apply(encoded, 0.1)    # L1 sparsity penalty (weight 0.1) on the hidden activations
        decoded = self.decoder(encoded)
        return encoded, decoded
autoencoder = AutoEncoder()
print(autoencoder)
import copy
init_weightsE = copy.deepcopy(autoencoder.encoder[0].weight.data)    # keep a copy of the initial encoder weights
init_weightsD = copy.deepcopy(autoencoder.decoder[0].weight.data)    # keep a copy of the initial decoder weights
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        b_x = x.view(-1, 28*28)    # batch x, shape (batch, 28*28)
        b_y = x.view(-1, 28*28)    # batch y (reconstruction target), shape (batch, 28*28)
        b_label = y                # batch label (unused)

        encoded, decoded = autoencoder(b_x)

        loss = loss_func(decoded, b_y)    # mean squared error; the L1 penalty acts through the backward pass
        optimizer.zero_grad()             # clear gradients for this training step
        loss.backward()                   # backpropagation, compute gradients
        optimizer.step()                  # apply gradients

        if step % 500 == 0:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())
# plot original images (first row) and their reconstructions (second row)
_, decoded_data = autoencoder(view_data)

f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
for i in range(N_TEST_IMG):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[0][i].set_xticks(()); a[0][i].set_yticks(())
for i in range(N_TEST_IMG):
    a[1][i].clear()
    a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[1][i].set_xticks(()); a[1][i].set_yticks(())
plt.show()
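As a hypothetical sanity check (not in the original article), one way to see whether the penalty is doing anything is to count near-zero activations in the learned codes on a small batch. The names sample, codes and sparsity below are illustrative:

# illustrative sketch: estimate sparsity of the 400-d codes on a small batch
with torch.no_grad():
    sample = train_data.data[:256].view(-1, 28*28).type(torch.FloatTensor) / 255.
    codes, _ = autoencoder(sample)
    sparsity = (codes.abs() < 1e-3).float().mean().item()
print('fraction of (near-)zero hidden activations: %.3f' % sparsity)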
04 Denoising autoencoder
class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28*28, 400),
            nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(400, 28*28),
            nn.Sigmoid(),        # squash outputs into (0, 1) to match the pixel range
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded
noise_mean = 0.01    # mean of the additive Gaussian noise
noise_std = 0.02     # standard deviation of the additive Gaussian noise
autoencoder = AutoEncoder()
print(autoencoder)
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        b_x = x.view(-1, 28*28)    # clean batch, shape (batch, 28*28)
        b_y = x.view(-1, 28*28)    # reconstruction target is the clean image
        b_label = y                # batch label (unused)

        # corrupt the input with additive Gaussian noise, keeping pixels in [0, 1]
        noise = torch.normal(noise_mean, noise_std, size=b_x.shape)
        b_x_noise = torch.clamp(b_x + noise, 0., 1.)

        encoded, decoded = autoencoder(b_x_noise)

        loss = loss_func(decoded, b_y)    # reconstruct the clean image from the noisy input
        optimizer.zero_grad()             # clear gradients for this training step
        loss.backward()                   # backpropagation, compute gradients
        optimizer.step()                  # apply gradients

        if step % 500 == 0:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())
# plot original images (first row) and their reconstructions (second row)
_, decoded_data = autoencoder(view_data)

f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
for i in range(N_TEST_IMG):
    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[0][i].set_xticks(()); a[0][i].set_yticks(())
for i in range(N_TEST_IMG):
    a[1][i].clear()
    a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
    a[1][i].set_xticks(()); a[1][i].set_yticks(())
plt.show()
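As a hypothetical check of the denoising behaviour (not in the original article), one can corrupt a few images with the same noise model used in training and compare the reconstruction error against the clean originals. The names clean, noisy and restored below are illustrative:

# illustrative sketch: corrupt a few images and inspect how well they are restored
with torch.no_grad():
    clean = train_data.data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor) / 255.
    noisy = torch.clamp(clean + torch.normal(noise_mean, noise_std, size=clean.shape), 0., 1.)
    _, restored = autoencoder(noisy)
print('MSE noisy  vs clean: %.5f' % torch.mean((noisy - clean) ** 2).item())
print('MSE output vs clean: %.5f' % torch.mean((restored - clean) ** 2).item())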
References
https://www.deeplearningbook.org/contents/autoencoders.html
https://arxiv.org/pdf/1606.05908.pdf
https://www.deeplearningbook.org/
https://debuggercafe.com/implementing-deep-autoencoder-in-pytorch/
https://github.com/abhisheksamb