其他
PyTorch 教程-PyTorch中图像识别模型的测试
在上一部分,我们实现了一个神经网络,或者说创建了一个能够对手写数字进行分类的模型。现在,我们通过从网络上获取一张图片来测试我们的模型。我们使用了以下图片:
在看到这个图片后,我们将意识到它是数字5。现在,我们将尝试让我们的网络进行预测。
我们有以下步骤来预测数字图像:
步骤1:
# Fetch a sample digit photo (a house-number "5") from the web to test the model.
import requests
url='https://images.homedepot-static.com/productImages/007164ea-d47e-4f66-8d8c-fd9f621984a2/svn/architectural-mailboxes-house-letters-numbers-3585b-5-64_1000.jpg'
步骤2:
response=requests.get(url,stream=True)
步骤3:
from PIL import Image
# Open the image straight from the HTTP response stream.
img=Image.open(response.raw)
# NOTE(review): plt relies on matplotlib.pyplot being imported as plt
# elsewhere (see the complete code below).
plt.imshow(img)
plt.show()
当运行它时,它会生成一个错误,因为PIL库尚未安装。我们必须首先安装Pillow才能运行此代码:在Anaconda命令提示符上运行conda install -c anaconda pillow命令来安装Pillow。
当您运行代码时,它将给出预期的输出。
步骤4:
我们需要确保图像与神经网络训练要学习的内容相对应。我们的图像大小为1000 * 1000像素,因此我们需要将其变成28 * 28的灰度图像,就像训练数据中的图像一样。在我们的训练图像数据集中,图像具有黑色背景和白色前景,在上面的图像中有白色背景和黑色前景。现在,我们的第一个任务是对此图像进行预处理。
# FIX: PIL.ImageOps must be imported BEFORE PIL.ImageOps.invert is called;
# the original snippet placed the import after its first use.
import PIL.ImageOps

# MNIST digits are white-on-black; the downloaded photo is black-on-white,
# so invert the colors, binarize, then apply the same transform pipeline
# used for the training data (resize 28x28, to tensor, normalize).
img=PIL.ImageOps.invert(img)
img=img.convert('1')
transform1=transforms.Compose([transforms.Resize((28,28)),transforms.ToTensor(),transforms.Normalize((0.5,),(0.5,))])
img=transform1(img)
plt.imshow(im_convert(img))
步骤5:
# Flatten the (1, 28, 28) tensor to (1, 784) to match the network's input layer.
img=img.view(img.shape[0],-1)
output=model(img)
# The predicted digit is the index of the largest logit.
_,pred=torch.max(output,1)
print(pred.item())
它将给出预期的预测:
步骤6:
# Fetch one batch from the validation set and run it through the model.
dataiter=iter(validation_loader)
# FIX: use the builtin next(); DataLoader iterators no longer expose a
# .next() method (removed in modern PyTorch; next() works in all versions).
images,labels=next(dataiter)
images_=images.view(images.shape[0],-1)  # flatten to (batch, 784)
output=model(images_)
_,preds=torch.max(output,1)
步骤7:
fig=plt.figure(figsize=(25,4))
# NOTE(review): the following three lines belong inside a loop over idx
# (see the complete code below); they are shown without the for-header here.
ax=fig.add_subplot(2,10,idx+1)
plt.imshow(im_convert(images[idx]))
# Title is "prediction(label)"; green when the prediction matches the label.
ax.set_title("{}({})".format(str(preds[idx].item()),str(labels[idx].item())),color=("green" if preds[idx]==labels[idx] else "red"))
最后调用plt.show(),它将给出我们期望的结果。
完整代码:
import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as func
import PIL.ImageOps
from torch import nn
from torchvision import datasets,transforms
import requests
from PIL import Image
# Shared preprocessing: resize to 28x28, convert to tensor, normalize to roughly [-1, 1].
transform1=transforms.Compose([transforms.Resize((28,28)),transforms.ToTensor(),transforms.Normalize((0.5,),(0.5,))])
# Download (if needed) and load the MNIST train/validation splits.
training_dataset=datasets.MNIST(root='./data',train=True,download=True,transform=transform1)
validation_dataset=datasets.MNIST(root='./data',train=False,download=True,transform=transform1)
# Mini-batches of 100; only the training set is shuffled.
training_loader=torch.utils.data.DataLoader(dataset=training_dataset,batch_size=100,shuffle=True)
validation_loader=torch.utils.data.DataLoader(dataset=validation_dataset,batch_size=100,shuffle=False)
def im_convert(tensor):
    """Convert a normalized (C, H, W) image tensor into an (H, W, C) numpy
    array with values in [0, 1], suitable for plt.imshow.

    Undoes transforms.Normalize((0.5,), (0.5,)): x -> x * 0.5 + 0.5.
    """
    image = tensor.clone().detach().numpy()
    image = image.transpose(1, 2, 0)  # (C, H, W) -> (H, W, C)
    # BUG FIX: the original computed image * (0.5 + 0.5) == image, which left
    # values in [-1, 1] so the entire negative half was clipped to 0.
    # Denormalize as image * std + mean instead. (A stray debug print of the
    # shape was also removed.)
    image = image * np.array((0.5, 0.5, 0.5)) + np.array((0.5, 0.5, 0.5))
    image = image.clip(0, 1)
    return image
# Show the first 20 training images with their ground-truth labels.
dataiter=iter(training_loader)
# FIX: use the builtin next(); DataLoader iterators no longer expose a
# .next() method (removed in modern PyTorch; next() works in all versions).
images,labels=next(dataiter)
fig=plt.figure(figsize=(25,4))
for idx in np.arange(20):
    ax=fig.add_subplot(2,10,idx+1)
    plt.imshow(im_convert(images[idx]))
    ax.set_title([labels[idx].item()])
class classification1(nn.Module):
    """Fully-connected digit classifier: input -> hidden1 -> hidden2 -> output.

    Two ReLU-activated hidden layers followed by a plain linear output layer.
    Returns raw logits (CrossEntropyLoss applies log-softmax itself).
    """

    def __init__(self, input_layer, hidden_layer1, hidden_layer2, output_layer):
        super().__init__()
        # Attribute names linear1/2/3 are preserved so saved state dicts and
        # any external references keep working.
        self.linear1 = nn.Linear(input_layer, hidden_layer1)
        self.linear2 = nn.Linear(hidden_layer1, hidden_layer2)
        self.linear3 = nn.Linear(hidden_layer2, output_layer)

    def forward(self, x):
        # Same computation as before, written as one nested pipeline.
        hidden = func.relu(self.linear2(func.relu(self.linear1(x))))
        return self.linear3(hidden)
# 784 inputs (28*28 pixels), two hidden layers (125, 65), 10 output classes.
model=classification1(784,125,65,10)
# NOTE(review): "criteron" is a typo for "criterion"; kept as-is because the
# training loop below references this exact name.
criteron=nn.CrossEntropyLoss()
optimizer=torch.optim.Adam(model.parameters(),lr=0.0001)
epochs=12
# Per-epoch training/validation loss and accuracy history.
loss_history=[]
correct_history=[]
val_loss_history=[]
val_correct_history=[]
for e in range(epochs):
    # Running totals for this epoch (training and validation phases).
    loss=0.0
    correct=0.0
    val_loss=0.0
    val_correct=0.0
    # NOTE(review): `input` shadows the builtin of the same name.
    for input,labels in training_loader:
        # Flatten each 28x28 image into a 784-dim vector for the linear layers.
        inputs=input.view(input.shape[0],-1)
        outputs=model(inputs)
        loss1=criteron(outputs,labels)
        optimizer.zero_grad()
        loss1.backward()
        optimizer.step()
        _,preds=torch.max(outputs,1)
        loss+=loss1.item()
        correct+=torch.sum(preds==labels.data)
    else:
        # for/else: this branch runs once after the loop finishes normally.
        with torch.no_grad():  # no gradients needed during validation
            for val_input,val_labels in validation_loader:
                val_inputs=val_input.view(val_input.shape[0],-1)
                val_outputs=model(val_inputs)
                val_loss1=criteron(val_outputs,val_labels)
                _,val_preds=torch.max(val_outputs,1)
                val_loss+=val_loss1.item()
                val_correct+=torch.sum(val_preds==val_labels.data)
        # NOTE(review): loss is summed per batch but divided by the number of
        # samples, so the reported "loss" is scaled by 1/batch_size relative
        # to a true per-sample mean — confirm this is intended.
        epoch_loss=loss/len(training_loader.dataset)
        epoch_acc=correct.float()/len(training_dataset)
        loss_history.append(epoch_loss)
        correct_history.append(epoch_acc)
        val_epoch_loss=val_loss/len(validation_loader.dataset)
        val_epoch_acc=val_correct.float()/len(validation_dataset)
        val_loss_history.append(val_epoch_loss)
        val_correct_history.append(val_epoch_acc)
        print('training_loss:{:.4f},{:.4f}'.format(epoch_loss,epoch_acc.item()))
        print('validation_loss:{:.4f},{:.4f}'.format(val_epoch_loss,val_epoch_acc.item()))
# Download the test photo; stream=True lets PIL read response.raw directly.
url='https://images.homedepot-static.com/productImages/007164ea-d47e-4f66-8d8c-fd9f621984a2/svn/architectural-mailboxes-house-letters-numbers-3585b-5-64_1000.jpg'
response=requests.get(url,stream=True)
img=Image.open(response.raw)
# MNIST digits are white-on-black; the photo is black-on-white, so invert.
img=PIL.ImageOps.invert(img)
img=img.convert('1')  # binarize to a single-channel (1-bit) image
img=transform1(img)   # resize to 28x28, to tensor, normalize
plt.imshow(im_convert(img))
# Flatten to (1, 784) and take the argmax logit as the predicted digit.
img=img.view(img.shape[0],-1)
output=model(img)
_,pred=torch.max(output,1)
print(pred.item())
# Visualize predictions on one validation batch: each title is
# "prediction(label)", green when correct, red when wrong.
dataiter=iter(validation_loader)
# FIX: use the builtin next(); DataLoader iterators no longer expose a
# .next() method (removed in modern PyTorch; next() works in all versions).
images,labels=next(dataiter)
images_=images.view(images.shape[0],-1)  # flatten to (batch, 784)
output=model(images_)
_,preds=torch.max(output,1)
fig=plt.figure(figsize=(25,4))
for idx in np.arange(20):
    ax=fig.add_subplot(2,10,idx+1,xticks=[],yticks=[])
    plt.imshow(im_convert(images[idx]))
    ax.set_title("{}({})".format(str(preds[idx].item()),str(labels[idx].item())),color=("green" if preds[idx]==labels[idx] else "red"))
plt.show()