PyTorch Deep Learning: Introduction and Practice (Sun Yulin), Chapter 6: Recognizing Fashion-MNIST with a Convolutional Neural Network
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import copy
import time
import torch
import torch.nn as nn
from torch.optim import SGD,Adam
import torch.utils.data as Data
from torchvision import transforms
from torchvision.datasets import FashionMNIST
# Use the FashionMNIST dataset; prepare the training set
train_data = FashionMNIST(root="../resourses/datasets/", train=True, transform=transforms.ToTensor(), download=False)
# Define a data loader
train_loader = Data.DataLoader(dataset=train_data, batch_size=64, shuffle=False, num_workers=0)
# Count how many batches train_loader contains
print("Number of batches in train_loader:", len(train_loader))
# Grab one batch of data
for step, (b_x, b_y) in enumerate(train_loader):
    if step > 0:
        break
# Visualize one batch of images
print("b_x:",b_x.shape)#torch.Size([64, 1, 28, 28])
batch_x = b_x.squeeze().numpy()
batch_y = b_y.numpy()
print("batch_x:",batch_x.shape,"batch_y:",batch_y.shape)#batch_x: (64, 28, 28) batch_y: (64,)
class_label = train_data.classes
class_label[0] = "T-shirt"  # shorten the original "T-shirt/top" label
plt.figure(figsize=(12, 5))
for ii in np.arange(len(batch_y)):
    plt.subplot(4, 16, ii + 1)  # 4 rows x 16 columns
    plt.imshow(batch_x[ii, :, :], cmap=plt.cm.gray)  # batch_x is 3-D, so two colons select one image
    plt.title(class_label[batch_y[ii]], size=9)
    plt.axis("off")
plt.subplots_adjust(wspace=0.5)  # spacing between subplots
plt.show()
# Process the test set
test_data = FashionMNIST(root="../resourses/datasets/", train=False, download=False)
# Add a channel dimension and scale pixel values to the range [0, 1]
test_data_x = test_data.data.type(torch.FloatTensor) / 255.0
test_data_x = torch.unsqueeze(test_data_x, dim=1)
test_data_y = test_data.targets  # test-set labels
print("test_data_x.shape",test_data_x.shape)#test_data_x.shape torch.Size([10000, 1, 28, 28])
print("test_data_y.shape",test_data_y.shape)#test_data_y.shape torch.Size([10000])
# Build the convolutional neural network
class MyConvNet(nn.Module):
    def __init__(self):
        super(MyConvNet, self).__init__()
        # Define the first convolutional layer
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1),  # conv: (1*28*28) -> (16*28*28)
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=2, stride=2)  # pool: (16*28*28) -> (16*14*14)
        )
        # Define the second convolutional layer
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=0),  # conv: (16*14*14) -> (32*12*12)
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=2, stride=2)  # pool: (32*12*12) -> (32*6*6)
        )
        # Define the fully connected classifier
        self.classifier = nn.Sequential(
            nn.Linear(32 * 6 * 6, 256),  # flatten the feature volume into a vector
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 10)
        )
    # Define the network's forward pass
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # flatten the multi-dimensional feature maps
        output = self.classifier(x)
        return output
# Print the network structure
myconvnet = MyConvNet()
print(myconvnet)
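To confirm that the first Linear layer's 32*6*6 input size matches the convolutional output, a dummy forward pass works as a quick sketch (the batch size of 2 here is arbitrary):
# Push a fake batch through the feature extractor and inspect the shape
with torch.no_grad():
    dummy = torch.rand(2, 1, 28, 28)
    feat = myconvnet.conv2(myconvnet.conv1(dummy))
    print(feat.shape)  # torch.Size([2, 32, 6, 6]) -> 32*6*6 = 1152 features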
# Define the training-loop function
def train_model(model, traindataloader, train_rate, criterion, optimizer, num_epochs=25):
    # train_rate: fraction of batches used for training (the rest for validation)
    # Compute the number of batches used for training
    batch_num = len(traindataloader)
    train_batch_num = round(batch_num * train_rate)  # the first train_rate fraction of batches are used for training
    # Copy the model's parameters
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    train_loss_all = []
    train_acc_all = []
    val_loss_all = []
    val_acc_all = []
    since = time.time()
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training phase and a validation phase
        train_loss = 0.0
        train_corrects = 0
        train_num = 0
        val_loss = 0.0
        val_corrects = 0
        val_num = 0
        for step, (b_x, b_y) in enumerate(traindataloader):
            if step < train_batch_num:  # the first train_rate fraction of batches are used for training
                model.train()  # set the model to training mode
                output = model(b_x)
                pre_lab = torch.argmax(output, 1)
                loss = criterion(output, b_y)  # mean loss per sample over this batch
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_loss += loss.item() * b_x.size(0)  # b_x.size(0) = batch_size, so this accumulates the summed loss of the batch
                train_corrects += torch.sum(pre_lab == b_y.data)
                train_num += b_x.size(0)
            else:
                model.eval()  # set the model to evaluation mode
                output = model(b_x)
                pre_lab = torch.argmax(output, 1)
                loss = criterion(output, b_y)
                val_loss += loss.item() * b_x.size(0)
                val_corrects += torch.sum(pre_lab == b_y.data)
                val_num += b_x.size(0)
        # Compute the loss and accuracy on the training and validation sets for this epoch
        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_corrects.double().item() / train_num)
        val_loss_all.append(val_loss / val_num)
        val_acc_all.append(val_corrects.double().item() / val_num)
        print('{} Train Loss: {:.4f} Train Acc: {:.4f}'.format(epoch, train_loss_all[-1], train_acc_all[-1]))  # [-1] is the value just appended for this epoch
        print('{} Val Loss: {:.4f} Val Acc: {:.4f}'.format(epoch, val_loss_all[-1], val_acc_all[-1]))
        # Keep a copy of the parameters with the highest validation accuracy
        if val_acc_all[-1] > best_acc:
            best_acc = val_acc_all[-1]
            best_model_wts = copy.deepcopy(model.state_dict())
        time_use = time.time() - since
        print("Train and val complete in {:.0f}m {:.0f}s".format(time_use // 60, time_use % 60))
    # Load the best model's parameters
    model.load_state_dict(best_model_wts)
    # Assemble the training history into a DataFrame train_process
    train_process = pd.DataFrame(data={"epoch": range(num_epochs),
                                       "train_loss_all": train_loss_all,
                                       "val_loss_all": val_loss_all,
                                       "train_acc_all": train_acc_all,
                                       "val_acc_all": val_acc_all})
    return model, train_process
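With the 938-batch loader above and train_rate=0.8, the split works out as sketched below; note that because the DataLoader uses shuffle=False, the same final batches act as the validation set every epoch:
# round(938 * 0.8) = 750 batches (~48,000 images) are used for training,
# leaving 188 batches (~12,000 images) for validation each epoch
n_train_batches = round(938 * 0.8)     # 750
n_val_batches = 938 - n_train_batches  # 188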
# Train the model
optimizer = Adam(myconvnet.parameters(),lr=0.0003)
criterion = nn.CrossEntropyLoss()
myconvnet,train_process = train_model(myconvnet,train_loader,0.8,criterion,optimizer,num_epochs=25)
# Visualize the loss and accuracy during training
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(train_process.epoch, train_process.train_loss_all, "ro-", label="Train loss")
plt.plot(train_process.epoch,train_process.val_loss_all,"bs-",label="Val loss")
plt.legend()
plt.xlabel("epoch")
plt.ylabel("Loss")
plt.subplot(1,2,2)
plt.plot(train_process.epoch, train_process.train_acc_all, "ro-", label="Train acc")
plt.plot(train_process.epoch,train_process.val_acc_all,"bs-",label="Val acc")
plt.xlabel("epoch")
plt.ylabel("acc")
plt.legend()
plt.show()
# Predict on the test set and visualize the results to gauge the model's generalization
myconvnet.eval()
output = myconvnet(test_data_x)
pre_lab = torch.argmax(output,1)
acc = accuracy_score(test_data_y,pre_lab)
print("在测试集上的预测精度为:",acc)
# Compute and visualize the confusion matrix
conf_mat = confusion_matrix(test_data_y,pre_lab)
df_cm = pd.DataFrame(conf_mat,index=class_label,columns=class_label)
heatmap = sns.heatmap(df_cm,annot=True,fmt="d",cmap="YlGnBu")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right')
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right')
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
Dilated convolution part:
# Dilated-convolution version of the network
class MyConvdilaNet(nn.Module):
    def __init__(self):
        super(MyConvdilaNet, self).__init__()
        # Define the first (dilated) convolutional layer
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1, dilation=2),  # conv: (1*28*28) -> (16*26*26)
            nn.ReLU(),
            nn.AvgPool2d(2, 2)  # pool: (16*26*26) -> (16*13*13)
        )
        # Define the second (dilated) convolutional layer
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=0, dilation=2),  # conv: (16*13*13) -> (32*9*9)
            nn.ReLU(),
            nn.AvgPool2d(2, 2)  # pool: (32*9*9) -> (32*4*4)
        )
        # Define the fully connected classifier
        self.classifier = nn.Sequential(
            nn.Linear(32 * 4 * 4, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 10)
        )
    # Define the network's forward pass
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # flatten the multi-dimensional feature maps
        output = self.classifier(x)
        return output
# Print the network structure
myconvdilanet = MyConvdilaNet()
print(myconvdilanet)
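The shapes in the comments follow the standard Conv2d output-size formula H_out = floor((H_in + 2*padding - dilation*(kernel_size - 1) - 1)/stride) + 1; a small sketch checks the numbers above:
def conv_out(h, k=3, s=1, p=0, d=1):
    # PyTorch Conv2d output-size formula for one spatial dimension
    return (h + 2 * p - d * (k - 1) - 1) // s + 1

print(conv_out(28, p=1, d=2))  # 26; avg-pooling 2x2 then gives 13
print(conv_out(13, p=0, d=2))  # 9;  avg-pooling 2x2 then gives 4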
# Train the model
optimizer = Adam(myconvdilanet.parameters(),lr=0.0003)
criterion = nn.CrossEntropyLoss()
myconvdilanet, train_process = train_model(myconvdilanet, train_loader, 0.8, criterion, optimizer, num_epochs=25)
# Visualize the loss and accuracy during training
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(train_process.epoch, train_process.train_loss_all, "ro-", label="Train loss")
plt.plot(train_process.epoch,train_process.val_loss_all,"bs-",label="Val loss")
plt.legend()
plt.xlabel("epoch")
plt.ylabel("loss")
plt.subplot(1,2,2)
plt.plot(train_process.epoch, train_process.train_acc_all, "ro-", label="Train acc")
plt.plot(train_process.epoch,train_process.val_acc_all,"bs-",label="Val acc")
plt.xlabel("epoch")
plt.ylabel("acc")
plt.legend()
plt.show()
# Predict on the test set and visualize the results
myconvdilanet.eval()
output = myconvdilanet(test_data_x)
pre_lab = torch.argmax(output,1)
acc = accuracy_score(test_data_y,pre_lab)
print("在测试集上的预测精度为:",acc)
# Compute and visualize the confusion matrix
conf_mat = confusion_matrix(test_data_y,pre_lab)
df_cm = pd.DataFrame(conf_mat,index=class_label,columns=class_label)
heatmap = sns.heatmap(df_cm,annot=True,fmt="d",cmap="YlGnBu")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right')
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right')
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()