pytorch訓(xùn)練分類器(Training a Classifier with PyTorch)
發(fā)布日期:2023/9/25 9:32:57
# Environment setup and imports for the CIFAR-10 classifier script.
import os

# Work around the "duplicate OpenMP runtime" abort that occurs when PyTorch
# and another library each load their own libiomp (common on Windows/conda).
# Must be set before torch is imported.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

import matplotlib.pyplot as plt
import numpy as np
# The datasets yield PIL images in [0, 1]; convert to tensors and normalize
# each channel to [-1, 1].  Compose chains multiple image transforms.
transform = transforms.Compose(
    [transforms.ToTensor(),                                # PIL image -> float tensor in [0, 1]
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]  # per-channel (x - 0.5) / 0.5
)

# Download (if needed) and wrap the CIFAR-10 train/test splits.
trainset = torchvision.datasets.CIFAR10(root='./data/', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=0)  # shuffle each epoch
testset = torchvision.datasets.CIFAR10(root='./data/', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=True, num_workers=0)  # num_workers: loader subprocess count

# The ten CIFAR-10 class names, indexed by label id.
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
#輸出圖像的函數(shù)
def imshow(img):
    """Un-normalize a CHW image tensor and display it with matplotlib.

    Inverts the Normalize((0.5,)*3, (0.5,)*3) transform above
    (x / 2 + 0.5) and reorders CHW -> HWC for plt.imshow.
    """
    unnormalized = img / 2 + 0.5
    pixels = unnormalized.numpy()
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
# Grab one random training batch and display it.
dataiter = iter(trainloader)
# Use the built-in next(); the .next() method was removed from
# DataLoader iterators in modern PyTorch / Python 3.
images, labels = next(dataiter)

# Show the batch as a single image grid, with its class names below.
imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# 定義一個卷積神經(jīng)網(wǎng)絡(luò)
class Net(nn.Module):
    """Small LeNet-style CNN for 32x32 RGB CIFAR-10 images (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        # Two 5x5 conv layers with 2x2 max pooling between them:
        # 3x32x32 -> conv1 6x28x28 -> pool 6x14x14 -> conv2 16x10x10 -> pool 16x5x5
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: flattened 16*5*5 features -> 120 -> 84 -> 10 logits.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits for a batch of 3x32x32 images."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)  # flatten everything but the batch dim
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
# Instantiate the network, the loss, and the optimizer.
net = Net()
criterion = nn.CrossEntropyLoss()  # multi-class cross-entropy over raw logits
# NOTE(review): the misspelled name "optimzer" is kept because the training
# loop below references it.
# Momentum gives the update velocity, which can carry it past shallow
# local minima instead of stalling there.
optimzer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
# Train the network: two full passes over the training set.
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data               # one mini-batch of images + labels
        optimzer.zero_grad()                # clear gradients from the previous step
        outputs = net(inputs)               # forward pass
        loss = criterion(outputs, labels)
        # BUG FIX: loss.backward() was missing — without backprop the
        # gradients stay zero and optimzer.step() never updates the weights.
        loss.backward()
        optimzer.step()                     # apply the SGD-with-momentum update

        # Report the average loss every 2000 mini-batches.
        running_loss += loss.item()
        if i % 2000 == 1999:
            print('[%d],%5d loss:%.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
# Persist the learned weights (state_dict only, not the whole module —
# the recommended PyTorch serialization style).
PATH = './cifar_net.pth'
torch.save(net.state_dict(), PATH)
# Qualitative check on one batch from the test set.
dataiter = iter(testloader)
# BUG FIX: the original never fetched from the iterator, so the stale
# training-set `images`/`labels` were displayed instead of test data.
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
print('GroundTruth:', ' '.join('%5s' % classes[labels[j]] for j in range(4)))

# Reload the saved weights into a fresh network instance.
net = Net()
net.load_state_dict(torch.load(PATH))

# Predict: the class with the highest logit wins.
outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted:', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
# Measure top-1 accuracy over the whole test set.
correct = 0
total = 0
with torch.no_grad():  # inference only: skip gradient bookkeeping
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)       # index of the max logit per sample
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Acc:%d %%' % (100 * correct / total))
馬上咨詢: 如果您有業(yè)務(wù)方面的問題或者需求,歡迎您咨詢!我們帶來的不僅僅是技術(shù),還有行業(yè)經(jīng)驗積累。
QQ: 39764417/308460098 Phone: 13 9800 1 9844 / 135 6887 9550 聯(lián)系人:石先生/雷先生