Dive into Deep Learning (D2L) V2 by Mu Li - Hands-on Kaggle Competition: Dog Breed Identification (ImageNet Dogs) with Code
I. Hands-on Kaggle Competition: Dog Breed Identification (ImageNet Dogs)
1. Introduction
In this competition we identify 120 different dog breeds. The dataset is a subset of ImageNet; unlike the images in CIFAR-10, ImageNet images are taller and wider, and their sizes vary. Competition page: https://www.kaggle.com/c/dog-breed-identification
The competition data are split into a training set and a test set, containing 10222 and 10357 RGB (color) JPEG images, respectively. The training set covers 120 dog breeds, such as Labrador, Poodle, Dachshund, Samoyed, Husky, Chihuahua, and Yorkshire Terrier.
2. Obtaining the dataset
After downloading the dataset, extract it into the ../data folder. The directory layout is shown below; the train/ and test/ folders contain the training and test dog images, and labels.csv holds the labels of the training images:
1) ../data/dog-breed-identification/labels.csv
2) ../data/dog-breed-identification/sample_submission.csv
3) ../data/dog-breed-identification/train
4) ../data/dog-breed-identification/test
Below we use only a small sample of the full dataset.
# Obtain the dataset
import collections
import math
import os
import shutil

import d2l.torch
import torch
import torch.nn.functional
import torch.utils.data
import torchvision
import torchvision.transforms
from torch import nn
import pandas as pd

d2l.torch.DATA_HUB['dog_tiny'] = (d2l.torch.DATA_URL + 'kaggle_dog_tiny.zip',
                                  '0cb91d09b814ecdc07b50f31f8dcad3e81d6a86d')
# If you are using the full Kaggle competition dataset, change this variable to False
demo = True
if demo:
    data_dir = d2l.torch.download_extract('dog_tiny')
else:
    data_dir = '../data/dog-breed-identification'

def read_csv_data(fname):
    with open(fname, 'r') as f:
        # Skip the header line, then read every remaining line to get
        # each image id and its label
        lines = f.readlines()[1:]
    tokens = [line.rstrip().split(',') for line in lines]
    return dict(((name, label) for name, label in tokens))

labels = read_csv_data(os.path.join(data_dir, 'labels.csv'))
print('number of samples:', len(labels))
print('number of classes:', len(set(labels.values())))
3. Organizing the dataset
We split a validation set out of the original training set, then copy the images into subfolders grouped by label; the resulting directory layout can be inspected with the sketch after the code.
def copy_file(fname, target_dir):
    # Create the target folder; if it already exists, do nothing
    os.makedirs(name=target_dir, exist_ok=True)
    # Copy the source image into the target folder
    shutil.copy(fname, target_dir)

# Split part of the training images off as a validation set and copy them
# into the corresponding folders
def split_copy_train_valid(data_dir, labels, split_to_valid_ratio):
    # labels.values() holds the label of every training image. Count the examples
    # per class with collections.Counter(), sort from most to least common, and
    # take the size of the smallest class
    split_num = collections.Counter(labels.values()).most_common()[-1][1]
    # Number of examples per class to move into the validation set
    num_valid_per_label = max(1, math.floor(split_num * split_to_valid_ratio))
    valid_label_count = {}
    for train_file in os.listdir(os.path.join(data_dir, 'train')):
        # Label of the current image
        label = labels[train_file.split('.')[0]]
        train_file_path = os.path.join(data_dir, 'train', train_file)
        absolute_path = os.path.join(data_dir, 'train_valid_test')
        # Copy every training image into the 'train_valid' folder
        copy_file(train_file_path, os.path.join(absolute_path, 'train_valid', label))
        if label not in valid_label_count or valid_label_count[label] < num_valid_per_label:
            # Copy the image into the 'valid' folder
            copy_file(train_file_path, os.path.join(absolute_path, 'valid', label))
            valid_label_count[label] = valid_label_count.get(label, 0) + 1
        else:
            # Copy the image into the 'train' folder
            copy_file(train_file_path, os.path.join(absolute_path, 'train', label))
    return num_valid_per_label

# Copy the test images into the target folder
def copy_test(data_dir):
    for test_file in os.listdir(os.path.join(data_dir, 'test')):
        # All test images go into a single 'unknown' class folder under 'test'
        copy_file(os.path.join(data_dir, 'test', test_file),
                  os.path.join(data_dir, 'train_valid_test', 'test', 'unknown'))

# Read the training labels, split off the validation set, and organize the training set
def copy_ImageNetDogs_data(data_dir, split_to_valid_ratio):
    labels = read_csv_data(fname=os.path.join(data_dir, 'labels.csv'))
    split_copy_train_valid(data_dir, labels, split_to_valid_ratio)
    copy_test(data_dir)

batch_size = 32 if demo else 128
split_to_valid_ratio = 0.1
copy_ImageNetDogs_data(data_dir, split_to_valid_ratio)
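To verify the resulting layout, here is a minimal sketch (assuming the copy step above has finished): train/, valid/ and train_valid/ should each contain one subfolder per breed, while test/ contains a single unknown/ folder, and ImageFolder will later treat each subfolder name as a class label.

# Minimal sketch: inspect the folder structure created above.
for folder in ['train', 'valid', 'train_valid', 'test']:
    path = os.path.join(data_dir, 'train_valid_test', folder)
    classes = sorted(os.listdir(path))
    print(f'{folder}: {len(classes)} class folders, e.g. {classes[:2]}')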
4. Image augmentation
transform_train = torchvision.transforms.Compose([
    # Randomly crop a region covering 0.08 to 1.0 of the original area, with
    # aspect ratio between 3/4 and 4/3, then resize it to a new 224x224 image
    torchvision.transforms.RandomResizedCrop(224, scale=(0.08, 1.0),
                                             ratio=(3.0/4.0, 4.0/3.0)),
    torchvision.transforms.RandomHorizontalFlip(),
    # Randomly jitter brightness, contrast, saturation and hue
    torchvision.transforms.ColorJitter(brightness=0.4, contrast=0.4,
                                       saturation=0.4, hue=0.4),
    torchvision.transforms.ToTensor(),
    # Standardize each channel of the image
    torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
])
transform_test = torchvision.transforms.Compose([
    # Resize the shorter side to 256, then crop a 224x224 patch from the center
    torchvision.transforms.Resize(256),
    torchvision.transforms.CenterCrop(224),
    torchvision.transforms.ToTensor(),
    # Standardize with the RGB channel means and standard deviations of ImageNet,
    # because the pretrained model was trained on images normalized the same way
    torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
])
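To check what these pipelines produce, here is a minimal sketch that feeds a synthetic image (random noise standing in for a real training photo) through both transforms; each should yield a 3x224x224 tensor:

from PIL import Image
import numpy as np

# A synthetic 400x300 RGB image stands in for a real dataset photo.
dummy_img = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
print(transform_train(dummy_img).shape)  # torch.Size([3, 224, 224])
print(transform_test(dummy_img).shape)   # torch.Size([3, 224, 224])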
5. Reading the dataset
After organizing the dataset, we use ImageFolder to read the organized datasets of raw image files.
# Reorganize the datasets with ImageFolder
train_datasets, train_valid_datasets = [torchvision.datasets.ImageFolder(
    root=os.path.join(data_dir, 'train_valid_test', folder),
    transform=transform_train) for folder in ['train', 'train_valid']]
test_datasets, valid_datasets = [torchvision.datasets.ImageFolder(
    root=os.path.join(data_dir, 'train_valid_test', folder),
    transform=transform_test) for folder in ['test', 'valid']]
# Create the data iterators
train_iter, train_valid_iter = [torch.utils.data.DataLoader(
    dataset=ds, batch_size=batch_size, shuffle=True, drop_last=True)
    for ds in [train_datasets, train_valid_datasets]]
test_iter, valid_iter = [torch.utils.data.DataLoader(
    dataset=ds, batch_size=batch_size, shuffle=False, drop_last=False)
    for ds in [test_datasets, valid_datasets]]
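A quick shape check on a single mini-batch (a minimal sketch; with the small sample the batch size is 32):

# Pull one mini-batch to confirm the loader output shapes.
X, y = next(iter(train_iter))
print(X.shape, y.shape)  # e.g. torch.Size([32, 3, 224, 224]) torch.Size([32])
print('number of classes:', len(train_datasets.classes))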
6. Fine-tuning a pretrained model
Since this competition's dataset is a subset of ImageNet, we can use a model pretrained on the full ImageNet dataset to extract image features and feed them into a small custom output network. Here we pick a pretrained ResNet-34 and reuse it as a feature extractor (in the code below its 1000-dimensional output serves as the features), then append a small trainable custom head, for example two stacked fully connected layers. Because only the head is trained, this saves the time and memory that would otherwise go into computing gradients for the backbone.
def get_net(devices):
    finetuning_net = nn.Sequential()
    finetuning_net.features = torchvision.models.resnet34(pretrained=True)
    # Define a new output network with 120 output classes
    finetuning_net.output_layer = nn.Sequential(
        nn.Linear(in_features=1000, out_features=256),
        nn.ReLU(),
        nn.Linear(in_features=256, out_features=120))
    # Move the model parameters to the GPU used for computation
    finetuning_net = finetuning_net.to(devices[0])
    # Freeze the weights of every layer except the custom output network
    for param in finetuning_net.features.parameters():
        param.requires_grad = False
    return finetuning_net

# The pretrained model's output (the extracted features) is fed into our small
# custom output network, whose output is used to compute the loss. With
# reduction='none' the loss keeps one value per sample in the batch instead of
# averaging or summing.
loss = nn.CrossEntropyLoss(reduction='none')

# Sum the losses between predictions and true labels over the validation set,
# then divide by the number of samples
def evaluate_loss(data_iter, net, device):
    loss_sum, num = 0.0, 0
    for X, y in data_iter:
        X = X.to(device)
        y = y.to(device)
        y_hat = net(X)
        loss_sum += loss(y_hat, y).sum()
        num += y.numel()
    return (loss_sum / num).to('cpu')
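To confirm that only the custom head will be trained, here is a minimal sketch (net_check is a throwaway instance created just for counting; the total count depends on the torchvision ResNet-34 implementation):

# Only the two Linear layers of the custom head (1000*256+256 plus 256*120+120
# = 287,096 weights and biases) should still require gradients.
net_check = get_net(devices=d2l.torch.try_all_gpus())
num_trainable = sum(p.numel() for p in net_check.parameters() if p.requires_grad)
num_total = sum(p.numel() for p in net_check.parameters())
print(f'trainable parameters: {num_trainable} of {num_total}')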
7. Defining the training function
We select the model and tune the hyperparameters according to its performance on the validation set. The training function train only updates the parameters of the custom output network defined above, because requires_grad is False for all other parameters.
def train(net, train_iter, valid_iter, num_epochs, lr, weight_decay, lr_period,
          lr_decay, devices):
    # Optimizer: SGD, updating only the parameters of the custom output network
    optim = torch.optim.SGD(
        (param for param in net.parameters() if param.requires_grad),
        lr=lr, momentum=0.9, weight_decay=weight_decay)
    # Every lr_period epochs, multiply the learning rate by lr_decay
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optim,
                                                   step_size=lr_period,
                                                   gamma=lr_decay)
    timer, num_batches = d2l.torch.Timer(), len(train_iter)
    legend = ['train loss']
    if valid_iter is not None:
        legend.append('valid loss')
    animator = d2l.torch.Animator(xlabel='epoch', xlim=[1, num_epochs],
                                  legend=legend)
    # Use all available GPUs for computation
    net = nn.DataParallel(module=net, device_ids=devices).to(devices[0])
    for epoch in range(num_epochs):
        accumulator = d2l.torch.Accumulator(2)
        net.train()  # switch the network to training mode
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            optim.zero_grad()
            X, y = X.to(devices[0]), y.to(devices[0])
            y_hat = net(X)
            l_sum = loss(y_hat, y).sum()
            l_sum.backward()
            optim.step()
            accumulator.add(l_sum, y.shape[0])
            timer.stop()
            if i % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (accumulator[0] / accumulator[1], None))
        net.eval()  # after each epoch, evaluate the model on the validation set
        measures = f'train loss {accumulator[0]/accumulator[1]:.3f},\n'
        if valid_iter is not None:
            valid_loss = evaluate_loss(valid_iter, net, devices[0])
            animator.add(epoch + 1, (None, valid_loss.detach().cpu()))
            measures += f'valid loss {valid_loss:.3f},'
        lr_scheduler.step()  # let the scheduler decide whether to decay the learning rate
    print(measures + f'\n{num_epochs*accumulator[1]/timer.sum():.1f} '
          f'examples/sec on {str(devices[0])}')
8. Training and validating the model
lr, weight_decay, epochs = 1e-4, 1e-4, 10
# lr_period and lr_decay are set to 2 and 0.9, so the optimizer's learning rate
# is multiplied by 0.9 after every 2 epochs
lr_decay, lr_period, devices = 0.9, 2, d2l.torch.try_all_gpus()
net = get_net(devices)
train(net, train_iter, valid_iter, epochs, lr, weight_decay, lr_period, lr_decay, devices)
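For intuition about what StepLR does with these settings, the minimal sketch below prints the decayed learning rates independently of the training loop:

# With lr=1e-4, lr_period=2 and lr_decay=0.9, the learning rate used in
# epoch e (0-indexed) is lr * lr_decay ** (e // lr_period).
for epoch in range(10):
    print(f'epoch {epoch + 1}: lr = {1e-4 * 0.9 ** (epoch // 2):.2e}')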
9. Predicting on the test set
Once suitable hyperparameters are chosen, we retrain the model on all the labeled data (including the validation set), and the trained network then predicts classes for the test set.
# Test-set prediction
net = get_net(devices)
train(net, train_valid_iter, None, epochs, lr, weight_decay, lr_period, lr_decay, devices)
preds = []
for X, _ in test_iter:
    X = X.to(devices[0])
    # Convert the logits into per-class probabilities along dim=1
    output = torch.nn.functional.softmax(net(X), dim=1)
    preds.extend(output.cpu().detach().numpy())
ids = sorted(os.listdir(os.path.join(data_dir, 'train_valid_test', 'test', 'unknown')))
with open('submission.csv', 'w') as f:
    f.write('id,' + ','.join(train_valid_datasets.classes) + '\n')
    for id, pred in zip(ids, preds):
        f.write(id.split('.')[0] + ',' + ','.join([str(num) for num in pred]) + '\n')
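To sanity-check the submission format before uploading, here is a minimal sketch using pandas (assuming the prediction cell above has been run so submission.csv exists in the working directory); the file should contain one id column plus 120 probability columns, one row per test image:

import pandas as pd

submission = pd.read_csv('submission.csv')
print(submission.shape)                      # (number of test images, 1 + 120)
print(submission.columns[0])                 # 'id'
print(float(submission.iloc[0, 1:].sum()))   # probabilities in a row sum to ~1.0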
10. Summary
- Images in ImageNet are larger than those in CIFAR-10, so image-augmentation operations may need to be adapted to the dataset and the model.
- To classify a subset of ImageNet, we can use a model pretrained on the full ImageNet dataset to extract features and train only the parameters of a custom output network, which reduces computation time and memory usage.
11. Complete code for training on only the small sample of the full dataset (lr, weight_decay, epochs, batch_size = 1e-4, 1e-4, 10, 32)
import collections
import math
import os.path
import shutil

import d2l.torch
import torch
import torch.nn.functional
import torch.utils.data
import torchvision.transforms
from torch import nn
import pandas as pd

d2l.torch.DATA_HUB['dog_tiny'] = (d2l.torch.DATA_URL + 'kaggle_dog_tiny.zip',
                                  '0cb91d09b814ecdc07b50f31f8dcad3e81d6a86d')
# Change to False to use the full Kaggle competition dataset
demo = True
if demo:
    data_dir = d2l.torch.download_extract('dog_tiny')
else:
    data_dir = '../data/dog-breed-identification'

def read_csv_data(fname):
    with open(fname, 'r') as f:
        # Skip the header line, then read every remaining line to get
        # each image id and its label
        lines = f.readlines()[1:]
    tokens = [line.rstrip().split(',') for line in lines]
    return dict(((name, label) for name, label in tokens))

labels = read_csv_data(os.path.join(data_dir, 'labels.csv'))
print('number of samples:', len(labels))
print('number of classes:', len(set(labels.values())))

def copy_file(fname, target_dir):
    # Create the target folder; if it already exists, do nothing
    os.makedirs(name=target_dir, exist_ok=True)
    # Copy the source image into the target folder
    shutil.copy(fname, target_dir)

# Split part of the training images off as a validation set and copy them
# into the corresponding folders
def split_copy_train_valid(data_dir, labels, split_to_valid_ratio):
    # Size of the smallest class in the training set
    split_num = collections.Counter(labels.values()).most_common()[-1][1]
    # Number of examples per class to move into the validation set
    num_valid_per_label = max(1, math.floor(split_num * split_to_valid_ratio))
    valid_label_count = {}
    for train_file in os.listdir(os.path.join(data_dir, 'train')):
        # Label of the current image
        label = labels[train_file.split('.')[0]]
        train_file_path = os.path.join(data_dir, 'train', train_file)
        absolute_path = os.path.join(data_dir, 'train_valid_test')
        # Copy every training image into the 'train_valid' folder
        copy_file(train_file_path, os.path.join(absolute_path, 'train_valid', label))
        if label not in valid_label_count or valid_label_count[label] < num_valid_per_label:
            # Copy the image into the 'valid' folder
            copy_file(train_file_path, os.path.join(absolute_path, 'valid', label))
            valid_label_count[label] = valid_label_count.get(label, 0) + 1
        else:
            # Copy the image into the 'train' folder
            copy_file(train_file_path, os.path.join(absolute_path, 'train', label))
    return num_valid_per_label

# Copy the test images into the target folder
def copy_test(data_dir):
    for test_file in os.listdir(os.path.join(data_dir, 'test')):
        # All test images go into a single 'unknown' class folder under 'test'
        copy_file(os.path.join(data_dir, 'test', test_file),
                  os.path.join(data_dir, 'train_valid_test', 'test', 'unknown'))

def copy_ImageNetDogs_data(data_dir, split_to_valid_ratio):
    labels = read_csv_data(fname=os.path.join(data_dir, 'labels.csv'))
    split_copy_train_valid(data_dir, labels, split_to_valid_ratio)
    copy_test(data_dir)

batch_size = 32 if demo else 128
split_to_valid_ratio = 0.1
copy_ImageNetDogs_data(data_dir, split_to_valid_ratio)

transform_train = torchvision.transforms.Compose([
    # Randomly crop a region covering 0.08 to 1.0 of the original area, with
    # aspect ratio between 3/4 and 4/3, then resize it to a new 224x224 image
    torchvision.transforms.RandomResizedCrop(224, scale=(0.08, 1.0),
                                             ratio=(3.0/4.0, 4.0/3.0)),
    torchvision.transforms.RandomHorizontalFlip(),
    # Randomly jitter brightness, contrast, saturation and hue
    torchvision.transforms.ColorJitter(brightness=0.4, contrast=0.4,
                                       saturation=0.4, hue=0.4),
    torchvision.transforms.ToTensor(),
    # Standardize each channel of the image
    torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
])
transform_test = torchvision.transforms.Compose([
    # Resize the shorter side to 256, then crop a 224x224 patch from the center
    torchvision.transforms.Resize(256),
    torchvision.transforms.CenterCrop(224),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
])

# Reorganize the datasets with ImageFolder
train_datasets, train_valid_datasets = [torchvision.datasets.ImageFolder(
    root=os.path.join(data_dir, 'train_valid_test', folder),
    transform=transform_train) for folder in ['train', 'train_valid']]
test_datasets, valid_datasets = [torchvision.datasets.ImageFolder(
    root=os.path.join(data_dir, 'train_valid_test', folder),
    transform=transform_test) for folder in ['test', 'valid']]
# Create the data iterators
train_iter, train_valid_iter = [torch.utils.data.DataLoader(
    dataset=ds, batch_size=batch_size, shuffle=True, drop_last=True)
    for ds in [train_datasets, train_valid_datasets]]
test_iter, valid_iter = [torch.utils.data.DataLoader(
    dataset=ds, batch_size=batch_size, shuffle=False, drop_last=False)
    for ds in [test_datasets, valid_datasets]]

def get_net(devices):
    finetuning_net = nn.Sequential()
    finetuning_net.features = torchvision.models.resnet34(pretrained=True)
    # Define a new output network with 120 output classes
    finetuning_net.output_layer = nn.Sequential(
        nn.Linear(in_features=1000, out_features=256),
        nn.ReLU(),
        nn.Linear(in_features=256, out_features=120))
    # Move the model parameters to the GPU used for computation
    finetuning_net = finetuning_net.to(devices[0])
    # Freeze the weights of every layer except the custom output network
    for param in finetuning_net.features.parameters():
        param.requires_grad = False
    return finetuning_net

loss = nn.CrossEntropyLoss(reduction='none')

def evaluate_loss(data_iter, net, device):
    loss_sum, num = 0.0, 0
    for X, y in data_iter:
        X = X.to(device)
        y = y.to(device)
        y_hat = net(X)
        loss_sum += loss(y_hat, y).sum()
        num += y.numel()
    return (loss_sum / num).to('cpu')

def train(net, train_iter, valid_iter, num_epochs, lr, weight_decay, lr_period,
          lr_decay, devices):
    # Optimizer: SGD, updating only the parameters of the custom output network
    optim = torch.optim.SGD(
        (param for param in net.parameters() if param.requires_grad),
        lr=lr, momentum=0.9, weight_decay=weight_decay)
    # Every lr_period epochs, multiply the learning rate by lr_decay
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optim,
                                                   step_size=lr_period,
                                                   gamma=lr_decay)
    timer, num_batches = d2l.torch.Timer(), len(train_iter)
    legend = ['train loss']
    if valid_iter is not None:
        legend.append('valid loss')
    animator = d2l.torch.Animator(xlabel='epoch', xlim=[1, num_epochs],
                                  legend=legend)
    # Use all available GPUs for computation
    net = nn.DataParallel(module=net, device_ids=devices).to(devices[0])
    for epoch in range(num_epochs):
        accumulator = d2l.torch.Accumulator(2)
        net.train()  # switch the network to training mode
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            optim.zero_grad()
            X, y = X.to(devices[0]), y.to(devices[0])
            y_hat = net(X)
            l_sum = loss(y_hat, y).sum()
            l_sum.backward()
            optim.step()
            accumulator.add(l_sum, y.shape[0])
            timer.stop()
            if i % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (accumulator[0] / accumulator[1], None))
        net.eval()  # after each epoch, evaluate the model on the validation set
        measures = f'train loss {accumulator[0]/accumulator[1]:.3f},\n'
        if valid_iter is not None:
            valid_loss = evaluate_loss(valid_iter, net, devices[0])
            animator.add(epoch + 1, (None, valid_loss.detach().cpu()))
            measures += f'valid loss {valid_loss:.3f},'
        lr_scheduler.step()  # let the scheduler decide whether to decay the learning rate
    print(measures + f'\n{num_epochs*accumulator[1]/timer.sum():.1f} '
          f'examples/sec on {str(devices[0])}')

lr, weight_decay, epochs = 1e-4, 1e-4, 10
lr_decay, lr_period, devices = 0.9, 2, d2l.torch.try_all_gpus()
net = get_net(devices)
train(net, train_iter, valid_iter, epochs, lr, weight_decay, lr_period, lr_decay, devices)

# # Test-set prediction
# net = get_net(devices)
# train(net, train_valid_iter, None, epochs, lr, weight_decay, lr_period, lr_decay, devices)
# preds = []
# for X, _ in test_iter:
#     X = X.to(devices[0])
#     output = torch.nn.functional.softmax(net(X), dim=1)
#     preds.extend(output.cpu().detach().numpy())
# ids = sorted(os.listdir(os.path.join(data_dir, 'train_valid_test', 'test', 'unknown')))
# with open('submission.csv', 'w') as f:
#     f.write('id,' + ','.join(train_valid_datasets.classes) + '\n')
#     for id, pred in zip(ids, preds):
#         f.write(id.split('.')[0] + ',' + ','.join([str(num) for num in pred]) + '\n')
12. Complete code for training on the full Kaggle competition dataset (lr, weight_decay, epochs, batch_size = 1e-4, 1e-4, 10, 128)
Apart from its comments, the script is identical to the listing in Section 11 except that demo = False, so data_dir points to '../data/dog-breed-identification' and batch_size becomes 128.