【ReID】【代码注释】训练数据和主程序 deep-person-reid/train_img_model_xent.py

622 阅读6分钟

源码URL: github.com/michuanhaoh…

以下是对训练数据处理与主程序部分的源码阅读注释;注释基于当时的阅读理解,内容可能与最新源码略有出入。

from __future__ import absolute_import
import os
import sys
import time
import datetime
import argparse
import os.path as osp
import numpy as np

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn  # cuda下面有一个更小的库,cudnn。若想更好地发挥dnn的性能,可以使用
from torch.optim import lr_scheduler

import models
from util.losses import CrossEntropyLoss
from util import data_manager
from util import transforms as T
from util.dataset_loader import ImageDataset
from util.utils import Logger
from util.utils import AverageMeter, Logger, save_checkpoint
from util.eval_metrics import evaluate
from util.optimizers import init_optim


from IPython import embed

parser = argparse.ArgumentParser(description='Train reid with cross entropy loss')
# Datasets
parser.add_argument('--root', type=str, default='F:/Market-1501', help="root path to data directory")
parser.add_argument('-d', '--dataset', type=str, default='market1501',
                    choices=data_manager.get_names())
parser.add_argument('-j', '--workers', default=4, type=int,  # number of PyTorch DataLoader workers: more workers load data faster but consume more resources
                    help="number of data loading workers (default: 4)")
parser.add_argument('--height', type=int, default=256,  # image height
                    help="height of an image (default: 256)")
parser.add_argument('--width', type=int, default=128,  # image width
                    help="width of an image (default: 128)")


# CUHK03-specific setting (ignore it)  # only needed for the CUHK03 dataset; unused here
parser.add_argument('--split-id', type=int, default=0, help="split index")
parser.add_argument('--cuhk03-labeled', action='store_true',
                    help="whether to use labeled images, if false, detected images are used (default: False)")
parser.add_argument('--cuhk03-classic-split', action='store_true',
                    help="whether to use classic split by Li et al. CVPR'14 (default: False)")
parser.add_argument('--use-metric-cuhk03', action='store_true',
                    help="whether to use cuhk03-metric (default: False)")

# Optimization options  # training settings
parser.add_argument('--labelsmooth', action='store_true', help="label smooth")  # small anti-overfitting trick; unused here
parser.add_argument('--optim', type=str, default='adam', help="optimization algorithm (see optimizers.py)")  # which optimizer to use
parser.add_argument('--max-epoch', default=60, type=int,  # maximum number of epochs to train
                    help="maximum epochs to run")
parser.add_argument('--start-epoch', default=0, type=int,  # epoch index to start training from
                    help="manual epoch number (useful on restarts)")
parser.add_argument('--train-batch', default=32, type=int,  # training batch size
                    help="train batch size")
parser.add_argument('--test-batch', default=32, type=int, help="test batch size")  # test batch size
parser.add_argument('--lr', '--learning-rate', default=0.0002, type=float,  # learning rate
                    help="initial learning rate")
parser.add_argument('--stepsize', default=20, type=int,  # decay the learning rate every this many epochs
                    help="stepsize to decay learning rate (>0 means this is enabled)")
parser.add_argument('--gamma', default=0.1, type=float,  # learning-rate decay factor: each decay multiplies lr by 0.1
                    help="learning rate decay")
parser.add_argument('--weight-decay', default=5e-04, type=float,  # L2 regularization against overfitting
                    help="weight decay (default: 5e-04)")
# Architecture
parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.get_names())  # which model architecture to use
# Miscs
parser.add_argument('--print-freq', type=int, default=10, help="print frequency")  # log every this many batches
parser.add_argument('--seed', type=int, default=1, help="manual seed")  # fix random seeds so results are reproducible
parser.add_argument('--resume', type=str, default='', metavar='PATH')  # checkpoint path to resume training from
parser.add_argument('--evaluate', action='store_true', default=True, help="evaluation only")  # evaluation-only mode. NOTE(review): default=True plus action='store_true' makes the script evaluate-only and the flag impossible to disable from the CLI — confirm this deviation from upstream (which defaults to training) is intended
parser.add_argument('--eval-step', type=int, default=-1,   # run evaluation every N epochs; default -1 means evaluate only after training finishes
                    help="run evaluation for every N epochs (set to -1 to test after training)")
parser.add_argument('--start-eval', type=int, default=0, help="start to evaluate after specific epoch")  # epoch after which periodic evaluation starts
parser.add_argument('--save-dir', type=str, default='log')  # directory for logs and checkpoints
parser.add_argument('--use-cpu', action='store_true', help="use cpu")  # force CPU even if a GPU is available
parser.add_argument('--gpu-devices', default='0', type=str, help='gpu device ids for CUDA_VISIBLE_DEVICES')  # which GPU(s) to use; default GPU 0

args = parser.parse_args()

def test(model, queryloader, galleryloader, use_gpu, ranks=(1, 5, 10, 20)):
    """Evaluate `model` on the query/gallery sets and report CMC ranks and mAP.

    Extracts L2-normalized features for both sets, builds the squared Euclidean
    distance matrix between query and gallery features, and calls `evaluate`
    for the final metrics.

    Args:
        model: feature-extraction network; called as `model(imgs)`.
        queryloader / galleryloader: DataLoaders yielding (imgs, pids, camids).
        use_gpu: move image batches to CUDA when True.
        ranks: CMC ranks to print.

    Returns:
        Rank-1 accuracy (`cmc[0]`).
    """
    batch_time = AverageMeter()

    model.eval()  # inference mode

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))
    # feature normalization (L2, with epsilon for numerical safety)
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)
    m, n = qf.size(0), gf.size(0)
    # ||q||^2 + ||g||^2 - 2 q.g == squared Euclidean distance.
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword beta/alpha replaces the positional addmm_(1, -2, ...) signature,
    # which is deprecated and removed in modern PyTorch.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]



def main():
    """Entry point: set up logging, data, model and optimizer, then train and/or evaluate."""
    use_gpu = torch.cuda.is_available()  # is a GPU available
    if args.use_cpu: use_gpu = False  # honor --use-cpu even when a GPU exists
    pin_memory = True if use_gpu else False  # pin host memory for faster host-to-GPU transfers when using a GPU

    if not args.evaluate:  # training mode
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))  # tee stdout into the training log file
    else:  # evaluation-only mode
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))  # tee stdout into the test log file
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))  # report the selected GPU(s)
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices  # restrict CUDA to the selected device(s)
        cudnn.benchmark = True  # let cuDNN auto-tune the fastest convolution algorithms
        torch.cuda.manual_seed_all(args.seed)  # seed CUDA RNGs so results are reproducible
    else:
        print("Currently using CPU (GPU is highly recommended)")

    dataset = data_manager.init_img_dataset(root=args.root, name=args.dataset, split_id=args.split_id,  # build the dataset (train/query/gallery splits)
        cuhk03_labeled=args.cuhk03_labeled, cuhk03_classic_split=args.cuhk03_classic_split,)


    # dataloaders & augmentation for train / query / gallery
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),  # random crop (defined in util.transforms)
        T.RandomHorizontalFlip(),  # random horizontal flip
        T.ToTensor(),  # convert to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # per-channel normalization with the conventional ImageNet statistics
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),  # no augmentation at test time, but still resize to the target size
        T.ToTensor(),  # convert to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # per-channel normalization with the conventional ImageNet statistics
    ])

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True  # drop_last discards the final incomplete batch: with 100 images and
                                               # batch size 32, the 4 images left after 3 batches are skipped this
                                               # epoch; they may be sampled again next epoch, so over enough epochs
                                               # every image is used with equal probability
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False, shuffle=False  # every image must be evaluated, so drop_last=False
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False, shuffle=False
    )

    # build the model
    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'softmax'})
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))  # parameter count in millions

    criterion_class = nn.CrossEntropyLoss()
    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)  # all model parameters participate in updates
    """
    optimizer = init_optim(args.optim, nn.Sequentail([
        model.conv1,
        model.conv2,
    ]))  # 若为此,只更新两层
    """

    if args.stepsize > 0:  # learning-rate schedule enabled
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)  # step decay of the learning rate

    start_epoch = args.start_epoch  # initial epoch

    if args.resume:  # resume training from a checkpoint
        print("Loading checkpoint from '{}'".format(args.resume))  #
        checkpoint = torch.load(args.resume)  # load the checkpoint file
        model.load_state_dict(checkpoint['state_dict'])  #
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()  # data-parallel over the visible GPUs
#        model.module.parameters()  # after DataParallel, parameters are reached through .module

    if args.evaluate:  # evaluation-only mode
        print('Evaluate only!')
        test(model, queryloader, galleryloader, use_gpu)  # run the evaluation
        return 0

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_class, optimizer, trainloader, use_gpu)  # train one epoch
        train_time += round(time.time() - start_train_time)  # accumulate pure training time

        if args.stepsize > 0: scheduler.step()  # apply the learning-rate decay if enabled

        # evaluate every args.eval_step epochs (and always after the final epoch)
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)  # evaluate; returns rank-1 accuracy
            is_best = rank1 > best_rank1
            if is_best:  # track the best result so far
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:  # DataParallel wraps the model,
                state_dict = model.module.state_dict()  # so the parameters live under .module
            else:  # plain model on CPU
                state_dict = model.state_dict()  # parameters are directly accessible
            # save a checkpoint
            save_checkpoint({
                'state_dict': state_dict,  # current parameters
                'rank1': rank1,  # current rank-1 accuracy
                'epoch': epoch,  # current epoch
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))  # bundle and write

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))


# 训练函数
def train(epoch, model, criterion_class, optimizer, trainloader, use_gpu):
    """Train `model` for one epoch with cross-entropy classification loss.

    Camera ids yielded by the loader are discarded (not needed for xent
    training). A progress line is printed every `args.print_freq` batches.
    """
    model.train()  # training mode: dropout active, BN stats updating

    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    tick = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        # time spent fetching this batch from the loader
        data_time.update(time.time() - tick)

        # forward, loss, backward, parameter update
        outputs = model(imgs)
        loss = criterion_class(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), pids.size(0))
        # full iteration time (data loading + forward + backward)
        batch_time.update(time.time() - tick)
        tick = time.time()

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader),
                   batch_time=batch_time, data_time=data_time, loss=losses))


# Script entry point: only run training/evaluation when executed directly.
if __name__ == '__main__':
    main()


运行参考如下:

train_class.png