성능측정 함수 추가

  1. train.py에 라이브러리 추가

    from sklearn.metrics import f1_score
    
  2. train.py 파일에 코드 추가(빨간색 표기)

    # Best-so-far f1 scores, used below to decide when to checkpoint.
    val_best_f1 = 0
    train_best_f1 = 0
    
    for epoch in range(args.epochs):
            # train loop
            model.train()
            loss_value = 0
            matches = 0
    
            # Accumulate predictions/labels across all batches so f1 is
            # computed once per epoch over the full training set.
            train_y_pred = []
            train_y_true = []
    
            for idx, train_batch in enumerate(train_loader):
                inputs, labels = train_batch
                inputs = inputs.to(device)
                labels = labels.to(device)
    
                optimizer.zero_grad()
    
                outs = model(inputs)
                preds = torch.argmax(outs, dim=-1)
                loss = criterion(outs, labels)
    
                # Store every batch's predictions and ground-truth labels.
                train_y_pred.extend(preds.tolist())
                train_y_true.extend(labels.tolist())
    
                loss.backward()
                optimizer.step()
    
                loss_value += loss.item()
                matches += (preds == labels).sum().item()
                if (idx + 1) % args.log_interval == 0:
                    train_loss = loss_value / args.log_interval
                    train_acc = matches / args.batch_size / args.log_interval
                    current_lr = get_lr(optimizer)
                    print(
                        f"Epoch[{epoch}/{args.epochs}]({idx + 1}/{len(train_loader)}) || "
                        f"training loss {train_loss:4.4} || training accuracy {train_acc:4.2%} || lr {current_lr}"
                    )
                    logger.add_scalar("Train/loss", train_loss, epoch * len(train_loader) + idx)
                    logger.add_scalar("Train/accuracy", train_acc, epoch * len(train_loader) + idx)
    
                    loss_value = 0
                    matches = 0
    
            # Epoch-level macro f1 over the whole training set.
            train_f1 = f1_score(train_y_true, train_y_pred, average='macro')
    
            # Log training f1 to tensorboard.
            # NOTE: recorded per epoch (not per batch), so it appears as a
            # separate chart from the per-batch Train/* scalars.
            if train_f1 > train_best_f1:
                print(f"New best model for train_f1_score : {train_f1:4.2%}! ")
                train_best_f1 = train_f1
            logger.add_scalar("train/f1score",train_f1, epoch)
    
            scheduler.step()
    
            # val loop
            with torch.no_grad():
                print("Calculating validation results...")
                model.eval()
                val_loss_items = []
                val_acc_items = []
    
                # Accumulate predictions/labels for the validation f1_score.
                val_y_pred = []
                val_y_true = []
                
                figure = None
                for val_batch in val_loader:
                    inputs, labels = val_batch
                    inputs = inputs.to(device)
                    labels = labels.to(device)
    
                    outs = model(inputs)
                    preds = torch.argmax(outs, dim=-1)
    
                    # Store every batch's predictions and ground-truth labels.
                    val_y_pred.extend(preds.tolist())
                    val_y_true.extend(labels.tolist())
    
                    loss_item = criterion(outs, labels).item()
                    acc_item = (labels == preds).sum().item()
                    val_loss_items.append(loss_item)
                    val_acc_items.append(acc_item)
    
                    if figure is None:
                        inputs_np = torch.clone(inputs).detach().cpu().permute(0, 2, 3, 1).numpy()
                        inputs_np = dataset_module.denormalize_image(inputs_np, dataset.mean, dataset.std)
                        figure = grid_image(
                            inputs_np, labels, preds, n=16, shuffle=args.dataset != "MaskSplitByProfileDataset"
                        )
    
                val_loss = np.sum(val_loss_items) / len(val_loader)
                val_acc = np.sum(val_acc_items) / len(val_set)
                best_val_loss = min(best_val_loss, val_loss)
    
                # Epoch-level macro f1 over the whole validation set.
                val_f1 = f1_score(val_y_true, val_y_pred, average='macro')
    
                
                if val_acc > best_val_acc:
                    # Accuracy-based checkpointing intentionally disabled;
                    # the best model is now selected by f1 (below).
                    #print(f"New best model for val accuracy : {val_acc:4.2%}! saving the best model..")
                    #torch.save(model.module.state_dict(), f"{save_dir}/best.pth")
                    best_val_acc = val_acc
                
                # Checkpoint and log whenever validation f1 improves.
                if val_f1 > val_best_f1:
                    print(f"New best model for val_f1_score : {val_f1:4.2%}! saving the best model..")
                    torch.save(model.module.state_dict(), f"{save_dir}/best.pth")
                    val_best_f1 = val_f1
    
                
                torch.save(model.module.state_dict(), f"{save_dir}/last.pth")
                print(
                    f"[Val] acc : {val_acc:4.2%}, loss: {val_loss:4.2} || "
                    f"best acc : {best_val_acc:4.2%}, best loss: {best_val_loss:4.2} ||"
                    # BUG FIX: original referenced undefined `f1`; the computed
                    # value is `val_f1`, so the original raised NameError here.
                    f"f1_score : {val_f1:4.2%}"
                )
                logger.add_scalar("Val/loss", val_loss, epoch)
                logger.add_scalar("Val/accuracy", val_acc, epoch)
                logger.add_scalar("Val/f1score", val_f1, epoch)
                logger.add_figure("results", figure, epoch)
    

Loss 함수 추가

  1. loss.py 파일 init() 함수에 ‘classes=18’ 로 매개변수 값 수정

학습 실행 방법

  1. train.py 파일 실행 시 인자로 지정하여 실행

    >>> python train.py --criterion 'f1' --model 'MyModel_DenseNet18' --name 'exp_DenseNet1'