Custom Image Data Classification by Fine-Tuning a ResNet Model

2023-12-20

# Import necessary packages.
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms
from torchsummary import summary

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import time
import os
# Device configuration.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
device(type='cuda')
# Hyper-parameter settings.
batch_size = 256
num_epochs = 30
# Image preprocessing modules.
image_transforms = {
    'train': transforms.Compose([transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
                                 transforms.RandomRotation(degrees=15),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.CenterCrop(size=224),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485, 0.456, 0.406],
                                                      [0.229, 0.224, 0.225])]),
    
    'valid':transforms.Compose([transforms.Resize(size=256),
                                transforms.CenterCrop(size=224),
                                transforms.ToTensor(),
                                transforms.Normalize([0.485, 0.456, 0.406],
                                                     [0.229, 0.224, 0.225])]),

    'test':transforms.Compose([transforms.Resize(size=256),
                               transforms.CenterCrop(size=224),
                               transforms.ToTensor(),
                               transforms.Normalize([0.485, 0.456, 0.406],
                                                    [0.229, 0.224, 0.225])])                                                     
}
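As a quick sanity check, the 'train' pipeline can be applied to a single PIL image to confirm the 3x224x224 output shape and the roughly zero-mean normalization. A minimal sketch; the image path below is a hypothetical example:

# Sanity-check the training transform on one image (the path is a hypothetical example).
sample_image = Image.open('./Datasets/train/zebra/some_image.jpg').convert('RGB')
sample_tensor = image_transforms['train'](sample_image)
print(sample_tensor.shape)                                       # torch.Size([3, 224, 224])
print(sample_tensor.mean().item(), sample_tensor.std().item())   # Roughly centered after Normalize.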
# Load the subset of the Caltech 256 dataset.
dataset = './Datasets/'   # Root directory; also reused as a filename prefix for checkpoints and plots.

# Set the dataset directory.
train_directory = os.path.join(dataset, 'train')
valid_directory = os.path.join(dataset, 'valid')
test_directory = os.path.join(dataset, 'test')

# Number of classes.
# num_classes = len(os.listdir(valid_directory))
# print(num_classes)  # num_classes is 10.

# Load Data from folders.
data = {
    'train':datasets.ImageFolder(root=train_directory,
                                 transform=image_transforms['train']),
    'valid':datasets.ImageFolder(root=valid_directory,
                                 transform=image_transforms['valid']),
    'test': datasets.ImageFolder(root=test_directory,
                                 transform=image_transforms['test'])                                                                 
}
# Define the DataLoader.
train_data_loader = torch.utils.data.DataLoader(data['train'],
                                                batch_size=batch_size,
                                                shuffle=True)
valid_data_loader = torch.utils.data.DataLoader(data['valid'],
                                                batch_size=batch_size,
                                                shuffle=True)
test_data_loader = torch.utils.data.DataLoader(data['test'],
                                               batch_size=batch_size,
                                               shuffle=False)
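Before training, pulling one batch from a loader is a cheap way to confirm tensor shapes and that labels fall in [0, num_classes). A small sketch:

# Fetch one training batch and verify its shape and label range.
images, labels = next(iter(train_data_loader))
print(images.shape)                              # torch.Size([256, 3, 224, 224]) for a full batch.
print(labels.min().item(), labels.max().item())  # Should stay within [0, 9] for 10 classes.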
# Load the pretrained ResNet50 model. ('pretrained=True' is deprecated in torchvision >= 0.13;
# the warning below suggests weights=ResNet50_Weights.DEFAULT instead.)
resnet50 = models.resnet50(pretrained=True).to(device)
print(resnet50)
/home/wsl_ubuntu/anaconda3/envs/xy_trans/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
  warnings.warn(
/home/wsl_ubuntu/anaconda3/envs/xy_trans/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet50_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet50_Weights.DEFAULT` to get the most up-to-date weights.
  warnings.warn(msg)


ResNet(
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace=True)
  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (layer1): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (layer2): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (3): Bottleneck(
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (layer3): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (3): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (4): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (5): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (layer4): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
  (fc): Linear(in_features=2048, out_features=1000, bias=True)
)
# Freeze the pretrained parameters in preparation for fine-tuning only the new head.
for param in resnet50.parameters():
    param.requires_grad = False
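To verify the freeze took effect, the trainable parameters can be counted: the total should be 0 right after freezing, and 527,114 (matching the summary printed further below) once the new head is attached. A quick sketch:

# Count trainable parameters: 0 after freezing, 527,114 once the new fc head is attached.
trainable = sum(p.numel() for p in resnet50.parameters() if p.requires_grad)
print('Trainable params:', trainable)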
# Replace the final layer of ResNet50 for transfer learning.
fc_inputs = resnet50.fc.in_features

resnet50.fc = nn.Sequential(nn.Linear(fc_inputs, 256),
                            nn.ReLU(),
                            nn.Dropout(0.4),
                            nn.Linear(256, len(os.listdir(valid_directory))),     # One output per class (10 classes here).
                            nn.LogSoftmax(dim=1))   # LogSoftmax output pairs with NLLLoss().

# Convert model to be used on GPU.
resnet50 = resnet50.to(device)
# Loss and optimizer.
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(resnet50.parameters())
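Passing all parameters works here because the frozen backbone never receives gradients, so Adam simply skips those tensors; restricting the optimizer to the trainable subset makes the intent explicit. An equivalent alternative in this setup:

# Equivalent in this setup: optimize only the parameters that still require gradients.
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, resnet50.parameters()))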
# Define the train & validate utility function.
def train_and_validate(model, loss_criterion, optimizer, epochs=25):
    """
    Function used to train and validate.
    Parameters:
        :param model: Model to train and validate.
        :param loss_criterion: Loss Criterion to minimize.
        :param optimizer: Optimizer for computing gradients.
        :param epochs: Number of epochs (default=25).
    Returns:
        model, history (per-epoch metrics), best_epoch (epoch with the lowest validation loss).
    """
    
    history = []
    best_loss = float('inf')
    best_epoch = None

    for epoch in range(epochs):
        epoch_start = time.time()
        print("Epoch: {}/{}".format(epoch+1, epochs))

        # Set to training mode.
        model.train()

        # Loss and Accuracy within the epoch.
        train_loss = 0.0
        train_acc = 0.0

        valid_loss = 0.0
        valid_acc = 0.0

        for _, (inputs, labels) in enumerate(train_data_loader):
            # Move the data pair to device.
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Forward pass and calculate loss.
            outputs = model(inputs)
            loss = loss_criterion(outputs, labels)

            # Backward and optimize.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Compute the total loss for the batch and add it to train_loss.
            train_loss += loss.item() * inputs.size(0)

            # Compute the accuracy.
            _, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))

            # Convert correct_counts to float and then compute the mean.
            acc = torch.mean(correct_counts.type(torch.FloatTensor))

            # Compute total accuracy in the whole batch and add to train_acc.
            train_acc += acc.item() * inputs.size(0)

            #print("Batch number: {:03d}, Training: Loss: {:.4f}, Accuracy: {:.4f}".format(i, loss.item(), acc.item()))

        # Validation - No gradient tracking needed.
        with torch.no_grad():

            # Set to evaluation mode.
            model.eval()

            # Validation loop.
            for _, (inputs, labels) in enumerate(valid_data_loader):
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Forward pass - compute outputs on input data using the model
                outputs = model(inputs)

                # Compute loss
                loss = loss_criterion(outputs, labels)

                # Compute the total loss for the batch and add it to valid_loss
                valid_loss += loss.item() * inputs.size(0)

                # Calculate validation accuracy
                _, predictions = torch.max(outputs.data, 1)
                correct_counts = predictions.eq(labels.data.view_as(predictions))

                # Convert correct_counts to float and then compute the mean
                acc = torch.mean(correct_counts.type(torch.FloatTensor))

                # Compute total accuracy in the whole batch and add to valid_acc
                valid_acc += acc.item() * inputs.size(0)

                #print("Validation Batch number: {:03d}, Validation: Loss: {:.4f}, Accuracy: {:.4f}".format(j, loss.item(), acc.item()))
        if valid_loss < best_loss:
            best_loss = valid_loss
            best_epoch = epoch
        
        # Find average training loss and train accuracy.
        avg_train_loss = train_loss/len(data['train'])
        avg_train_acc = train_acc/len(data['train'])

        # Find average validation loss and validation accuracy.
        avg_valid_loss = valid_loss/len(data['valid'])
        avg_valid_acc = valid_acc/len(data['valid'])

        history.append([avg_train_loss, avg_valid_loss, avg_train_acc, avg_valid_acc])

        epoch_end = time.time()

        print("Epoch : {:03d}, Training: Loss - {:.4f}, Accuracy - {:.4f}%, \n\t\tValidation : Loss - {:.4f}, Accuracy - {:.4f}%, Time: {:.4f}s".format(epoch, avg_train_loss, avg_train_acc*100, avg_valid_loss, avg_valid_acc*100, epoch_end-epoch_start))
        
        # Save a checkpoint after every epoch; best_epoch records which one to reload later.
        torch.save(model, dataset+'_model_'+str(epoch)+'.pt')
            
    return model, history, best_epoch
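One caveat on the checkpointing above: torch.save(model, ...) pickles the entire module, which ties the file to this exact class definition and file layout. Saving the state_dict is the more portable pattern; a sketch, where load_state_dict assumes a freshly built model of the same architecture:

# Sketch of state_dict-based checkpointing (more portable than pickling the whole module).
torch.save(resnet50.state_dict(), dataset + '_resnet50_state.pt')
# To restore: rebuild the same architecture first, then load the weights.
# resnet50.load_state_dict(torch.load(dataset + '_resnet50_state.pt'))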
# Print a summary of the model to be trained.
summary(resnet50, input_size=(3, 224, 224), batch_size=batch_size, device='cuda')

# Train the model for num_epochs epochs (the actual call appears further below).
# trained_model, history, best_epoch = train_and_validate(resnet50, criterion, optimizer, num_epochs)

# torch.save(history, dataset+'_history.pt')
=================================================================
Layer (type:depth-idx)                   Param #
=================================================================
├─Conv2d: 1-1                            (9,408)
├─BatchNorm2d: 1-2                       (128)
├─ReLU: 1-3                              --
├─MaxPool2d: 1-4                         --
├─Sequential: 1-5                        --
|    └─Bottleneck: 2-1                   --
|    |    └─Conv2d: 3-1                  (4,096)
|    |    └─BatchNorm2d: 3-2             (128)
|    |    └─Conv2d: 3-3                  (36,864)
|    |    └─BatchNorm2d: 3-4             (128)
|    |    └─Conv2d: 3-5                  (16,384)
|    |    └─BatchNorm2d: 3-6             (512)
|    |    └─ReLU: 3-7                    --
|    |    └─Sequential: 3-8              (16,896)
|    └─Bottleneck: 2-2                   --
|    |    └─Conv2d: 3-9                  (16,384)
|    |    └─BatchNorm2d: 3-10            (128)
|    |    └─Conv2d: 3-11                 (36,864)
|    |    └─BatchNorm2d: 3-12            (128)
|    |    └─Conv2d: 3-13                 (16,384)
|    |    └─BatchNorm2d: 3-14            (512)
|    |    └─ReLU: 3-15                   --
|    └─Bottleneck: 2-3                   --
|    |    └─Conv2d: 3-16                 (16,384)
|    |    └─BatchNorm2d: 3-17            (128)
|    |    └─Conv2d: 3-18                 (36,864)
|    |    └─BatchNorm2d: 3-19            (128)
|    |    └─Conv2d: 3-20                 (16,384)
|    |    └─BatchNorm2d: 3-21            (512)
|    |    └─ReLU: 3-22                   --
├─Sequential: 1-6                        --
|    └─Bottleneck: 2-4                   --
|    |    └─Conv2d: 3-23                 (32,768)
|    |    └─BatchNorm2d: 3-24            (256)
|    |    └─Conv2d: 3-25                 (147,456)
|    |    └─BatchNorm2d: 3-26            (256)
|    |    └─Conv2d: 3-27                 (65,536)
|    |    └─BatchNorm2d: 3-28            (1,024)
|    |    └─ReLU: 3-29                   --
|    |    └─Sequential: 3-30             (132,096)
|    └─Bottleneck: 2-5                   --
|    |    └─Conv2d: 3-31                 (65,536)
|    |    └─BatchNorm2d: 3-32            (256)
|    |    └─Conv2d: 3-33                 (147,456)
|    |    └─BatchNorm2d: 3-34            (256)
|    |    └─Conv2d: 3-35                 (65,536)
|    |    └─BatchNorm2d: 3-36            (1,024)
|    |    └─ReLU: 3-37                   --
|    └─Bottleneck: 2-6                   --
|    |    └─Conv2d: 3-38                 (65,536)
|    |    └─BatchNorm2d: 3-39            (256)
|    |    └─Conv2d: 3-40                 (147,456)
|    |    └─BatchNorm2d: 3-41            (256)
|    |    └─Conv2d: 3-42                 (65,536)
|    |    └─BatchNorm2d: 3-43            (1,024)
|    |    └─ReLU: 3-44                   --
|    └─Bottleneck: 2-7                   --
|    |    └─Conv2d: 3-45                 (65,536)
|    |    └─BatchNorm2d: 3-46            (256)
|    |    └─Conv2d: 3-47                 (147,456)
|    |    └─BatchNorm2d: 3-48            (256)
|    |    └─Conv2d: 3-49                 (65,536)
|    |    └─BatchNorm2d: 3-50            (1,024)
|    |    └─ReLU: 3-51                   --
├─Sequential: 1-7                        --
|    └─Bottleneck: 2-8                   --
|    |    └─Conv2d: 3-52                 (131,072)
|    |    └─BatchNorm2d: 3-53            (512)
|    |    └─Conv2d: 3-54                 (589,824)
|    |    └─BatchNorm2d: 3-55            (512)
|    |    └─Conv2d: 3-56                 (262,144)
|    |    └─BatchNorm2d: 3-57            (2,048)
|    |    └─ReLU: 3-58                   --
|    |    └─Sequential: 3-59             (526,336)
|    └─Bottleneck: 2-9                   --
|    |    └─Conv2d: 3-60                 (262,144)
|    |    └─BatchNorm2d: 3-61            (512)
|    |    └─Conv2d: 3-62                 (589,824)
|    |    └─BatchNorm2d: 3-63            (512)
|    |    └─Conv2d: 3-64                 (262,144)
|    |    └─BatchNorm2d: 3-65            (2,048)
|    |    └─ReLU: 3-66                   --
|    └─Bottleneck: 2-10                  --
|    |    └─Conv2d: 3-67                 (262,144)
|    |    └─BatchNorm2d: 3-68            (512)
|    |    └─Conv2d: 3-69                 (589,824)
|    |    └─BatchNorm2d: 3-70            (512)
|    |    └─Conv2d: 3-71                 (262,144)
|    |    └─BatchNorm2d: 3-72            (2,048)
|    |    └─ReLU: 3-73                   --
|    └─Bottleneck: 2-11                  --
|    |    └─Conv2d: 3-74                 (262,144)
|    |    └─BatchNorm2d: 3-75            (512)
|    |    └─Conv2d: 3-76                 (589,824)
|    |    └─BatchNorm2d: 3-77            (512)
|    |    └─Conv2d: 3-78                 (262,144)
|    |    └─BatchNorm2d: 3-79            (2,048)
|    |    └─ReLU: 3-80                   --
|    └─Bottleneck: 2-12                  --
|    |    └─Conv2d: 3-81                 (262,144)
|    |    └─BatchNorm2d: 3-82            (512)
|    |    └─Conv2d: 3-83                 (589,824)
|    |    └─BatchNorm2d: 3-84            (512)
|    |    └─Conv2d: 3-85                 (262,144)
|    |    └─BatchNorm2d: 3-86            (2,048)
|    |    └─ReLU: 3-87                   --
|    └─Bottleneck: 2-13                  --
|    |    └─Conv2d: 3-88                 (262,144)
|    |    └─BatchNorm2d: 3-89            (512)
|    |    └─Conv2d: 3-90                 (589,824)
|    |    └─BatchNorm2d: 3-91            (512)
|    |    └─Conv2d: 3-92                 (262,144)
|    |    └─BatchNorm2d: 3-93            (2,048)
|    |    └─ReLU: 3-94                   --
├─Sequential: 1-8                        --
|    └─Bottleneck: 2-14                  --
|    |    └─Conv2d: 3-95                 (524,288)
|    |    └─BatchNorm2d: 3-96            (1,024)
|    |    └─Conv2d: 3-97                 (2,359,296)
|    |    └─BatchNorm2d: 3-98            (1,024)
|    |    └─Conv2d: 3-99                 (1,048,576)
|    |    └─BatchNorm2d: 3-100           (4,096)
|    |    └─ReLU: 3-101                  --
|    |    └─Sequential: 3-102            (2,101,248)
|    └─Bottleneck: 2-15                  --
|    |    └─Conv2d: 3-103                (1,048,576)
|    |    └─BatchNorm2d: 3-104           (1,024)
|    |    └─Conv2d: 3-105                (2,359,296)
|    |    └─BatchNorm2d: 3-106           (1,024)
|    |    └─Conv2d: 3-107                (1,048,576)
|    |    └─BatchNorm2d: 3-108           (4,096)
|    |    └─ReLU: 3-109                  --
|    └─Bottleneck: 2-16                  --
|    |    └─Conv2d: 3-110                (1,048,576)
|    |    └─BatchNorm2d: 3-111           (1,024)
|    |    └─Conv2d: 3-112                (2,359,296)
|    |    └─BatchNorm2d: 3-113           (1,024)
|    |    └─Conv2d: 3-114                (1,048,576)
|    |    └─BatchNorm2d: 3-115           (4,096)
|    |    └─ReLU: 3-116                  --
├─AdaptiveAvgPool2d: 1-9                 --
├─Sequential: 1-10                       --
|    └─Linear: 2-17                      524,544
|    └─ReLU: 2-18                        --
|    └─Dropout: 2-19                     --
|    └─Linear: 2-20                      2,570
|    └─LogSoftmax: 2-21                  --
=================================================================
Total params: 24,035,146
Trainable params: 527,114
Non-trainable params: 23,508,032
=================================================================
# Train the model for num_epochs (30) epochs.
trained_model, history, best_epoch = train_and_validate(resnet50, criterion, optimizer, num_epochs)

torch.save(history, dataset+'_history.pt')
Epoch: 1/30
Epoch : 000, Training: Loss - 2.1844, Accuracy - 21.0000%, 
		Validation : Loss - 1.4458, Accuracy - 87.0000%, Time: 23.0077s
Epoch: 2/30
Epoch : 001, Training: Loss - 1.4131, Accuracy - 71.6667%, 
		Validation : Loss - 0.8001, Accuracy - 85.0000%, Time: 11.8903s
Epoch: 3/30
Epoch : 002, Training: Loss - 0.8666, Accuracy - 87.5000%, 
		Validation : Loss - 0.4689, Accuracy - 97.0000%, Time: 12.4398s
Epoch: 4/30
Epoch : 003, Training: Loss - 0.5742, Accuracy - 88.1667%, 
		Validation : Loss - 0.3205, Accuracy - 96.0000%, Time: 11.9200s
Epoch: 5/30
Epoch : 004, Training: Loss - 0.3776, Accuracy - 93.1667%, 
		Validation : Loss - 0.2615, Accuracy - 97.0000%, Time: 11.8079s
Epoch: 6/30
Epoch : 005, Training: Loss - 0.2893, Accuracy - 94.1667%, 
		Validation : Loss - 0.2209, Accuracy - 95.0000%, Time: 11.7730s
Epoch: 7/30
Epoch : 006, Training: Loss - 0.2247, Accuracy - 95.0000%, 
		Validation : Loss - 0.2052, Accuracy - 96.0000%, Time: 11.8106s
Epoch: 8/30
Epoch : 007, Training: Loss - 0.1790, Accuracy - 96.3333%, 
		Validation : Loss - 0.1859, Accuracy - 95.0000%, Time: 11.8970s
Epoch: 9/30
Epoch : 008, Training: Loss - 0.1688, Accuracy - 96.6667%, 
		Validation : Loss - 0.1819, Accuracy - 96.0000%, Time: 11.7606s
Epoch: 10/30
Epoch : 009, Training: Loss - 0.1302, Accuracy - 97.3333%, 
		Validation : Loss - 0.1788, Accuracy - 94.0000%, Time: 11.7755s
Epoch: 11/30
Epoch : 010, Training: Loss - 0.1205, Accuracy - 97.0000%, 
		Validation : Loss - 0.1410, Accuracy - 98.0000%, Time: 11.6803s
Epoch: 12/30
Epoch : 011, Training: Loss - 0.1158, Accuracy - 97.3333%, 
		Validation : Loss - 0.1378, Accuracy - 98.0000%, Time: 11.6497s
Epoch: 13/30
Epoch : 012, Training: Loss - 0.0967, Accuracy - 98.0000%, 
		Validation : Loss - 0.1727, Accuracy - 96.0000%, Time: 11.7946s
Epoch: 14/30
Epoch : 013, Training: Loss - 0.0874, Accuracy - 98.1667%, 
		Validation : Loss - 0.1780, Accuracy - 95.0000%, Time: 11.9912s
Epoch: 15/30
Epoch : 014, Training: Loss - 0.0901, Accuracy - 97.8333%, 
		Validation : Loss - 0.1570, Accuracy - 96.0000%, Time: 12.9568s
Epoch: 16/30
Epoch : 015, Training: Loss - 0.0746, Accuracy - 98.1667%, 
		Validation : Loss - 0.1415, Accuracy - 96.0000%, Time: 12.2158s
Epoch: 17/30
Epoch : 016, Training: Loss - 0.0719, Accuracy - 98.3333%, 
		Validation : Loss - 0.1419, Accuracy - 95.0000%, Time: 12.5166s
Epoch: 18/30
Epoch : 017, Training: Loss - 0.0780, Accuracy - 98.0000%, 
		Validation : Loss - 0.1261, Accuracy - 98.0000%, Time: 12.3080s
Epoch: 19/30
Epoch : 018, Training: Loss - 0.0710, Accuracy - 98.1667%, 
		Validation : Loss - 0.1370, Accuracy - 97.0000%, Time: 11.9036s
Epoch: 20/30
Epoch : 019, Training: Loss - 0.0686, Accuracy - 98.3333%, 
		Validation : Loss - 0.1563, Accuracy - 97.0000%, Time: 11.8692s
Epoch: 21/30
Epoch : 020, Training: Loss - 0.0649, Accuracy - 98.8333%, 
		Validation : Loss - 0.1606, Accuracy - 96.0000%, Time: 12.0231s
Epoch: 22/30
Epoch : 021, Training: Loss - 0.0528, Accuracy - 99.0000%, 
		Validation : Loss - 0.1435, Accuracy - 96.0000%, Time: 11.9158s
Epoch: 23/30
Epoch : 022, Training: Loss - 0.0606, Accuracy - 98.1667%, 
		Validation : Loss - 0.1376, Accuracy - 97.0000%, Time: 11.8283s
Epoch: 24/30
Epoch : 023, Training: Loss - 0.0606, Accuracy - 98.8333%, 
		Validation : Loss - 0.1445, Accuracy - 96.0000%, Time: 11.7824s
Epoch: 25/30
Epoch : 024, Training: Loss - 0.0530, Accuracy - 99.1667%, 
		Validation : Loss - 0.1682, Accuracy - 94.0000%, Time: 11.7249s
Epoch: 26/30
Epoch : 025, Training: Loss - 0.0528, Accuracy - 98.8333%, 
		Validation : Loss - 0.1614, Accuracy - 93.0000%, Time: 12.2925s
Epoch: 27/30
Epoch : 026, Training: Loss - 0.0583, Accuracy - 98.6667%, 
		Validation : Loss - 0.1518, Accuracy - 94.0000%, Time: 12.1458s
Epoch: 28/30
Epoch : 027, Training: Loss - 0.0407, Accuracy - 98.8333%, 
		Validation : Loss - 0.1408, Accuracy - 96.0000%, Time: 11.9303s
Epoch: 29/30
Epoch : 028, Training: Loss - 0.0671, Accuracy - 98.5000%, 
		Validation : Loss - 0.1508, Accuracy - 96.0000%, Time: 11.9089s
Epoch: 30/30
Epoch : 029, Training: Loss - 0.0443, Accuracy - 98.8333%, 
		Validation : Loss - 0.1763, Accuracy - 95.0000%, Time: 12.5318s
# Plot the loss curves.
history = np.array(history)
plt.plot(history[:, 0], color='red', marker='o')
plt.plot(history[:, 1], color='blue', marker='x')
plt.legend(['Tr Loss', 'Val Loss'])
plt.xlabel('Epoch Number')
plt.ylabel('Loss')
plt.ylim(0,1)
plt.savefig(dataset+'_loss_curve.png')
plt.show()

[Figure: training and validation loss curves]

# Plot the accuracy curves.
plt.plot(history[:, 2], color='red', marker='o')
plt.plot(history[:, 3], color='blue', marker='x')
plt.legend(['Tr Accuracy', 'Val Accuracy'])
plt.xlabel('Epoch Number')
plt.ylabel('Accuracy')
plt.ylim(0,1)
plt.savefig(dataset+'_accuracy_curve.png')
plt.show()

[Figure: training and validation accuracy curves]

def computeTestSetAccuracy(model, loss_criterion):
    '''
    Function to compute the accuracy on the test set
    Parameters
        :param model: Model to test
        :param loss_criterion: Loss Criterion to minimize
    '''

    test_acc = 0.0
    test_loss = 0.0

    # Testing - no gradient tracking needed.
    with torch.no_grad():

        # Set to evaluation mode.
        model.eval()

        # Test loop.
        for j, (inputs, labels) in enumerate(test_data_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Forward pass - compute outputs on input data using the model.
            outputs = model(inputs)

            # Compute loss
            loss = loss_criterion(outputs, labels)

            # Compute the total loss for the batch and add it to test_loss.
            test_loss += loss.item() * inputs.size(0)

            # Calculate test accuracy.
            _, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))

            # Convert correct_counts to float and then compute the mean.
            acc = torch.mean(correct_counts.type(torch.FloatTensor))

            # Compute total accuracy in the whole batch and add to test_acc.
            test_acc += acc.item() * inputs.size(0)

            print("Test Batch number: {:03d}, Test: Loss: {:.4f}, Accuracy: {:.4f}".format(j, loss.item(), acc.item()))

    # Find average test loss and test accuracy.
    avg_test_loss = test_loss/len(data['test'])
    avg_test_acc = test_acc/len(data['test'])

    print("Test loss : {:.4f}, Test accuracy : {:.4f}".format(avg_test_loss, avg_test_acc))
# Get a mapping of the indices to the class names, in order to see the output classes of the test images.
idx_to_class = {v: k for k, v in data['train'].class_to_idx.items()}
print(idx_to_class)
{0: 'bear', 1: 'chimp', 2: 'giraffe', 3: 'gorilla', 4: 'llama', 5: 'ostrich', 6: 'porcupine', 7: 'skunk', 8: 'triceratops', 9: 'zebra'}
def predict(model, test_image_name):
    '''
    Function to predict the class of a single test image
    Parameters
        :param model: Model to test
        :param test_image_name: Test image

    '''
    
    transform = image_transforms['test']


    test_image = Image.open(test_image_name).convert('RGB')  # Ensure 3 channels before Normalize.
    plt.imshow(test_image)
    
    # Add the batch dimension and move the tensor to the active device.
    test_image_tensor = transform(test_image).unsqueeze(0).to(device)
    
    with torch.no_grad():
        model.eval()
        # Model outputs log probabilities.
        out = model(test_image_tensor)
        ps = torch.exp(out)

        topk, topclass = ps.topk(3, dim=1)

        for i in range(3):
            print("Prediction", i+1, ":", idx_to_class[topclass.cpu().numpy()[0][i]], ", Score: ", topk.cpu().numpy()[0][i])

# Test a particular model on a test image
# ! wget https://cdn.pixabay.com/photo/2018/10/01/12/28/skunk-3716043_1280.jpg -O skunk.jpg
model = torch.load("./Datasets/_model_{}.pt".format(best_epoch))
predict(model, './Datasets/skunk-3716043_1280.jpg')

# Compute accuracy on the whole test set.
# computeTestSetAccuracy(model, criterion)
Prediction 1 : skunk , Score:  0.99803644
Prediction 2 : porcupine , Score:  0.001755606
Prediction 3 : bear , Score:  8.1849976e-05

[Figure: the skunk test image displayed with the predictions above]
