AI

[Pytorch] LENET5 모델 학습 및 추론 코드(마스크 구별 프로그램)

전자둥이 2021. 11. 11. 10:29
반응형

안녕하세요

LENET5 모델을 활용하여 Pytorch를 통해 학습시키는 과정을 진행해 보려고 합니다.

우선 전체 코드 공유해드릴게요~

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os
import torchvision.transforms as transforms
from torchsummary import summary
from torch import optim
from datetime import datetime

## path definitions
# Root directory holding one subdirectory per class (see CustomDataset below).
DATASET_PATH = './dataset/'
# Number of full passes over the dataset during training.
EPOCHS = 1
"""
Custom Dataset
"""
class CustomDataset(Dataset):
    """Image-folder dataset: each subdirectory of the dataset root is a class.

    Expected layout:
        <dataset_path>/<class_name>/<image files>
    Class indices are assigned alphabetically over the class directory names.
    """

    def __init__(self, transforms=None, dataset_path=None):
        """Scan the dataset directory and build (image path, label) lists.

        Args:
            transforms: optional torchvision transform applied in __getitem__.
            dataset_path: dataset root directory; defaults to the module-level
                DATASET_PATH when not given (backward compatible).
        """
        if dataset_path is None:
            dataset_path = DATASET_PATH
        # Sort class names so label indices are stable across filesystems
        # (os.listdir order is arbitrary).
        self.class_names = sorted(os.listdir(dataset_path))
        self.img_list = []
        self.label_list = []
        print(self.class_names)
        for label, class_name in enumerate(self.class_names):
            img_dir = os.path.join(dataset_path, class_name)
            # Sort files too so the sample ordering is deterministic; the
            # original appended files in raw os.listdir order.
            for img_file in sorted(os.listdir(img_dir)):
                self.img_list.append(os.path.join(img_dir, img_file))
                self.label_list.append(label)

        self.transforms = transforms

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.img_list)

    def __getitem__(self, idx):
        """Return one (image, label) pair; the image is forced to RGB."""
        image = Image.open(self.img_list[idx]).convert('RGB')
        if self.transforms is not None:
            image = self.transforms(image)

        return image, self.label_list[idx]


"""
MODEL
"""
class LENET5(nn.Module):
    """Classic LeNet-5 adapted for 3-channel 32x32 inputs.

    forward() returns both the raw logits and their softmax probabilities.
    """

    def __init__(self, class_num) -> None:
        super(LENET5, self).__init__()
        # Convolutional trunk: C1 -> S2 -> C3 -> S4 -> C5 with tanh
        # activations and average-pool subsampling (original LeNet-5 layout).
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(6, 16, kernel_size=5, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(16, 120, kernel_size=5, stride=1),
            nn.Tanh(),
        )
        # Fully connected head: F6 (84 units) then the class_num output layer.
        self.classifier = nn.Sequential(
            nn.Linear(120, 84),
            nn.Tanh(),
            nn.Linear(84, class_num),
        )

    def forward(self, x):
        features = self.feature_extractor(x)
        flat = torch.flatten(features, 1)
        logits = self.classifier(flat)
        probs = F.softmax(logits, dim=1)
        return logits, probs


"""
DataLoader
"""
transform=transforms.Compose([
                        transforms.Resize((32,32)),
                        transforms.ToTensor(),
                        transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
                    ])

dataset = CustomDataset(transform)
dataloader = DataLoader(dataset, batch_size=4, shuffle=False, sampler=None,
           batch_sampler=None, num_workers=1,
           pin_memory=True, drop_last=True, timeout=0,
           worker_init_fn=None)

### test
"""
for data in dataloader:
    print("-------")
    print(data[0].shape, data[1])
"""

"""
Model Explanation
"""
model = LENET5(class_num=2)
print(model)

device = torch.device("cuda")
model.to(device)

summary(model, input_size=(3,32,32))


"""
loss function
"""
loss_func = nn.CrossEntropyLoss(reduction='sum')

"""
optimizer
"""
optimizer = optim.Adam(model.parameters(), lr=0.001)

"""
train
"""
model.train()
for i in range(0,EPOCHS):
    running_loss = 0
    for x, y in dataloader:
        optimizer.zero_grad()

        x = x.to(device)
        y = y.to(device)

        X, _ = model(x)
        loss = loss_func(X,y)
        running_loss += loss.item() 

        loss.backward()
        optimizer.step()
    epoch_loss = running_loss / len(dataloader)
    print(f'{datetime.now().time().replace(microsecond=0)} --- 'f'Epoch: {i}\t'f'Train loss: {epoch_loss:.4f}\t')

"""
eval
"""
print('measure accuracy')
with torch.no_grad():
    n = 0
    correct_pred = 0
    model.eval()
    for x,y in dataloader:
        x = x.to(device)
        y = y.to(device)
        _, probs = model(x)
        _, predicted_labels = torch.max(probs,1)

        n += y.size(0)
        correct_pred += (predicted_labels == y).sum().float().cpu()
print('Accuracy:',(correct_pred/n))

LENET5에 대한 설명은 제 블로그말고도 다른 블로그에 잘 설명되어있어서 생략하도록 하겠습니다.

우선 저는 MNIST, CIFAR-10 같은 공인 데이터셋이 아닌 직접 모은 데이터셋 기반으로 학습시키는 방법을 소개하려고 합니다.

그러기 위해서는 우선 데이터셋을 직접 모아야 할 것입니다.

저는 예제를 위해서 마스크얼굴 , 그냥 얼굴 이렇게 2가지 클래스가 있는 데이터셋을 준비해봤습니다.


마스크얼굴 220장
그냥 얼굴 220장

원래는 train dataset, validation dataset, test dataset이렇게 따로 준비를 하고 진행을 해야하지만, 학습이 되는지, 추론이 되는지에 의의를 두기위해서 따로 구별을 하지는 않겠습니다.
(train dataset을 그대로 이용하여 추론까지 진행할것입니다..)

저의 dataset의 디렉토리는 다음과 같이 구성되어있기 때문에
dataset/
    mask/
        *.jpg
    without_mask/
        *.jpg

CustomDataset 클래스를 위에 올려둔 코드와 같이 짰습니다.

그리고 LENET5모델을 구성한 후 ,
optimizer는 Adam을,
Loss function은 CrossEntropyLoss를 사용하여 학습을 진행하였으며, 학습 진행이 끝나자마자 해당 weight를 활용하여 추론을 진행하는 코드입니다.

물론 실제로는 학습, 추론코드가 따로 구분이 되어있어야 편리하겠지만, 앞서 말했듯이 학습이 되는 과정과 추론이 되는 과정만을 살펴보기위해 다음과 같이 코드를 구성했습니다.

실제로 코드를 동작했을 때 결과 사진 올려두겠습니다~


-> 현재 결과는 epochs를 1만 주고 진행한 결과이기 때문에 train dataset과 test dataset이 같음에도 불구하고 정확도가 50프로 밖에 안나오는 것을 확인 할 수 있습니다.


긴글 읽어주셔서 감사합니다~

반응형