From d8ccdc87a71241c322547fae84edb2c3f569df70 Mon Sep 17 00:00:00 2001
From: pinb
Date: Thu, 27 Apr 2023 14:07:04 +0000
Subject: [PATCH] Update 'readme.md'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 readme.md | 161 ++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 120 insertions(+), 41 deletions(-)

diff --git a/readme.md b/readme.md
index 691d998..6522ef2 100644
--- a/readme.md
+++ b/readme.md
@@ -1,21 +1,34 @@
 # AI Capstone Project #1 - WDI-CNN
 ### *Building a CNN model that classifies wafer map data into 9 classes*
+-----
+
+
+[PINBlog Gitea Repository](https://gitea.pinblog.codes/CBNU/03_WDI_CNN)
 -----
+
 ### Paper
 A Deep Convolutional Neural Network for Wafer Defect Identification on an Imbalanced Dataset in Semiconductor Manufacturing Processes
 
-* Korean translation
-https://gitea.pinblog.codes/attachments/9b2424f7-7e7d-4ad1-a368-86a523d67504
+* [Korean translation](https://gitea.pinblog.codes/attachments/9b2424f7-7e7d-4ad1-a368-86a523d67504)
+
+* [Original paper](https://gitea.pinblog.codes/attachments/9a31bb80-bc0a-4d5a-83b1-4ef0557456ad)
 
-* Original paper
-https://gitea.pinblog.codes/attachments/9a31bb80-bc0a-4d5a-83b1-4ef0557456ad
+* Cited papers
+
+| No. | Title | Authors | Venue & Link |
+|-----|-------|---------|--------------|
+| 1 | Deep Residual Learning for Image Recognition | Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun | IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. [Link](https://arxiv.org/abs/1512.03385) |
+| 2 | Very Deep Convolutional Networks for Large-Scale Image Recognition | Karen Simonyan, Andrew Zisserman | International Conference on Learning Representations (ICLR), 2015. [Link](https://arxiv.org/abs/1409.1556) |
+| 3 | Going Deeper with Convolutions | Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich | IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. [Link](https://arxiv.org/abs/1409.4842) |
+| 4 | Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift | Sergey Ioffe, Christian Szegedy | International Conference on Machine Learning (ICML), 2015. [Link](https://arxiv.org/abs/1502.03167) |
+| 5 | Dropout: A Simple Way to Prevent Neural Networks from Overfitting | Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, Ruslan Salakhutdinov | Journal of Machine Learning Research (JMLR), 2014. [Link](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf) |
 
 -----
+
 ### Dataset
 [Kaggle - WDI Data](https://www.kaggle.com/qingyi/wm811k-wafer-map/code)
 
@@ -25,19 +38,33 @@
 
 -----
+
 ### Method
 * Implement the CNN model from the paper above, train it on the WDI dataset, and classify each wafer map into one of the 9 classes listed below.
- (Center, Donut, Edge-Loc, Edge-Ring, Loc, Near-full, none, Random, Scratch)
- 
- https://gitea.pinblog.codes/CBNU/03_WDI_CNN/releases/tag/info
+
+| Class | Label | Train images | Validation images | Test images |
+|-----------|---|---------|--------|--------|
+| None      | 0 | 117,431 | 15,000 | 15,000 |
+| Center    | 1 | 3,294   | 500    | 500    |
+| Donut     | 2 | 444     | 50     | 50     |
+| Edge-Loc  | 3 | 4,189   | 500    | 500    |
+| Edge-Ring | 4 | 7,680   | 1,000  | 1,000  |
+| Local     | 5 | 2,794   | 400    | 400    |
+| Random    | 6 | 666     | 100    | 100    |
+| Scratch   | 7 | 894     | 150    | 150    |
+| Near-full | 8 | 149     | -      | -      |
+
+ [Project materials](https://gitea.pinblog.codes/CBNU/03_WDI_CNN/releases/tag/info)
 
 # Model
-
-```python
+<details>
+<summary>Code View</summary>
+<div markdown="1">
+ +````python import torch import torch.nn as nn import torch.nn.functional as F @@ -99,12 +126,17 @@ class CNN_WDI(nn.Module): return F.softmax(x, dim=1) cnn_wdi = CNN_WDI(class_num=9) +```` -``` +
+
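+
+As a quick sanity check (a sketch, not part of the original pipeline), the snippet below counts the model's trainable parameters and confirms that the final `softmax` emits 9 class probabilities per wafer. The 3×224×224 input shape is an assumption for illustration only; match it to whatever `data_transforms` below actually produces.
+
+````python
+# Hypothetical smoke test for the CNN_WDI model defined above.
+n_params = sum(p.numel() for p in cnn_wdi.parameters() if p.requires_grad)
+print(f'trainable parameters: {n_params:,}')
+
+cnn_wdi.eval()                            # keep BatchNorm/Dropout out of train mode
+with torch.no_grad():
+    dummy = torch.randn(2, 3, 224, 224)   # assumed input shape
+    probs = cnn_wdi(dummy)
+print(probs.shape, probs.sum(dim=1))      # expect torch.Size([2, 9]), rows summing to ~1.0
+````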
# Load Data - -```python +
+<summary>Code View</summary>
+<div markdown="1">
+ +````python from torchvision import transforms, datasets # 데이터 전처리 @@ -124,11 +156,17 @@ data_transforms = transforms.Compose([ train_dataset = datasets.ImageFolder(root='E:/wm_images/train/', transform=data_transforms) val_dataset = datasets.ImageFolder(root='E:/wm_images/val/', transform=data_transforms) test_dataset = datasets.ImageFolder(root='E:/wm_images/test/', transform=data_transforms) -``` +```` -# Settings +
+
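+
+The patch stops at the `ImageFolder` datasets, but the training and evaluation sections below also assume `DataLoader` objects (e.g. `test_loader`). A minimal sketch of those loaders, with batch size and worker count as guesses:
+
+````python
+from torch.utils.data import DataLoader
+
+# Baseline loaders assumed by the later sections; tune batch_size/num_workers.
+train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4)
+val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=4)
+test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=4)
+````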
-```python +# Settings +
+Code View +
+ +````python import torch.optim as optim device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") @@ -149,12 +187,17 @@ num_epochs = 100 #* 192 # Random sample size train_max_images = 95 val_max_images = 25 +```` -``` - +
+
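+
+`train_max_images = 95` and `val_max_images = 25` suggest that each epoch trains on a small random sample rather than the full, heavily imbalanced dataset (117,431 None wafers vs. 149 Near-full). The sampling code itself is not part of this patch; the sketch below is one hypothetical per-class reading. Note that the `train()` function below divides by `len(dataloader.dataset)`, so with a sampler the reported per-epoch loss/accuracy is averaged over the full dataset rather than the sampled subset.
+
+````python
+import random
+from collections import defaultdict
+from torch.utils.data import DataLoader, SubsetRandomSampler
+
+# Hypothetical per-class random subsampling; rebuild each epoch for fresh samples.
+def make_sampled_loader(dataset, per_class, batch_size=64):
+    by_class = defaultdict(list)
+    for idx, label in enumerate(dataset.targets):   # ImageFolder exposes .targets
+        by_class[label].append(idx)
+    picked = [i for idxs in by_class.values()
+              for i in random.sample(idxs, min(per_class, len(idxs)))]
+    return DataLoader(dataset, batch_size=batch_size,
+                      sampler=SubsetRandomSampler(picked))
+
+train_loader = make_sampled_loader(train_dataset, train_max_images)
+val_loader = make_sampled_loader(val_dataset, val_max_images)
+````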
+ # Train Function - -```python +
+<summary>Code View</summary>
+<div markdown="1">
+ +````python # 학습 함수 정의 def train(model, dataloader, criterion, optimizer, device): model.train() @@ -181,11 +224,17 @@ def train(model, dataloader, criterion, optimizer, device): epoch_acc = running_corrects.double() / len(dataloader.dataset) return epoch_loss, epoch_acc -``` +```` -# Evaluate Function +
+
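+
+The training function weights every sample equally, so the dominant None class drives the gradients. As an alternative (or complement) to the random subsampling configured in Settings, the loss itself can be class-weighted; a hedged sketch, with counts taken from the class table and the label order assumed to match it (ImageFolder actually assigns labels alphabetically by folder name, so verify against `train_dataset.classes`):
+
+````python
+# Optional imbalance-aware criterion -- not part of the original pipeline.
+counts = torch.tensor([117431., 3294., 444., 4189., 7680., 2794., 666., 894., 149.])
+weights = counts.sum() / (len(counts) * counts)   # inverse-frequency weighting
+weighted_criterion = nn.CrossEntropyLoss(weight=weights.to(device))
+````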
-```python +# Evaluate Function +
+<summary>Code View</summary>
+<div markdown="1">
+ +````python # 평가 함수 정의 def evaluate(model, dataloader, criterion, device): model.eval() @@ -208,12 +257,17 @@ def evaluate(model, dataloader, criterion, device): epoch_acc = running_corrects.double() / len(dataloader.dataset) return epoch_loss, epoch_acc -``` +```` -# Train +
+
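+
+Evaluation only runs forward passes, so no autograd graph is needed. If the loop above does not already disable gradient tracking, wrapping the call does it without touching the function body; `torch.no_grad` also works as a decorator:
+
+````python
+# Optional wrapper: identical results, lower memory use during evaluation.
+@torch.no_grad()
+def evaluate_no_grad(model, dataloader, criterion, device):
+    return evaluate(model, dataloader, criterion, device)
+````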
- -```python +# Train +
+<summary>Code View</summary>
+<div markdown="1">
+ +````python # Train & Validation의 Loss, Acc 기록 파일 s_title = 'Epoch,\tTrain Loss,\tTrain Acc,\tVal Loss,\tVal Acc\n' with open('output.txt', 'a') as file: @@ -243,19 +297,24 @@ for epoch in range(num_epochs + 1): if epoch % 10 == 0: # 모델 저장 torch.save(cnn_wdi.state_dict(), 'CNN_WDI_' + str(epoch) + 'epoch.pth') -``` - +```` +
+
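+
+The loop appends one tab-separated stats line per epoch to `output.txt` (parsed later in the Loss Graph section) and writes a checkpoint every 10 epochs as `CNN_WDI_<epoch>epoch.pth`. To resume from or inspect a checkpoint, load its state dict back into the model; a sketch (epoch 20 is an arbitrary example):
+
+````python
+# Hypothetical resume/inspection snippet.
+cnn_wdi.load_state_dict(torch.load('CNN_WDI_20epoch.pth', map_location=device))
+cnn_wdi.to(device)
+````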
 
 -----
+
 ### Evaluation
 
 * Report the model's performance metrics (Precision, Recall, Accuracy, F1-Score) via a confusion matrix; a small worked example follows the code below.
 
 # Confusion Matrix
-
-```python
+<details>
+<summary>Code View</summary>
+<div markdown="1">
+ +````python import numpy as np import matplotlib.pyplot as plt import seaborn as sns @@ -335,12 +394,17 @@ def predict_and_plot_metrics(title, model, dataloader, criterion, device): plot_metrics(title, class_names, precisions, recalls, f1_scores, epoch_acc.item()) return epoch_loss, epoch_acc, report +```` -``` +
+
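+
+To make the metric definitions concrete, here is a tiny worked example on synthetic labels (not project data). Reading the confusion matrix with true classes as rows and predictions as columns: precision = TP / (TP + FP) down a column, recall = TP / (TP + FN) along a row, and F1 is their harmonic mean.
+
+````python
+from sklearn.metrics import confusion_matrix, classification_report
+
+y_true = [0, 0, 0, 1, 1, 2]   # toy ground truth
+y_pred = [0, 0, 1, 1, 1, 0]   # toy predictions
+print(confusion_matrix(y_true, y_pred))
+# class 0: precision 2/3, recall 2/3; class 1: precision 2/3, recall 1.0
+print(classification_report(y_true, y_pred, digits=3))
+````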
# Evaluate - -```python +
+<summary>Code View</summary>
+<div markdown="1">
+ +````python import os import re @@ -363,11 +427,17 @@ for model in sorted_models: # Call the predict_and_plot_metrics function with the appropriate arguments epoch_loss, epoch_acc, report = predict_and_plot_metrics(model, cnn_wdi, test_loader, criterion, device) # print(f'Model: {model} Test Loss: {test_loss:.4f} Acc: {test_acc:.4f}') -``` +```` -# Loss Graph +
+
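+
+The sweep above plots metrics for every checkpoint; it can also report which checkpoint generalizes best by collecting the returned accuracies. A sketch reusing the names from the loop above:
+
+````python
+# Hypothetical best-checkpoint tracking around the evaluation sweep.
+results = {}
+for model in sorted_models:
+    cnn_wdi.load_state_dict(torch.load(model, map_location=device))
+    loss, acc, _ = predict_and_plot_metrics(model, cnn_wdi, test_loader, criterion, device)
+    results[model] = acc.item()
+
+best = max(results, key=results.get)
+print(f'best checkpoint: {best} (test acc {results[best]:.4f})')
+````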
-```python +# Loss Graph +
+<summary>Code View</summary>
+<div markdown="1">
+ +````python import matplotlib.pyplot as plt # 파일에서 데이터를 읽어들입니다. @@ -405,12 +475,17 @@ plt.ylabel('Values') plt.title('Training and Validation Loss and Accuracy') plt.legend() plt.show() +```` -``` +
+
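+
+Since `output.txt` separates fields with `,\t` (see `s_title` in the Train section), the same plot can be produced with pandas in a few lines, assuming the data rows follow the header's separator:
+
+````python
+import pandas as pd
+import matplotlib.pyplot as plt
+
+# Sketch of an alternative parser; engine='python' lets sep be the two-char ',\t'.
+df = pd.read_csv('output.txt', sep=',\t', engine='python')
+df.plot(x='Epoch', y=['Train Loss', 'Val Loss'], title='Training vs. validation loss')
+plt.show()
+````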
 
 # Print Selected Test Model Results
-
-```python
+<details>
+<summary>Code View</summary>
+<div markdown="1">
+ +````python def output(model, dataloader, criterion, device): model.eval() running_loss = 0.0 @@ -460,10 +535,14 @@ def output(model, dataloader, criterion, device): selected_model = 'CNN_WDI_20epoch.pth' cnn_wdi.load_state_dict(torch.load(selected_model)) output(cnn_wdi, test_loader, criterion, device) -``` +```` + +
+
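+
+For spot-checking a single wafer map outside the test loader, one image can be pushed through the same preprocessing; a hedged sketch (`sample_wafer.png` is a placeholder path, and the class-name lookup relies on ImageFolder's alphabetical label order):
+
+````python
+from PIL import Image
+
+# Hypothetical single-image inference using the pipeline's own transforms.
+img = Image.open('sample_wafer.png').convert('RGB')
+x = data_transforms(img).unsqueeze(0).to(device)
+with torch.no_grad():
+    probs = cnn_wdi(x)
+pred = probs.argmax(dim=1).item()
+print('predicted label:', pred, '->', test_dataset.classes[pred])
+````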
 
 -----
+
 ### Test Results
 
-[Test Run 1](https://gitea.pinblog.codes/CBNU/03_WDI_CNN/wiki/1%EC%B0%A8-%ED%85%8C%EC%8A%A4%ED%8A%B8_%EC%9B%90%EB%B3%B8-%EB%8D%B0%EC%9D%B4%ED%84%B0-%ED%95%99%EC%8A%B5)
\ No newline at end of file
+[Test Run 1](https://gitea.pinblog.codes/CBNU/03_WDI_CNN/wiki/1%EC%B0%A8-%ED%85%8C%EC%8A%A4%ED%8A%B8_%EC%9B%90%EB%B3%B8-%EB%8D%B0%EC%9D%B4%ED%84%B0-%ED%95%99%EC%8A%B5)