{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Class\n",
"\n",
"0: head\n",
"\n",
"1: helmet\n",
"\n",
"2: face\n",
"\n",
"3: mask\n",
"\n",
"4: helmet & mask"
]
},
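{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"The five class IDs above appear as bare integers throughout the code below. As an illustrative aid only, a minimal name lookup (this dictionary is an addition for readability and is not used by the original pipeline):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative class-ID-to-name mapping taken from the list above;\n",
"# the rest of the notebook works with the raw integer IDs.\n",
"CLASS_NAMES = {\n",
"    0: 'head',\n",
"    1: 'helmet',\n",
"    2: 'face',\n",
"    3: 'mask',\n",
"    4: 'helmet & mask',\n",
"}\n",
"print(CLASS_NAMES[4])  # -> helmet & mask"
]
},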
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Import Packages"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"import cv2\n",
"import math\n",
"import numpy as np\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import torch\n",
"import torch.nn.functional as F\n",
"from torchvision import transforms\n",
"from sklearn.metrics import accuracy_score\n",
"import yaml\n",
"\n",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Classfication"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def crop_and_classify(img, boxes, classification_model, transform):\n",
" # classification_model.to(device)\n",
" # classification_model.eval()\n",
" \n",
" for box in boxes:\n",
" x1, y1, x2, y2 = box\n",
" # Expand the bounding box by 10%\n",
" width = x2 - x1\n",
" height = y2 - y1\n",
" x1 = max(int(x1 - 0.1 * width), 0)\n",
" y1 = max(int(y1 - 0.1 * height), 0)\n",
" x2 = min(int(x2 + 0.1 * width), img.shape[1] - 1)\n",
" y2 = min(int(y2 + 0.1 * height), img.shape[0] - 1)\n",
"\n",
" # Crop the image\n",
" cropped_img = img[y1:y2, x1:x2]\n",
"\n",
" # Transform the image to fit the input size of the classification model\n",
" transformed_img = transform(cropped_img)\n",
" transformed_img = transformed_img.unsqueeze(0)\n",
"\n",
" # Classify the cropped image\n",
" with torch.no_grad():\n",
" outputs = classification_model(transformed_img)\n",
" _, predicted = torch.max(outputs.data, 1)\n",
" confidence = torch.nn.functional.softmax(outputs, dim=1)[0][predicted].item()\n",
"\n",
" # Print the classification result\n",
" print(f\"Class: {predicted.item()}, Confidence: {confidence:.2f}\")\n",
"\n",
" # Draw the bounding box and classification result on the image\n",
" cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n",
" cv2.putText(img, f\"Class: {predicted.item()}, Confidence: {confidence:.2f}\", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0,255,0), 2)\n",
" return img"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Detection"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def run_detection_model(img, model_detect):\n",
" # img = transforms.ToTensor()(img).unsqueeze(0).to(device) # Convert image to tensor and add batch dimension\n",
" # model_detect.eval() # Set model to evaluation mode\n",
" img = transforms.ToTensor()(img).unsqueeze(0)\n",
" \n",
" # Move the model to the GPU\n",
" model_detect = model_detect.to(device)\n",
"\n",
" # Run inference\n",
" with torch.no_grad():\n",
" detections = model_detect(img)\n",
"\n",
" # Extract bounding boxes and confidence scores\n",
" boxes = detections.pred[0][:, :4].cpu().numpy().tolist()\n",
" scores = detections.pred[0][:, 4].cpu().numpy().tolist()\n",
"\n",
" return boxes, scores\n"
]
},
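{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Neither `crop_and_classify` nor `run_detection_model` is called by the evaluation loop later in this notebook (the `Evaluate` function re-implements the same flow inline). As a hedged sketch of how the two helpers fit together: the 224x224 transform is an assumption about the classifier's input size, and `model_detect` / `model_cls` are loaded in the next two sections."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hedged usage sketch only; the transform is an assumed 224x224 pipeline\n",
"# and the models must already be loaded (see the next two sections).\n",
"def detect_then_classify(image_path, model_detect, model_cls):\n",
"    img = cv2.imread(image_path)\n",
"    boxes, scores = run_detection_model(img, model_detect)\n",
"    transform = transforms.Compose([\n",
"        transforms.ToPILImage(),\n",
"        transforms.Resize((224, 224)),\n",
"        transforms.ToTensor(),\n",
"    ])\n",
"    return crop_and_classify(img, boxes, model_cls, transform)"
]
},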
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load detection model"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using cache found in C:\\Users\\pinb/.cache\\torch\\hub\\ultralytics_yolov5_master\n",
"YOLOv5 2023-6-12 Python-3.10.9 torch-1.12.1 CUDA:0 (NVIDIA GeForce RTX 3060, 12288MiB)\n",
"\n",
"Fusing layers... \n",
"Model summary: 157 layers, 7012822 parameters, 0 gradients, 15.8 GFLOPs\n",
"Adding AutoShape... \n"
]
}
],
"source": [
"weights_detect = \"C:/Users/pinb/Desktop/pytorch/yolov5/runs/train/dduk_64_/weights/best.pt\"\n",
"model_detect = torch.hub.load('ultralytics/yolov5', 'custom', path=weights_detect)\n",
"model_detect = model_detect.to(device).eval()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load classification model"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using cache found in C:\\Users\\pinb/.cache\\torch\\hub\\ultralytics_yolov5_master\n",
"YOLOv5 2023-6-12 Python-3.10.9 torch-1.12.1 CUDA:0 (NVIDIA GeForce RTX 3060, 12288MiB)\n",
"\n",
"Fusing layers... \n",
"Model summary: 117 layers, 4173093 parameters, 0 gradients, 10.4 GFLOPs\n",
"WARNING YOLOv5 ClassificationModel is not yet AutoShape compatible. You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).\n"
]
}
],
"source": [
"weights_cls = \"C:/Users/pinb/Desktop/pytorch/yolov5/runs/train-cls/dduk_cls3/weights/best.pt\"\n",
"model_cls = torch.hub.load('ultralytics/yolov5', 'custom', path=weights_cls)\n",
"model_cls = model_cls.to(device).eval()\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load YAML"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# # YAML 파일 경로\n",
"yaml_path = \"C:/Users/pinb/Desktop/pytorch/yolov5/runs/detect/test/data.yaml\"\n",
"\n",
"def read_dataset_paths(file_path):\n",
" with open(file_path, 'r') as file:\n",
" data = yaml.safe_load(file)\n",
" test_path = data.get('test')\n",
" train_path = data.get('train')\n",
" val_path = data.get('val')\n",
" \n",
" return os.path.join(os.path.dirname(file_path), val_path)\n",
"\n",
"# # YAML 파일에서 데이터셋 경로 읽기\n",
"val_path = read_dataset_paths(yaml_path)\n",
"\n",
"# 이미지가 있는 폴더 경로\n",
"folder_path = val_path\n",
"result_path = os.path.join(folder_path, \"result\")\n",
"# folder_path = \"C:/Users/pinb/Desktop/pytorch/yolov5/runs/detect/test/\"\n",
"# result_path = \"C:/Users/pinb/Desktop/pytorch/yolov5/runs/detect/test/result\"\n",
"\n",
"def get_image_and_label_paths(folder_path):\n",
" image_paths = []\n",
" label_paths = []\n",
" for file_name in os.listdir(folder_path):\n",
" if file_name.endswith(\".bmp\") or file_name.endswith(\".jpg\") or file_name.endswith(\".png\"):\n",
" image_path = os.path.join(folder_path, file_name)\n",
" image_paths.append(image_path)\n",
" file_name = os.path.splitext(file_name)[0] + \".txt\"\n",
" label_path = folder_path.replace(\"images\", \"labels\")\n",
" label_path = os.path.join(label_path, file_name)\n",
" if os.path.exists(label_path):\n",
" label_paths.append(label_path)\n",
" else:\n",
" label_paths.append(\"\")\n",
"\n",
" return image_paths, label_paths\n",
"\n",
"# 폴더 내의 이미지 파일 경로들을 가져옴\n",
"image_paths, label_paths = get_image_and_label_paths(folder_path)\n",
"\n",
"if not os.path.exists(result_path):\n",
" os.makedirs(result_path)"
]
},
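{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"`read_dataset_paths` assumes an Ultralytics-style `data.yaml` whose `train`/`val`/`test` keys hold paths relative to the YAML file. A minimal sketch of that assumed layout, parsed the same way (the paths and names below are illustrative, not the actual file):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative data.yaml layout assumed by read_dataset_paths above;\n",
"# not the contents of the real file.\n",
"sample_yaml = \"\"\"\n",
"train: images/train\n",
"val: images/val\n",
"test: images/test\n",
"nc: 5\n",
"names: ['head', 'helmet', 'face', 'mask', 'helmet & mask']\n",
"\"\"\"\n",
"parsed = yaml.safe_load(sample_yaml)\n",
"print(parsed['val'])  # -> images/val"
]
},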
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Read Label Data"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def read_label(label_path):\n",
" labels = []\n",
" with open(label_path, 'r') as file:\n",
" lines = file.readlines()\n",
" for line in lines:\n",
" label = line.strip().split(' ')\n",
" label_class = int(label[0])\n",
" bounding_box = tuple(map(float, label[1:]))\n",
" labels.append((label_class, bounding_box))\n",
" return labels"
]
},
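{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"`read_label` assumes YOLO-format label files: one object per line, `class cx cy w h`, with box coordinates normalized to [0, 1]. A quick illustrative parse of a made-up line:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Made-up YOLO-format label line, parsed the same way as read_label.\n",
"line = '1 0.5 0.4 0.2 0.3'\n",
"parts = line.strip().split(' ')\n",
"label_class = int(parts[0])                  # 1 -> helmet\n",
"bounding_box = tuple(map(float, parts[1:]))  # (cx, cy, w, h), normalized\n",
"print(label_class, bounding_box)"
]
},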
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Find closet label class"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def find_closest_label_class(pred_box, labels):\n",
" min_distance = float('inf')\n",
" closest_class = -1\n",
"\n",
" for i, label in enumerate(labels):\n",
" label_box = label[1]\n",
" label_center_x = (label_box[0] + label_box[2]) / 2\n",
" label_center_y = (label_box[1] + label_box[3]) / 2\n",
"\n",
" pred_center_x = (pred_box[0] + pred_box[2]) / 2\n",
" pred_center_y = (pred_box[1] + pred_box[3]) / 2\n",
"\n",
" distance = math.sqrt((label_center_x - pred_center_x) ** 2 + (label_center_y - pred_center_y) ** 2)\n",
"\n",
" if distance < min_distance:\n",
" min_distance = distance\n",
" closest_class = label[0]\n",
"\n",
" return closest_class"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Evaluate"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"lst_result = [[],[],[],[],[]] # class 0~4\n",
"eval_times = []\n",
"\n",
"def Evaluate(model_detect, model_cls, img, img_name, labels, output=True):\n",
" height, width, _ = img.shape\n",
" expand_ratio = 0.5 # The ratio to expand the ROI area\n",
"\n",
" # Inference detection model\n",
" start_time = time.time()\n",
" results = model_detect(img)\n",
"\n",
" # For each detection, crop the ROI and classify it\n",
" for *box, detect_conf, cls in results.xyxy[0]:\n",
" x1, y1, x2, y2 = map(int, box)\n",
" fbox = map(float, box)\n",
"\n",
" # Calculate the width and height of the bounding box\n",
" bbox_width = x2 - x1\n",
" bbox_height = y2 - y1\n",
" roi = (x1, y1, bbox_width, bbox_height)\n",
"\n",
" # Calculate the expanded coordinates\n",
" x1_expanded = max(0, x1 - int(bbox_width * expand_ratio))\n",
" y1_expanded = max(0, y1 - int(bbox_height * expand_ratio))\n",
" x2_expanded = min(width, x2 + int(bbox_width * expand_ratio))\n",
" y2_expanded = min(height, y2 + int(bbox_height * expand_ratio))\n",
"\n",
" roi_flat = (x1_expanded, \n",
" y1_expanded, \n",
" bbox_width + int(bbox_width * expand_ratio) * 2, \n",
" bbox_height + int(bbox_height * expand_ratio) * 2)\n",
"\n",
" # Crop the expanded ROI from the image\n",
" # roi = img[y1:y2, x1:x2]\n",
" roi_expanded = img.copy()\n",
" roi_expanded = roi_expanded[y1_expanded:y2_expanded, x1_expanded:x2_expanded]\n",
" roi_expanded = cv2.resize(roi_expanded, (224, 224))\n",
"\n",
" # Convert numpy array to torch tensor, and normalize pixel values\n",
" roi_expanded = torch.from_numpy(roi_expanded).float().div(255.0)\n",
"\n",
" # Reshape tensor to (channels, height, width)\n",
" roi_expanded = roi_expanded.permute(2, 0, 1)\n",
"\n",
" # Add an extra dimension for the batch\n",
" roi_expanded = roi_expanded.unsqueeze(0)\n",
"\n",
" # Move roi_expanded to the same device as class_model\n",
" roi_expanded = roi_expanded.to(device)\n",
" class_result = model_cls(roi_expanded)\n",
"\n",
" # classfication result\n",
" probabilities = F.softmax(class_result, dim=1)\n",
" max_confidence, max_indices = torch.max(probabilities, dim=1)\n",
" class_pred = max_indices.item()\n",
" class_conf = max_confidence.item()\n",
"\n",
" # confidence\n",
" total_conf = (detect_conf + class_conf) * 0.5\n",
"\n",
" # label - class\n",
" class_gt = find_closest_label_class((x1, y1, x2, y2), labels)\n",
"\n",
" # append (class, confidence, bounding box)\n",
" lst_result[class_pred].append((class_pred, class_gt, total_conf, (x1, y1, x2, y2)))\n",
"\n",
" # Put classification result on each ROI\n",
" thin = 1\n",
" color = ()\n",
" if class_pred == 0: # head\n",
" color = (0, 0, 255)\n",
" elif class_pred == 1: # helmet\n",
" color = (255, 0, 0)\n",
" elif class_pred == 2: # face\n",
" color = (0, 255, 0)\n",
" elif class_pred == 3: # mask\n",
" color = (255, 0, 255)\n",
" elif class_pred == 4: # helmet & mask\n",
" color = (255, 255, 0)\n",
" cv2.putText(img, f'{class_pred}: {total_conf}', (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thin)\n",
" # cv2.rectangle(img, roi_flat, color, thin)\n",
" cv2.rectangle(img, roi, color, thin)\n",
" if output is True:\n",
" print(f'{img_name}\\'s det + cls result [{class_pred}]: {total_conf}%, {roi}:')\n",
"\n",
" end_time = time.time()\n",
" total_time = end_time - start_time\n",
" eval_times.append(total_time)\n",
" if output is True:\n",
" print(f'Detect + Classfication Time: {total_time}s')\n",
"\n",
" # Save image\n",
" cv2.imwrite(f'{result_path}/{img_name}_rst.jpg', img)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Compare Ground Truth & Bounding Box"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Class\n",
"# 0: head\n",
"# 1: helmet\n",
"# 2: face\n",
"# 3: mask\n",
"# 4: helmet & mask\n",
"\n",
"def CompareClass(pr, gt):\n",
" if gt == 0: # head or face\n",
" if pr == 0 or pr == 2:\n",
" return True\n",
" else:\n",
" False\n",
" elif gt == 1: # helmet or mask\n",
" if pr == 1 or pr == 3 or pr == 4:\n",
" return True\n",
" else:\n",
" return False"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [],
"source": [
"def CompareGTandBox(predict, setting_mAP=0):\n",
" # 0: mAP@.5, 1: mAP@.5:.95 0\n",
" # 비교하여 TP와 FP 구분\n",
" results = []\n",
"\n",
" tp_cnt = 0\n",
" fp_cnt = 0\n",
" totalbox_cnt = 0\n",
" targetbox_cnt = len(predict)\n",
"\n",
" for p_class, gt_class, confidence, p_box in predict:\n",
" if CompareClass(p_class, gt_class):\n",
" # iou = calculate_iou(p_box, l_box)\n",
" # iou = confidence\n",
" if setting_mAP == 0: # mAP@.5\n",
" if confidence >= 0.5:\n",
" # True Positive (TP)\n",
" tp_cnt = tp_cnt + 1\n",
" totalbox_cnt = totalbox_cnt +1\n",
" # class, confidence, boudingbox, TP/FP, precision, recall\n",
" results.append((p_class, confidence, p_box, True, tp_cnt/totalbox_cnt, tp_cnt/targetbox_cnt))\n",
" elif setting_mAP == 1: # mAP@.5:.95\n",
" if confidence >= 0.5 and confidence < 0.95:\n",
" # True Positive (TP)\n",
" tp_cnt = tp_cnt + 1\n",
" totalbox_cnt = totalbox_cnt + 1\n",
" # class, confidence, boudingbox, TP/FP, precision, recall\n",
" results.append((p_class, confidence, p_box, True, tp_cnt/totalbox_cnt, tp_cnt/targetbox_cnt))\n",
" else: # all P, R\n",
" tp_cnt = tp_cnt + 1\n",
" totalbox_cnt = totalbox_cnt + 1\n",
" results.append((p_class, confidence, p_box, True, tp_cnt/totalbox_cnt, tp_cnt/targetbox_cnt))\n",
" \n",
" else:\n",
" # False Positive (FP)\n",
" totalbox_cnt = totalbox_cnt + 1\n",
" results.append((p_class, confidence, p_box, False, tp_cnt/totalbox_cnt, tp_cnt/targetbox_cnt))\n",
"\n",
" return results"
]
},
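{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"The commented-out `calculate_iou(p_box, l_box)` above is referenced but never defined in this notebook. A minimal sketch of such a helper, assuming both boxes are pixel-coordinate `(x1, y1, x2, y2)` tuples:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal IoU sketch for (x1, y1, x2, y2) boxes. An assumption:\n",
"# calculate_iou is mentioned above but not defined anywhere here.\n",
"def calculate_iou(box_a, box_b):\n",
"    ix1 = max(box_a[0], box_b[0])\n",
"    iy1 = max(box_a[1], box_b[1])\n",
"    ix2 = min(box_a[2], box_b[2])\n",
"    iy2 = min(box_a[3], box_b[3])\n",
"    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)\n",
"    if inter == 0:\n",
"        return 0.0\n",
"    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])\n",
"    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])\n",
"    return inter / (area_a + area_b - inter)\n",
"\n",
"print(calculate_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # -> 0.142857..."
]
},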
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Get Precisions, Recalls and PR Curve"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [],
"source": [
"def GetPR(results):\n",
" results = sorted(results, key=lambda x: x[5], reverse=True)\n",
" # results = sorted(results, key=lambda x: x[1], reverse=True)\n",
" precisions = [item[4] for item in results]\n",
" recalls = [item[5] for item in results]\n",
"\n",
" return precisions, recalls"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Calculate AP"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [],
"source": [
"def calculate_AP(precisions, recalls):\n",
" assert len(recalls) == len(precisions)\n",
"\n",
" sorted_indices = sorted(range(len(recalls)), key=lambda i: recalls[i])\n",
" sorted_precisions = [precisions[i] for i in sorted_indices]\n",
" sorted_recalls = [recalls[i] for i in sorted_indices]\n",
"\n",
" ap = 0\n",
" for i in range(1, len(sorted_recalls)):\n",
" recall_diff = sorted_recalls[i] - sorted_recalls[i-1]\n",
" ap += recall_diff * (sorted_precisions[i] + sorted_precisions[i-1]) * 0.5\n",
"\n",
" return ap"
]
},
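{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"`calculate_AP` sorts the points by recall and integrates precision over recall with the trapezoidal rule. A tiny worked example with made-up values: the three points below form two trapezoids, 0.1 * (1.0 + 0.8) / 2 + 0.2 * (0.8 + 0.6) / 2 = 0.09 + 0.14 = 0.23."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Worked example with made-up precision/recall values (not real results).\n",
"precisions = [1.0, 0.8, 0.6]\n",
"recalls = [0.1, 0.2, 0.4]\n",
"print(calculate_AP(precisions, recalls))  # -> 0.23 (up to float rounding)"
]
},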
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Run"
]
},
{
"cell_type": "code",
"execution_count": 63,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Class 0\n",
"meanP: 0.2451500387154128\n",
"meanR: 0.1266693073789041\n",
"AP@.5: 0.056300405455419165\n",
"AP@.5:.95: 0.056300405455419165\n",
"\n",
"Class 1\n",
"meanP: 0.8831870385742567\n",
"meanR: 0.43243055555555554\n",
"AP@.5: 0.6344911937169155\n",
"AP@.5:.95: 0.6344911937169155\n",
"\n",
"Class 2\n",
"meanP: 0\n",
"meanR: 0\n",
"AP@.5: 0\n",
"AP@.5:.95: 0\n",
"\n",
"Class 3\n",
"meanP: 0.8349450369473351\n",
"meanR: 0.40800165622897366\n",
"AP@.5: 0.591933273620593\n",
"AP@.5:.95: 0.591933273620593\n",
"\n",
"Class 4\n",
"meanP: 0.8888927177286716\n",
"meanR: 0.43473958333333357\n",
"AP@.5: 0.727062010345605\n",
"AP@.5:.95: 0.727062010345605\n",
"\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAArYAAAGdCAYAAADngDCEAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAACQm0lEQVR4nO3dd3hT5dsH8G/StOkedBdKgbLKlr1nQexPRRkiooCg4kIUJyIvOEHFhQtEZCgqyHABArIU2cgSWkZZLdCd7jZtk+f9IyRtmqRNk7Rp0u/nunpd9OSc5Jym3L1zn+e5H4kQQoCIiIiIyMFJ7X0CRERERES2wMSWiIiIiJwCE1siIiIicgpMbImIiIjIKTCxJSIiIiKnwMSWiIiIiJwCE1siIiIicgpMbImIiIjIKTCxJSIiIiKnwMSWdObPnw+JRIIrV67Y+1QaNGveh2bNmmHw4ME2P6eGQCKRYMqUKXrb+POs3rBhw/C///3P3qdB5DBUKhViYmLw6KOP2vtUnBITWyenVCqxZMkSDBs2DMHBwXB1dUVgYCAGDx6Mjz76CHl5efY+RYts3LgRvXv3hpeXFwICAnDXXXfh1KlTZh8/ePBgSCQS3ZdMJkNYWBjuvfdeHDx4sBbPvGFYuXKl3s9XIpHAx8cHPXr0wOLFi6FSqex9irVm//79ePDBB9G8eXN4eHjAy8sL7du3x4wZM3D69Gl7n55N/fzzz9i9ezfeeuutWn+tO+64AxKJBMOGDTO5j/ZDofZLKpXC398fAwcOxJo1a8x+rdLSUhQWFlp1vleuXDH4P6D9CgoKsuq5a8ONGzcgk8kgkUjw7bffmtyvWbNmetfi6uqKJk2aYOLEiUhISDD79fLz862OA5Xf74pfL7zwglXPDQAFBQV49dVX0apVK8jlcgQHB2P8+PG4cOGCwb579uwxeS7du3fX29fFxQXz58/HN998g5MnT1p9nqRPZu8ToNpz7do1XcI3cOBAzJo1C2FhYVAoFPj777/x0ksvYevWrdi+fbu9T7VGli9fjkceeQQdOnTAu+++C6VSic8++wz9+vXDvn370LlzZ7OeRyqVYtWqVQA0HwBOnDiB5cuXY/Pmzfjzzz8xcODA2rwMk1577TW88sorkMvlNT723LlzkEgktXBWlnniiSfQt29fCCFw8+ZNrFq1CjNnzkR8fDy+/PJLe5+eTQkhMGvWLHz88cdo3Lgxxo8fjzZt2kCtVuPMmTPYsGEDvvjiC1y9ehVNmjSx9+naxPz58zFkyBDcdttttfo6SUlJ2L59O1q2bIndu3fj0qVLaNGihcn9586di9atW0OlUuHy5ctYtmwZHnzwQSQlJeGVV14xesyuXbvwzTffYNeuXbh58yYAwNfXF7169cL999+PSZMmQSar+Z/Me++9F6NHj9bb5u7uXuPnqW0rVqyAEALNmzfH119/jYceesjkvqGhoVi0aBEATfJ38OBBfPfdd/j9999x5MgRtG7d2uCYwsJCfP3119i0aRMOHTqEoqIiSKVSRERE4I477sC0adPQq1cvi879o48+Mviw0K5dO4ueS6u4uBiDBw/G0aNHMWrUKDz33HNIT0/HF198gV69euHAgQNo06aNwXGPPfYYBgwYoLctMDDQYL9x48bhhRdewFtvvYWffvrJqnOlSgQ5peLiYtGxY0fh4uIivv/+e6P7XLp0ScydO1f3/bx58wQAcfny5To6y5pTKBTC19dXNGnSROTk5Oi2JyUlCR8fHzFw4ECznmfQoEHCxcXFYPu6desEABEXF1fl8Xl5eTU78QZmxYoVAoD49ttv9bbn5eWJJk2aCKlUKtLT0+10dsYBEJMnT9bbFhUVJQYNGmTW8W+//bYAIEaPHi0KCwsNHi8uLhbz588X165ds8HZahQVFYnS0lKbPV9N/P333wKAWLlyZa2/1vz584WLi4s4fvy4cHV1Fa+++qrR/bQx7O+//9bbfuXKFeHh4SF8fX0Nfl5paWlixIgRAoAYOHCgWLhwodi0aZP47bffxNKlS8V9990nPDw8RLt27cTx48fNPufLly8LAGLevHk1vdw6p1arRYsWLcTIkSPFRx99JACIc+fOGd03KipKREdHG2x/7733BADx5JNPGjy2bds2ERoaKnx8fMSkSZPEsmXLxObNm8W6devEO++8I3r27CkkEomYMmWKKCoqMvu8a/Nv1ieffCIAiMcee0xve2JiovDw8BAjRozQ2757924BQKxYscLs13jllVeETCYTN27csMUp0y1MbJ3U559/LgCI559/3uxjjAWJ69evi+eff17cdtttIiAgQLi5uYlWrVqJV1991eCPt1qtFosXLxZdunQRvr6+wtPTU0RFRYn7779fpKSk6PY7e/asuP/++0WTJk2Eq6urCAwMFD179hTLly+v9hy1CdP8+fMNHps2bZrZQc5UYpuXlycAiNatWwsh9IPVkiVLRMeOHYVcLtdLgHbt2iVGjhwp/P39hZubm2jbtq1YuHChKCsrM3j+S5cuiUceeUQ0bdpUuLm5iZCQEDF8+HCxfft23T7G3oesrCzxwgsviJYtWwp3d3fh6+srYmJixHPPPaf3/KYSsa1bt4rBgwcLHx8f4e7uLjp37iw+++wzoVar9fabPHmyACCys7PF448/LkJCQoSbm5vo2rWr2LZtW7U/Vy1Tia0QQowZM0YAEAcOHNDbfvHiRTF58mQRHh4uXF1dRePGjcUTTzxhNAHOy8sT8+bNE+3btxfu7u7C399fdO/eXXz66ae6fXJzc8Vrr70mevXqJYKCgoSrq6uIiooSTz31lMjMzDR4TmsS2/T0dOHp6SkiIyNFQUFBtfsLUf4z2r17t8Fj2vehokGDBomoqChx5coVMX78eBEYGCgAiJMnTwoPDw8RGxtr9HWWLVsmAIjvvvtOt02tVouvvvpK9OjRQ3h6egpPT0/Rp08fsWnTJrPOXQghnn76aQHA6Puj/bmdOnVKjBgxQvj4+IhGjRqJadOmifz8fKFWq8V7770noqOjhZubm4iJiRG//vqr0ddRqVQiKipK3HHHHUIIIe655x4RERFh9P+XqcRWCCG6desmAOglESkpKaJ58+YiOjpaHDx40OS1Jicni1GjRgl/f39x9OjRan82QugntkVFRSI/P9+s48xROS7FxMQIuVwuWrVqJVatWqU75/Hjx4tGjRoJT09Pcffdd5tMoP78808BQKxdu1akpaUJV1dX8dJLLxnd11Rie/r0aQHAIOH74YcfhFQqFZMmTTL6/05r27ZtIiIiQsTGxgqlUmnWz6FirMzNzRUlJSVV7n/16lURHx9f7X5CCDFq1CijcUoIIUaMGCEkEoneh9SK70lBQYFZCbr2w2HFuEXW4xhbJ6W9tfH4449b9TynTp3Chg0bMGTIELzxxhtYtGgROnbsiAULFmDMmDF6+77zzjt45pln0LhxY7zzzjv46KOP8NBDD+HChQtISUkBAGRmZmLIkCHYuXMnpkyZgiVLlmD27Nlo27Yt/vrrr2rP59ChQwCAv
n37Gjym3Xb48GGLr/f8+fMAgODgYL3tn3zyCebPn497770Xn376KUaOHAkA+OabbzBs2DCkpaXhlVdeweLFi9GjRw/Mnj0bEydO1HuOf//9F127dsXq1asRFxeHTz75BM8//zy8vb3x559/Vnle9913Hz7++GMMHz4cixcvxttvv40RI0Zg165d1V7T8uXLERcXh6tXr+LFF1/EO++8Ay8vLzz99NOYPn260WNuv/12XLlyBXPmzMHrr7+O69ev46677sK1a9eqfb3qJCYmAtC/PXfixAl069YN27dvx9SpU/H555/jgQcewLfffot+/fohJydHt29OTg769u2L119/HS1btsQ777yD119/Hbfddhs2btyo2+/69etYtmwZunfvjtdeew2LFy/GkCFDsHTpUgwdOhSlpaVWX4vW5s2bUVhYiEmTJsHT09Nmz1tZfn4+BgwYgJKSErz++utYsGABIiIicO+992LXrl1ITk42OGbVqlXw9fXVux3+8MMPY/r06br/q++88w5cXV1x773
"text/plain": [
"<Figure size 800x400 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAsAAAAGdCAYAAAAc18VbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAACZoElEQVR4nOzdd3iT1RcH8G9Gm+5Bd6G0UKCUvfcoG5nKkKVsEBV+LGUICIoIKCJDlL1RRBCRJRsUZMum7L2690gz7u+PNGnSvGnTjGadz/P0eeibN29v0nJ6et57z+UxxhgIIYQQQghxEHxLD4AQQgghhJDSRAkwIYQQQghxKJQAE0IIIYQQh0IJMCGEEEIIcSiUABNCCCGEEIdCCTAhhBBCCHEolAATQgghhBCHQgkwIYQQQghxKJQAE0IIIYQQh0IJMCmxOXPmgMfj4cmTJ5YeikMz5vsQERGBmJgYk4/JEfB4PAwdOlTjGL2fxWvXrh26du1q6WEQYjNkMhmio6MxatQoSw/FLlECTAAAYrEYK1euRLt27RAQEAAnJyf4+fkhJiYG33//PTIyMiw9xBJbsGAB+vXrh8qVK4PP50MoFJb4GjExMeDxeKoPoVCI4OBgvPPOOzh37pwZRu1YNm7cqPH+8ng8eHp6omHDhli2bBlkMpmlh2g2//77L9577z1UqFABrq6ucHd3R/Xq1TFu3DjcuHHD0sMzqT/++AMnTpzAV199Zfav9dZbb4HH46Fdu3Y6z1H+8aj84PP58PHxQatWrbBt2za9v5ZEIkF2drZR433y5InW/wHlh7+/v1HXNodXr15BKBSCx+Nhy5YtOs+LiIjQeC1OTk4oV64cBg0ahDt37uj99TIzM42OA4W/3+ofn3zyiVHXBoCsrCx89tlnqFy5MkQiEQICAtCvXz/cv39f69yTJ0/qHEuDBg00zhUIBJgzZw7Wr1+Pa9euGT1OoqnkGQGxO8+ePUP37t1x/fp1tGrVCpMmTUJwcDBSUlLwzz//YMqUKTh48CAOHz5s6aGWyPTp0+Hj44O6desiMzMTCQkJBl2Hz+dj06ZNABR/KFy9ehXr1q3D/v37cfToUbRq1cqUw9bbzJkzMW3aNIhEohI/9+7du+DxeGYYlWE+/PBDNGvWDIwxvH79Gps2bcL48eMRGxuLn376ydLDMynGGCZNmoQlS5agbNmy6NevH6KioiCXy3Hr1i3s2rULP/74I54+fYpy5cpZergmMWfOHLRp0wZ169Y169d5/vw5Dh8+jEqVKuHEiRN49OgRKlasqPP8WbNmoUqVKpDJZHj8+DHWrFmD9957D8+fP8e0adM4n3P8+HGsX78ex48fx+vXrwEAXl5eaNy4Mfr374/Bgwcb9Mf2O++8g169emkcc3FxKfF1zG3Dhg1gjKFChQpYu3Yt3n//fZ3nBgUFYdGiRQAUSeK5c+ewdetW7Nu3DxcvXkSVKlW0npOdnY21a9di9+7dOH/+PHJycsDn8xEaGoq33noLI0aMQOPGjQ0a+/fff6/1R0W1atUMupZSbm4uYmJicOnSJfTs2RMTJ05EQkICfvzxRzRu3Bhnz55FVFSU1vNGjx6Nli1bahzz8/PTOq9v37745JNP8NVXX+G3334zaqykEEYcWm5uLqtZsyYTCATs559/5jzn0aNHbNasWarPZ8+ezQCwx48fl9IoDfPgwQPVv1u3bs0EAkGJr6HreTt27GAAWJcuXYp8fkZGRom/piPZsGEDA8C2bNmicTwjI4OVK1eO8fl8lpCQYKHRcQPAhgwZonEsPDyctW7dWq/nz5s3jwFgvXr1YtnZ2VqP5+bmsjlz5rBnz56ZYLQKOTk5TCKRmOx6JfHPP/8wAGzjxo1m/1pz5sxhAoGAXblyhTk5ObHPPvuM8zxlDPvnn380jj958oS5uroyLy8vrfcrPj6edezYkQFgrVq1YgsWLGC7d+9me/fuZatWrWLvvvsuc3V1ZdWqVWNXrlzRe8yPHz9mANjs2bNL+nJLnVwuZxUrVmSdO3dm33//PQPA7t69y3lueHg4i4yM1Dr+zTffMADso48+0nrs0KFDLCgoiHl6erLBgwezNWvWsP3797MdO3awr7/+mjVq1IjxeDw2dOhQlpOTo/e4zfk7a+nSpQwAGz16tMbxhw8fMldXV9axY0eN4ydOnGAA2IYNG/T+GtOmTWNCoZC9evXKFEMm+SgBdnArVqxgANjkyZP1fg5XMHn58iWbPHkyq1u3LvP19WXOzs6scuXK7LPPPtP6JS+Xy9myZctYnTp1mJeXF3Nzc2Ph4eGsf//+7M2bN6rzbt++zfr378/KlSvHnJycmJ+fH2vUqBFbt25diV+nqRPgjIwMBoBVqVKFMaYZ1FauXMlq1qzJRCKRRqJ0/Phx1rlzZ+bj48OcnZ1Z1apV2YIFC5hUKtW6/qNHj9jIkSNZ+fLlmbOzMwsMDGQdOnRghw8fVp3D9X1ITk5mn3zyCatUqRJzcXFhXl5eLDo6mk2cOFHj+roStoMHD7KYmBjm6enJXFxcWO3atdkPP/zA5HK5xnlDhgxhAFhqaiobM2YMCwwMZM7OzqxevXrs0KFD+ry1jDHdCTBjjPXu3ZsBYGfPntU4/uDBAzZkyBAWEhLCnJycWNmyZdmHH37ImShnZGSw2bNns+rVqzMXFxfm4+PDGjRowJYvX646Jz09nc2cOZM1btyY+fv7MycnJxYeHs4+/vhjlpSUpHVNYxLghIQE5ubmxsLCwlhWVlax5zNW8B6dOHFC6zHl90Fd69atWXh4OHvy5Anr168f8/PzYwDYtWvXmKurK2vfvj3n11mzZg0DwLZu3ao6JpfL2erVq1nDhg2Zm5sbc3NzY02bNmW7d+/Wa+yMMTZ27FgGgPP7o3zfrl+/zjp27Mg8PT1ZmTJl2IgRI1hmZiaTy+Xsm2++YZGRkczZ2ZlFR0ezP//8k/PryGQyFh4ezt566y3GGGNvv/02Cw0N5fz/pSsBZoyx+vXrMwAaycabN29YhQoVWGRkJDt37pzO1/rixQvWs2dP5uPjwy5dulTse8OYZgKck5PDMjMz9XqePgrHpejoaCYSiVjlypXZpk2bVGPu168fK1OmDHNzc2M9evTQmWgdPXqUAWC//vori4+PZ05OTmzKlCmc5+pKgG/cuMEAaCWGv/zyC+Pz+Wzw4MGc/++UDh06xEJDQ1n79u2ZWCzW631Qj5Xp6eksLy+vyPOfPn3KYmNjiz2PMcZ69uzJGacYY6xjx46Mx+Np/DGr/j3JysrSK5FX/hGpHreI8WgKhINT3lIZM2aMUde5fv06du3ahV69eqFChQpgjOHkyZOYP38+rly5ggMHDqjO/frrrzFz5kx07doVI0eOhJOTE54/f46DBw/izZs3CAoKQlJSEtq0aQO5XI4PPvgAFSpUQEpKCq5fv46///4bw4cPN2q8xrp37x4AICAgQOP40qVL8ebNG4wePRrlypWDp6cnAGD9+vUYOXIk6tati2nTpsHHxwdnzpzB9OnTceXKFWzfvl11jf/++
w/t2rVDdnY2hg8fjtq1ayM9PR3nzp3D0aNH0aFDB53jevfdd3Hy5EmMGjUKdevWhVgsxoMHD3D8+PFiX9O6deswatQoRERE4NNPP4WHhwd27tyJsWPH4tq1a1i9erXWczp16gRfX1/MmDED2dnZWLJkCbp374779++jfPnyer2Xujx8+BCA5m3Bq1evIiYmBm5ubhg+fDjCw8Nx//59/PTTTzh27BguXLgAb29vAEBaWhpatmyJGzduoGfPnhgxYgQEAgFu3ryJ33//HWPHjgUAvHz5EmvWrEGfPn0wYMAAiEQinD9/HqtWrcLp06dx8eJFODk5GfValPbv34/s7GxMnDgRbm5uJrkml8zMTLRs2RINGjTAF198gYyMDISGhuKdd97B9u3b8eLFC63pFZs2bYKXl5fGbfhhw4Zh8+bN6NmzJwYNGgQA+P333/HOO+/gp59+0itunDhxApUqVdI5n/Xly5do164d+vTpo5pbv27dOuTk5MDX1xenT5/GBx98AIFAgKVLl6J37964d+8
"text/plain": [
"<Figure size 800x400 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAs0AAAGdCAYAAADpKU7rAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAABV3klEQVR4nO3deXjMV///8ddENgmJSCyxNIglUe6WKqU3Yq26aysa6m7FWr1x21pbqaW1tNWFFlW0tFRrrd5FS23daLU/rd4VtIpSVBBZhIjk/P7wzdzGTPLJKhLPx3XNdWXO55zPnPPJ5J33nDlzxmaMMQIAAACQIbeC7gAAAABwqyNpBgAAACyQNAMAAAAWSJoBAAAACyTNAAAAgAWSZgAAAMACSTMAAABggaQZAAAAsEDSDAAAAFggaUa+mzx5smw2m44ePVrQXbmt5eb3UKVKFUVEROR5n24HNptNUVFRDmVcT2utWrXSP/7xj4LuBlBopKamKjw8XAMGDCjorhRZJM3IkeTkZL355ptq1aqVypQpIw8PDwUGBioiIkKvvvqqEhISCrqL2XLo0CFNmjRJTZo0UdmyZVWiRAnVrVtX48aNU2xsbJbPExERIZvNZr+5u7urfPny6tKli3bv3p2PI7g9LFmyxOH62mw2lSxZUvfee6/mzJmj1NTUgu5ivvnmm2/0z3/+U1WrVlXx4sXl6+urO++8U0OHDtXPP/9c0N3LUx999JG2b9+u559/Pt8f68EHH5TNZlOrVq0yrJP+gjP95ubmplKlSqlZs2Zavnx5lh8rJSVFSUlJuerv0aNHnf4G0m9BQUG5Ond+OHnypNzd3WWz2fTee+9lWK9KlSoOY/Hw8FClSpXUq1cvHThwIMuPl5iYmOs4cOPv+/rbU089latzS9LFixc1fvx41ahRQ15eXipTpowiIyP166+/OtXdsWNHhn1p0KCBQ91ixYpp8uTJevvtt/XTTz/lup9w5l7QHUDh88cff6hDhw7at2+fmjVrppEjR6p8+fKKjY3Vl19+qdGjR2vTpk3avHlzQXc1y95++2298cYb6tChg3r06CFPT09t375dM2fO1Pvvv6/vvvtO5cqVy9K53NzctHTpUknXXlz8+OOPWrx4sTZs2KDPP/9czZo1y8+hZGjChAkaO3asvLy8st324MGDstls+dCrnHnyySfVpEkTGWN06tQpLV26VMOGDVN0dLTmz59f0N3LU8YYjRw5Uq+99poqVqyoyMhI1apVS2lpafrll1+0Zs0azZs3T8eOHVOlSpUKurt5YvLkyWrRooXq1auXr49z/Phxbd68WdWrV9f27dv1+++/q1q1ahnWnzhxomrWrKnU1FQdOXJECxcu1D//+U8dP35cY8eOddlm27Ztevvtt7Vt2zadOnVKkuTn56dGjRqpR48eevzxx+Xunv1/xV26dNHDDz/sUObt7Z3t8+S3d955R8YYVa1aVYsWLdJjjz2WYd1y5cpp1qxZkq4llrt379ayZcv0ySefaM+ePapZs6ZTm6SkJC1atEjr1q3Tt99+q0uXLsnNzU0VKlTQgw8+qH79+qlRo0Y56vurr77q9EKkdu3aOTpXusuXLysiIkLff/+9OnXqpBEjRigmJkbz5s1To0aNtGvXLtWqVcup3cCBA9W0aVOHssDAQKd63bt311NPPaXnn39eq1atylVf4YIBsuHy5cumbt26plixYub99993Wef33383EydOtN+fNGmSkWSOHDlyk3qZfXv27DGxsbFO5c8884yRZJ566qksnad58+amWLFiTuUrV640kkz79u0zbZ+QkJClx7ldvfPOO0aSee+99xzKExISTKVKlYybm5uJiYkpoN65Jsn07t3boSwkJMQ0b948S+2nTZtmJJmHH37YJCUlOR2/fPmymTx5svnjjz/yoLfXXLp0yaSkpOTZ+bLjyy+/NJLMkiVL8v2xJk+ebIoVK2b27t1rPDw8zPjx413WS49hX375pUP50aNHTfHixY2fn5/T9Tpz5oxp27atkWSaNWtmZs6cadatW2f+85//mAULFphHHnnEFC9e3NSuXdvs3bs3y30+cuSIkWQmTZqU3eHedGlpaaZatWqmXbt25tVXXzWSzMGDB13WDQkJMaGhoU7lL774opFk/vWvfzkd++yzz0y5cuVMyZIlzeOPP24WLlxoNmzYYFauXGmmT59uGjZsaGw2m4mKijKXLl3Kcr/z83/W7NmzjSQzcOBAh/LDhw+b4sWLm7Zt2zqUb9++3Ugy77zzTpYfY+zYscbd3d2cPHkyL7qM65A0I1vmzp1rJJlRo0ZluY2rAPTnn3+aUaNGmXr16pmAgADj6elpatSoYcaPH++UGKSlpZk5c+aYu+++2/j5+RkfHx8TEhJievToYU6fPm2vt3//ftOjRw9TqVIl4+HhYQIDA03Dhg3N4sWLczzen376yUgyDzzwQJbqZ5Q0JyQkGEmmZs2axhjHQPjmm2+aunXrGi8vL4fkatu2baZdu3amVKlSxtPT04SFhZmZM2eaq1evOp3/999/N/379zd33HGH8fT0NGXLljVt2rQxmzdvttdx9Xs4f/68eeqpp0z16tWNt7e38fPzM+Hh4WbEiBEO588oydu0aZOJiIgwJUuWNN7e3uauu+4yb7zxhklLS3Oo17t3byPJXLhwwQwaNMiULVvWeHp6mvr165vPPvssK5fWGJNx0myMMV27djWSzK5duxzKf/vtN9O7d28THBxsPDw8TMWKFc2TTz7pMrlOSEgwkyZNMnfeeafx9vY2pUqVMg0aNDCvv/66vU58fLyZMGGCadSokQkKCjIeHh4mJCTEDB482Jw7d87pnLlJmmNiYoyPj4+pXLmyuXjxomV9Y/53jbZv3+50LP33cL3mzZubkJAQc/ToURMZGWkCAwONJPPTTz+Z4sWLm9atW7t8nIULFxpJZtmyZfaytLQ089Zbb5l7773X+Pj4GB8fH9O4cWOzbt26LPXdGGOGDBliJLn8/aRft3379pm2bduakiVLmtKlS5t+/fqZxMREk5aWZl588UUTGhpqPD09TXh4uPn4449dPk5qaqoJCQkxDz74oDHGmM6dO5sKFSq4/PvKKGk2xph77rnHSHJIUE6fPm2qVq1qQkNDze7duzMc64kTJ0ynTp1MqVKlzPfff295bYxxTJovXbpkEhMTs9QuK26MS+Hh4cbLy8vUqFHDLF261N7nyMhIU7p0aePj42M6duyYYXL2+eefG0nmww8/NGfOnDEeHh5m9OjRLutmlDT//PPPRpJTMrlixQrj5uZmHn/8cZd/d+k+++wzU6FCBdO6dWuTnJycpetwfayMj483V65cybT+sWPHTHR0tGU9Y4zp1KmTyzhljDFt27Y1NpvN4QXw9b+TixcvZin5T3/heX3cQt5geQayJf3tnkGDBuXqPPv27dOaNWv08MMPq2rVqjLGaMeOHZoxY4b27t2rjRs32utOnz5dEyZM0D/+8Q/1799fHh4eOn78uDZt2qTTp0+rXLlyOnfunFq0a
KG0tDQ98cQTqlq1qmJjY7Vv3z598cUX6tu3b476+eeff0qSypYtm6vxHjp0SJJUpkwZh/LZs2fr9OnTGjhwoCpVqqSSJUtKurZcpH///qpXr57Gjh2rUqVK6euvv9a4ceO0d+9effDBB/Zz/L//9//UqlUrJSUlqW/fvrrrrrsUHx+v3bt36/PPP1ebNm0y7NcjjzyiHTt2aMCAAapXr56Sk5P122+/adu2bZZjWrx4sQYMGKAqVaro6aefVokSJbR69WoNGTJEP/30k9566y2nNg888IACAgL0zDPPKCkpSa+99po6dOigX3/9VXfccUeWrmVGDh8+LMnxLcsff/xRERER8vHxUd++fRUSEqJff/1V8+fP19atW/Xdd9/J399fkhQXF6emTZvq559/VqdOndSvXz8VK1ZM//3vf7V27VoNGTJE0rXnxMKFC9WtWzf17NlTXl5e+vbbb7VgwQJ99dVX2rNnjzw8PHI1lnQbNmxQUlKSRowYIR8fnzw5pyuJiYlq2rSpGjRooClTpig
"text/plain": [
"<Figure size 800x400 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAsAAAAGdCAYAAAAc18VbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAACkHElEQVR4nOzdd3yT1fcH8E/SpOneiw5aKLSUvfcoG5ElIEOUjaLCDwFliAjIKPhFRBAVAQEFFZQlS/ZUtmwKZRVKGd27TTPu7480acbTNkmTJm3O+/XiJX3y5MlNLKen5zn3Xh5jjIEQQgghhBAbwbf0AAghhBBCCKlIlAATQgghhBCbQgkwIYQQQgixKZQAE0IIIYQQm0IJMCGEEEIIsSmUABNCCCGEEJtCCTAhhBBCCLEplAATQgghhBCbQgkwIYQQQgixKZQAE4PNnz8fPB4P8fHxlh6KTSvP/4ewsDBER0ebfEy2gMfjYfTo0RrH6PMsW9euXfH6669behiEVBoymQxRUVGYMGGCpYdSJVECTAAAYrEYP/zwA7p27QpfX18IhUJ4e3sjOjoaX3/9NbKzsy09RIOkpKRg7NixaNSoEby9veHg4ICaNWti+PDhuH79ut7XiY6OBo/HU/0RCAQICAjAG2+8gfPnz5vxHdiGTZs2aXy+PB4Prq6uaNGiBVatWgWZTGbpIZrNv//+i7fffhs1atSAo6MjnJ2dUa9ePUyePBk3b9609PBMavfu3Thx4gQWLVpk9td67bXXwOPx0LVr1xLPUf7yqPzD5/Ph4eGBjh07YuvWrXq/lkQiQV5eXrnGGx8fr/NvQPnHx8enXNc2h+fPn0MgEIDH4+GXX34p8bywsDCN9yIUChEcHIwRI0bg7t27er9eTk5OueOA9v9v9T8ff/xxua4NALm5ufj0009Ru3ZtiEQi+Pr6YujQobh//77OuSdPnixxLM2bN9c4187ODvPnz8dPP/1k0M8toh+BpQdALO/p06fo27cvbty4gY4dO2LatGkICAhAeno6zpw5gxkzZuDgwYM4fPiwpYeqt4yMDNy9exfdunVDaGgonJ2dER8fj02bNqFFixbYv38/unfvrte1+Hw+Nm/eDEDxi8K1a9ewYcMG7N+/H0ePHkXHjh3N+VZK9Nlnn2HWrFkQiUQGP/fevXvg8XhmGJVx3n//fbRt2xaMMbx48QKbN2/GlClTEBsbi++//97SwzMpxhimTZuGlStXIigoCEOHDkVkZCTkcjlu376NHTt24LvvvsOTJ08QHBxs6eGaxPz589G5c2c0adLErK+TkJCAw4cPo1atWjhx4gQePXqEmjVrlnj+3LlzERERAZlMhsePH2PdunV4++23kZCQgFmzZnE+5/jx4/jpp59w/PhxvHjxAgDg5uaGVq1aYdiwYRg5ciQEAsN/tL7xxhsYOHCgxjEHBweDr2NuGzduBGMMNWrUwPr16/HOO++UeK6/vz+WL18OQJEknj9/Hlu2bMG+fftw6dIlRERE6DwnLy8P69evx65du3DhwgXk5+eDz+cjMDAQr732GsaNG4dWrVoZNfavv/5a55eKunXrGnUtpYKCAkRHR+Py5cvo378/pk6diuTkZHz33Xdo1aoVzp07h8jISJ3nvfvuu+jQoYPGMW9vb53z3nzzTXz88cdYtGgR/vjjj3KNlWhhxKYVFBSwBg0aMDs7O/brr79ynvPo0SM2d+5c1dfz5s1jANjjx48raJSm8+zZM2ZnZ8c6d+6s1/mdOnVidnZ2Ose3b9/OALDevXuX+vzs7GyjxmkrNm7cyACwX375ReN4dnY2Cw4OZnw+nyUnJ1todNwAsFGjRmkcCw0NZZ06ddLr+YsXL2YA2MCBA1leXp7O4wUFBWz+/Pns6dOnJhitQn5+PpNIJCa7niHOnDnDALBNmzaZ/bXmz5/P7Ozs2NWrV5lQKGSffvop53nKGHbmzBmN4/Hx8czR0ZG5ubnpfF5JSUmsR48eDADr2LEjW7p0Kdu1axfbu3cvW7t2LRsyZAhzdHRkdevWZVevXtV7zI8fP2YA2Lx58wx9uxVOLpezmjVrsl69erGvv/6aAWD37t3jPDc0NJSFh4frHP/yyy8ZAPbBBx/oPHbo0CHm7+/PXF1d2ciRI9m6devY/v372fbt29mSJUtYy5YtGY/HY6NHj2b5+fl6j9ucP7O++eYbBoC9++67GscfPnzIHB0dWY8ePTSOnzhxggFgGzdu1Ps1Zs2axQQCAXv+/LkphkyKUAJs49asWcMAsOnTp+v9HK5gkpiYyKZPn86aNGnCPD09mb29Patduzb79NNPdX7Iy+VytmrVKta4cWPm5ubGnJycWGhoKBs2bBh7+fKl6rw7d+6wYcOGseDgYCYUCpm3tzdr2bIl27Bhg9HvVyqVMhcXF9a4cWO9zi8pAc7OzmYAWEREBGNMM6j98MMPrEGDBkwkEmkkSsePH2e9evViHh4ezN7entWpU4ctXbqUSaVSnes/evSIjR8/nlWvXp3Z29szPz8/1r17d3b48GHVOVz/H9LS0tjHH3/MatWqxRwcHJibmxuLiopiU6dO1bh+SQnbwYMHWXR0NHN1dWUODg6sUaNG7Ntvv2VyuVzjvFGjRjEALCMjg02cOJH5+fkxe3t71rRpU3bo0CF9PlrGWMkJMGOMDRo0iAFg586d0zj+4MEDNmrUKFatWjUmFApZUFAQe//99zkT5ezsbDZv3jxWr1495uDgwDw8PFjz5s3Z6tWrVedkZWWxzz77jLVq1Yr5+PgwoVDIQkND2YcffshSU1N1rlmeBDg5OZk5OTmxkJAQlpubW+b5jBV/RidOnNB5TPn/QV2nTp1YaGgoi4+PZ0OHDmXe3t4MALt+/TpzdHRk3bp143yddevWMQBsy5YtqmNyuZz9+OOPrEWLFszJyYk5OTmxNm3asF27duk1dsYYmzRpEgPA+f9H+bnduHGD9ejRg7m6ujIvLy82btw4lpOTw+RyOfvyyy9ZeHg4s7e3Z1FRUeyvv/7ifB2ZTMZCQ0PZa6+9xhhjbMCAASwwMJDz31dJCTBjjDVr1owB0Eg2Xr58yWrUqMHCw8PZ+fPnS3yvz549Y/3792ceHh7s8uXLZX42jGkmwPn5+SwnJ0ev5+lDOy5FRUUxkUjEateuzTZv3qwa89ChQ5mXlxdzcnJi/fr1KzHROnr0KAPAtm3bxpKSkphQKGQzZszgPLekBPjmzZsMgE5i+NtvvzE+n89GjhzJ+e9O6dChQywwMJB169aNicVivT4H9ViZlZXFCgsLSz3/yZMnLDY2tszzGGOsf//+nHGKMcZ69OjBeDyexi+z6v9PcnNz9Urklb9EqsctUn7UAmHjlLdUJk6cWK7r3LhxAzt27MDAgQNRo0YNMMZw8uRJxMTE4OrVqzhw4IDq3CVLluCzzz7D66+/jvHjx0MoFCIhIQEHDx7Ey5cv4e/vj9TUVHTu3BlyuRzvvfceatSogfT0dNy4cQOnT5/G2LFj9RqXRCJBZmYmpFIpnj59ihUrViAnJ6fck
3Hi4uIAAL6+vhrHv/nmG7x8+RLvvvsugoOD4erqCgD46aefMH78eDRp0gSzZs2Ch4cH/vnnH8yePRtXr17F77//rrrGf//9h65duyIvL0/Vx5yVlYXz58/j6NGjpbZuDBkyBCdPnsSECRPQpEkTiMViPHjwAMePHy/zPW3YsAETJkxAWFgYPvnkE7i4uODPP//EpEmTcP36dfz44486z+nZsyc8PT0xZ84c5OXlYeXKlejbty/u37+P6tWr6/VZluThw4cANG8LXrt2DdHR0XBycsLYsWMRGhqK+/fv4/vvv8exY8dw8eJFuLu7AwAyMzPRoUMH3Lx5E/3798e4ceNgZ2eHW7duYefOnZg0aRIAIDExEevWrcPgwYMxfPhwiEQiXLhwAWvXrsXZs2dx6dIlCIXCcr0Xpf379yMvLw9Tp06Fk5OTSa7JJScnBx06dEDz5s2xYMECZGdnIzAwEG+88QZ+//13PHv2TKe9YvPmzXBzc9O4DT9mzBj8/PPP6N+/P0aMGAEA2LlzJ9544w1
"text/plain": [
"<Figure size 800x400 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAsoAAAGdCAYAAAAL9VWSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAACrcElEQVR4nOzdd1hT59sH8G/CCEOWDAVFUFRE3LPuvXDWhatqXbWt/lx171pXX6uto9ZVZ12tq1Xcs7YqSt2CKIKKg71HIMnz/oGJOTkJJBBIAvfnurhaTp5z8iTgzZ37PEPAGGMghBBCCCGEcAgN3QFCCCGEEEKMESXKhBBCCCGEqEGJMiGEEEIIIWpQokwIIYQQQogalCgTQgghhBCiBiXKhBBCCCGEqEGJMiGEEEIIIWpQokwIIYQQQogalCgTQgghhBCiBiXKpFgsWbIEAoEAUVFRhu5KmVaUn4O3tzfat2+v9z6VBQKBAKNHj+Yco/ezYJ06dULPnj0N3Q1CTIZUKoWfnx/Gjx9v6K6UWpQoE62JxWL88ssv6NSpE1xdXWFhYQFnZ2e0b98e69atQ1pamqG7WCQZGRmoWrUqBAIBxo0bp/V57du3h0AgUHyZm5ujYsWK+PTTT3Hz5s1i7HHZsGvXLs77KxAIYGdnh6ZNm2L9+vWQSqWG7mKx+ffffzFixAhUrVoV1tbWsLW1hb+/PyZPnoyHDx8aunt6dfz4cVy+fBnfffddsT9Xjx49IBAI0KlTJ41t5B8y5V9CoRCOjo5o27YtfvvtN62fKzc3F5mZmUXqb1RUFO/fgPzLxcWlSNcuDm/fvoW5uTkEAgH27t2rsZ23tzfntVhYWKBy5coYPnw4wsLCtH6+9PT0IscB1Z+38tc333xTpGsDeX9f5s2bhxo1akAkEsHV1RWBgYF49uwZr+2VK1c09qVJkyactmZmZliyZAl+/fVX3L9/v8j9JHzmhu4AMQ2vXr1C79698eDBA7Rt2xbTp09HxYoVkZSUhL///huzZs3C6dOnce7cOUN3tdAWLFiA+Pj4Qp0rFAqxe/duAHkfKO7du4cdO3bg1KlTuHDhAtq2bavPrmptwYIFmDNnDkQikc7nPn36FAKBoBh6VThffvklWrZsCcYY3r17h927d2PKlCkIDQ3F5s2bDd09vWKMYfr06fjxxx9RqVIlBAYGwtfXFzKZDI8fP8aRI0fw888/4+XLl6hcubKhu6sXS5YsQYcOHdCwYcNifZ7Xr1/j3LlzqF69Oi5fvowXL16gWrVqGtsvXLgQNWvWhFQqRWRkJLZt24YRI0bg9evXmDNnjtpzLl26hF9//RWXLl3Cu3fvAAD29vZo3rw5hgwZgpEjR8LcXPc/v59++in69+/POWZlZaXzdYrbzp07wRhD1apVsX37dnz22Wca21aoUAFr1qwBkJdM3rx5E/v27cPJkydx+/Zt1KxZk3dOZmYmtm/fjmPHjuHWrVvIysqCUCiEh4cHevTogbFjx6J58+aF6vu6det4Hz5q165dqGvJZWdno3379rhz5w769u2LadOmIS4uDj///DOaN2+OGzduwNfXl3fehAkT0KZNG84xZ2dnXrtBgwbhm2++wXfffYfff/+9SH0lajBCCpCdnc3q1q3LzMzM2P79+9W2efHiBVu4cKHi+8WLFzMALDIysoR6WTTBwcHMzMyMrV27lgFgY8eO1frcdu3aMTMzM97xw4cPMwAsICAg3/PT0tJ07m9ZsnPnTgaA7d27l3M8LS2NVa5cmQmFQhYXF2eg3qkHgI0aNYpzzMvLi7Vr106r85cvX84AsP79+7PMzEze49nZ2WzJkiXs1atXeuhtnqysLJabm6u36+ni77//ZgDYrl27iv25lixZwszMzNjdu3eZhYUFmzdvntp28hj2999/c45HRUUxa2trZm9vz3u/YmNjWdeuXRkA1rZtW7Zq1Sp27Ngx9tdff7EtW7awwYMHM2tra1a7dm129+5drfscGRnJALDFixfr+nJLnEwmY9WqVWPdu3dn69atYwDY06dP1bb18vJiPj4+vOPff/89A8C++uor3mNnz55lFSpUYHZ2dmzkyJFs27Zt7NSpU+zw4cNsxYoVrFmzZkwgELDRo0ezrKwsrftdnH+zfvrpJwaATZgwgXM8IiKCWVtbs65du3KOX758mQFgO3fu1Po55syZw8zNzdnbt2/10WWihBJlUqBNmzYxAGzGjBlan6Mu6Lx584bNmDGDNWzYkDk5OTFLS0tWo0YNNm/ePF4yIJPJ2Pr161mDBg2Yvb09s7GxYV5eXmzIkCHs/fv3inZPnjxhQ4YMYZUrV2YWFhbM2dmZNWvWjO3YsUPrvubk5LB69eqxPn36KP4g6SNRTktLYwBYzZo1GWPc4PfLL7+wunXrMpFIxEmoLl26xLp3784cHR2ZpaUlq1WrFlu1ahWTSCS867948YKNGzeOValShVlaWjI3NzfWpUsXdu7cOUUbdT+HxMRE9s0337Dq1aszKysrZm9vz/z8/Ni0adM419eU2J0+fZq1b9+e2dnZMSsrK1a/fn22ceNGJpPJOO1GjRrFALDk5GQ2ceJE5ubmxiwtLVmjRo3Y2bNntXlrGWOaE2XGGBswYAADwG7cuME5/vz5czZq1Cjm7u7OLCwsWKVKldiXX36pNqFOS0tjixcvZv7+/szKyoo5OjqyJk2asA0bNijapKamsgULFrDmzZszFxcXZmFhwby8vNjXX3/NEhISeNcsSqIcFxfHbGxsmKenJ8vIyCiwPWMf36PLly/zHpP/HJS1a9eOeXl5saioKBYYGMicnZ0ZAHb//n1mbW3NOnfurPZ5tm3bxgCwffv2KY7JZDK2detW1rRpU2ZjY8NsbGxYixYt2LFjx7TqO2OMTZo0iQFQ+/ORv28PHjxgXbt2ZXZ2dqx8+fJs7NixLD09nclkMvb9998zHx8fZmlpyfz8/Niff/6p9nmkUinz8vJiPXr0YIwx1q9fP+bh4aH235emRJkxxho3bswAcJKS9+/fs6pVqzIfHx928+ZNja81Ojqa9e3blzk6OrI7d+4U+N4wxk2Us7KyWHp6ulbnaUM1Lvn5+TGRSMRq1KjBdu/erehzYGAgK1++PLOxsWF9+vTRmJBduHCBAWCHDh1isbGxzMLCgs2aNUttW02J8sOHDxkAXgJ54MABJhQK2ciRI9X+u5M7e/Ys8/DwYJ07d2ZisVir90E5VqamprKcnJx82798+ZKFhoYW2I4xxvr27as2TjHGWNeuXZlAIOB86FX+mWRkZGiV8Ms/bCrHLaIfNPSCFEh+K2fixIlFus6DBw9w5MgR9O/fH1WrVgVjDFeuXMHKlStx9+5dBAUFKdquWLECCxYsQM+ePTFu3DhYWFjg9evXOH36NN6/f48KFSogISEBHTp0gEwmwxdffIGqVasiKSkJDx48wLVr1zBmzBit+vX999/jxYsXOHnypF7Hu4aHhwMAXF1dOcd/+uknvH//HhMmTEDlypVhZ2cHAPj1118xbtw4NGzYEHPmzIGjo
yP++ecfzJ07F3fv3sXBgwcV1/jvv//QqVMnZGZmYsyYMahfvz5SU1Nx8+ZNXLhwAV26dNHYr8GDB+PKlSsYP348GjZsCLFYjOfPn+PSpUsFvqYdO3Zg/Pjx8Pb2xsyZM1GuXDn88ccfmDRpEu7fv4+tW7fyzunWrRucnJwwf/58ZGZm4scff0Tv3r3x7NkzVKlSRav3UpOIiAgA3NuR9+7dQ/v27WFjY4MxY8bAy8sLz549w+bNm3Hx4kUEBwfDwcEBAJCSkoI2bdrg4cOH6Nu3L8aOHQszMzM8evQIR48exaRJkwAAb968wbZt2zBw4EAMHToUIpEIt27dwpYtW3D9+nXcvn0bFhYWRXotcqdOnUJmZiamTZsGGxsbvVxTnfT0dLRp0wZNmjTB0qVLkZaWBg8PD3z66ac4ePAgoqOjecM6du/eDXt7e87t/88//xx79uxB3759MXz4cADA0aNH8emnn2Lz5s1axY3Lly+jevXqGsfbvnnzBp06dcLAgQMVY/937NiBrKwsODk54fr
"text/plain": [
"<Figure size 800x400 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"==> Union Model (Detection + Classification) - 5 classes\n",
"[mean Precisions] 0.5704349663931353\n",
"[mean Recalls] 0.2803682204993534\n",
"[mAP@.5] 0.40195737662770653\n",
"[mAP@.5:.95] 0.40195737662770653\n",
"[mean time] 0.042649137104018055s\n",
"\n",
"==> Convert to Model 1\n",
"[mean Precisions] 0.6230357850195234\n",
"[mean Recalls] 0.3059043619902019\n",
"[mAP@.5] 0.4484272475282988\n",
"[mAP@.5:.95] 0.4484272475282988\n",
"[mean time] 0.042649137104018055s\n",
"\n",
"==> Convert to Model 2\n",
"[mean Precisions] 0.6190156182172799\n",
"[mean Recalls] 0.30386862037965345\n",
"[mAP@.5] 0.3349644805230888\n",
"[mAP@.5:.95] 0.8897615083738772\n",
"[mean time] 0.042649137104018055s\n",
"\n"
]
}
],
"source": [
"lst_result = [[],[],[],[],[]] # class 0~4\n",
"eval_times = []\n",
"\n",
"for i in range(0, len(image_paths)):\n",
" # Image name\n",
" img_dir = os.path.dirname(image_paths[i])\n",
" img_name = os.path.basename(image_paths[i])\n",
"\n",
" # Load image\n",
" img = cv2.imread(image_paths[i]) \n",
" img = cv2.resize(img, (640, 640))\n",
" labels = read_label(label_paths[i])\n",
" Evaluate(model_detect, model_cls, img, img_name, labels, False)\n",
"\n",
"Ps = []\n",
"Rs = []\n",
"ap5s = []\n",
"ap95s = []\n",
"\n",
"# Class\n",
"# 0: head\n",
"# 1: helmet\n",
"# 2: face\n",
"# 3: mask\n",
"# 4: helmet & mask\n",
"\n",
"# each class result\n",
"class_len = len(lst_result)\n",
"\n",
"for i in range(0, class_len): \n",
" results_all = CompareGTandBox(lst_result[i], -1) # all P, R\n",
" results_5 = CompareGTandBox(lst_result[i], 0) # mAP@.5\n",
" results_95 = CompareGTandBox(lst_result[i], 1) # mAP@.5:.95\n",
"\n",
" p, r = GetPR(results_all)\n",
" p1, r1 = GetPR(results_5)\n",
" p2, r2 = GetPR(results_95)\n",
" ap1 = calculate_AP(p1, r1)\n",
" ap2 = calculate_AP(p2, r2)\n",
" # print(p1)\n",
" # print(r1)\n",
" # print(p2)\n",
" # print(r2)\n",
" if len(p) != 0:\n",
" p = sum(p) / len(p)\n",
" else:\n",
" p = 0\n",
" if len(r) != 0:\n",
" r = sum(r) / len(r)\n",
" else:\n",
" r = 0\n",
"\n",
" Ps.append(p)\n",
" Rs.append(r)\n",
" ap5s.append(ap1)\n",
" ap95s.append(ap2)\n",
" print(f'Class {i}')\n",
" print(f'meanP:', p)\n",
" print(f'meanR:', r)\n",
" print(f\"AP@.5:\", ap1)\n",
" print(f\"AP@.5:.95:\", ap2)\n",
"\n",
" fig, axs = plt.subplots(1, 2, figsize=(8, 4))\n",
" axs[0].plot(r1, p1)\n",
" axs[0].set_xlabel('Recall')\n",
" axs[0].set_ylabel('Precision')\n",
" axs[1].plot(r2, p2)\n",
" axs[1].set_xlabel('Recall')\n",
" axs[1].set_ylabel('Precision')\n",
" plt.suptitle(f'Class {i} Precision-Recall Curve (mAP@.5, mAP@.5:.95)')\n",
" print()\n",
" \n",
"# plt.tight_layout()\n",
"plt.show()\n",
"\n",
"mP = sum(Ps) / class_len\n",
"mR = sum(Rs) / class_len\n",
"mAP_5 = sum(ap5s) / class_len\n",
"mAP_95 = sum(ap95s) / class_len\n",
"mean_time = sum(eval_times) / len(eval_times)\n",
"\n",
"print(f'==> Union Model (Detection + Classification) - 5 classes')\n",
"print(f'[mean Precisions] {mP}')\n",
"print(f'[mean Recalls] {mR}')\n",
"print(f'[mAP@.5] {mAP_5}')\n",
"print(f'[mAP@.5:.95] {mAP_95}')\n",
"print(f'[mean time] {mean_time}s')\n",
"print()\n",
"\n",
"\n",
"#### Evaluate the converted (merged-class) models ####\n",
"\n",
"# Class\n",
"# 0: head\n",
"# 1: helmet\n",
"# 2: face\n",
"# 3: mask\n",
"# 4: helmet & mask\n",
"\n",
"# => Model 1: (0, 2, 3) - head, (1, 4) - helmet \n",
"# => Model 2: (0, 1, 2) - face, (3, 4) - mask \n",
"\n",
"print(f'==> Convert to Model 1')\n",
"print(f'[mean Precisions] {((Ps[0] + Ps[2] + Ps[3]) / 3 + (Ps[1] + Ps[4]) / 2) / 2}')\n",
"print(f'[mean Recalls] {((Rs[0] + Rs[2] + Rs[3]) / 3 + (Rs[1] + Rs[4]) / 2) / 2}')\n",
"print(f'[mAP@.5] {((ap5s[0] + ap5s[2] + ap5s[3]) / 3 + (ap5s[1] + ap5s[4]) / 2) / 2}')\n",
"print(f'[mAP@.5:.95] {((ap95s[0] + ap95s[2] + ap95s[3]) / 3 + (ap95s[1] + ap95s[4]) / 2) / 2}')\n",
"print(f'[mean time] {mean_time}s')\n",
"print()\n",
"\n",
"print(f'==> Convert to Model 2')\n",
"print(f'[mean Precisions] {((Ps[0] + Ps[1] + Ps[2]) / 3 + (Ps[3] + Ps[4]) / 2) / 2}')\n",
"print(f'[mean Recalls] {((Rs[0] + Rs[1] + Rs[2]) / 3 + (Rs[3] + Rs[4]) / 2) / 2}')\n",
"print(f'[mAP@.5] {((ap5s[0] + ap5s[1] + ap5s[2]) / 3 + (ap5s[3] + ap5s[4]) / 3) / 2}')\n",
"print(f'[mAP@.5:.95] {((ap95s[0] + ap95s[1] + ap95s[2]) / 3 + (ap95s[3] + ap95s[4]) / 2)}')\n",
"print(f'[mean time] {mean_time}s')\n",
"print()\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}