# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 15:26:57 2021

@author: trabz
"""
import glob

import albumentations as A
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt

from xmlParser import Parser


class dataloader(torch.utils.data.Dataset):
    """Loads an image and its parsed XML annotation for one index."""

    def __init__(self, path, transform=None):
        self.paths = glob.glob(path)
        self.transform = transform

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        # Parse the XML annotation; 'image_id' is the idx passed in here.
        annotation = Parser.myType(self.paths[idx], idx, classes=['bird', 'zebra'])
        # The image sits next to its XML file with the same stem.
        image = plt.imread(self.paths[annotation['image_id']].replace('xml', 'jpg'))

        # Default to the untransformed sample so the return value is
        # always well-defined even when no transform is supplied.
        augmented = {'image': image,
                     'bboxes': annotation['bbox'],
                     'labels': annotation['label']}
        if self.transform is not None:
            augmented = self.transform(image=image,
                                       bboxes=annotation['bbox'],
                                       labels=annotation['label'])

        return image, augmented, annotation

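# Minimal usage sketch: index the dataset directly to inspect one sample,
# using the `aug` pipeline defined further down (the unpacking mirrors
# __getitem__'s return value):
#   image, augmented, annotation = dataloader('Image A*/train/*.xml', aug)[0]
#   plt.imshow(augmented['image'])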
def collate_fn(batch):
    return tuple(zip(*batch))

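# The default collate tries to stack every field into one tensor, which
# fails when images differ in size and box counts vary per image.
# tuple(zip(*batch)) regroups [(img0, aug0, ann0), (img1, aug1, ann1)]
# into ((img0, img1), (aug0, aug1), (ann0, ann1)) instead.
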
bbox_params = A.BboxParams(
    format='pascal_voc',
    min_area=1,
    min_visibility=0.5,
    label_fields=['labels']
)

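# 'pascal_voc' boxes are absolute-pixel [x_min, y_min, x_max, y_max].
# min_visibility=0.5 drops a box that keeps less than half of its area
# after a transform (e.g. a crop); min_area=1 discards degenerate boxes.
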
# Transforms are applied in list order; A.Compose needs a list, not a
# set literal, so that the pipeline order is deterministic.
aug = A.Compose([
    # A.Resize(500, 500, p=0.2),
    A.RGBShift(r_shift_limit=40, g_shift_limit=40, b_shift_limit=40, p=0.04),
    A.RandomBrightness(p=0.1),
    A.RandomContrast(p=0.01),
    A.CLAHE(p=0.02),
    A.ToGray(p=0.4),
    A.Blur(blur_limit=8, p=0.1),
    A.CenterCrop(100, 100, p=0.01),
    A.RandomCrop(222, 222, p=0.1),
    A.HorizontalFlip(p=0.1),
    A.Rotate(limit=(-90, 90), p=0.2),
    A.VerticalFlip(p=0.1),
    A.ShiftScaleRotate(),
], bbox_params=bbox_params)

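# Quick sanity check of the pipeline on synthetic data (hypothetical
# dummy image and box; kept commented out so the script is unchanged):
# _dummy = np.zeros((300, 300, 3), dtype=np.uint8)
# _out = aug(image=_dummy, bboxes=[(50, 50, 150, 150)], labels=['bird'])
# print(_out['image'].shape, _out['bboxes'], _out['labels'])
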
path = 'Image A*/train/*.xml'

dataset = dataloader(path, aug)
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=1, collate_fn=collate_fn, shuffle=False)

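# With batch_size=1 and the collate_fn above, every field arrives as a
# length-1 tuple, hence the image[0] / imgO[0] indexing below.
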
for idx, (image, imgO, result) in enumerate(data_loader):

    imgA = np.array(imgO[0]['image'])   # augmented image
    image = np.array(image[0])          # original image (writable copy)

    for idc, bbox in enumerate(imgO[0]['bboxes']):
        # Clip x coordinates against the width (shape[1]) and
        # y coordinates against the height (shape[0]).
        xmin, ymin, xmax, ymax = bbox
        xmin, xmax = np.clip([xmin, xmax], 5, imgA.shape[1] - 5).astype('int')
        ymin, ymax = np.clip([ymin, ymax], 5, imgA.shape[0] - 5).astype('int')

        # Original box with the same index (assumes no boxes were dropped
        # by the crops, i.e. the two box lists stay aligned).
        xpastmin, ypastmin, xpastmax, ypastmax = np.clip(
            result[0]['bbox'][idc], 0, max(image.shape) - 10).astype('int')

        imgA = cv2.rectangle(imgA, (xmin, ymin), (xmax, ymax),
                             color=[0, 245, 0], thickness=4)
        image = cv2.rectangle(image, (xpastmin, ypastmin), (xpastmax, ypastmax),
                              color=[112, 9, 11], thickness=4)

    # Pad the (possibly cropped) augmented image back to the original size
    # so the two can be stacked side by side; splitting the difference as
    # floor/remainder keeps an odd difference from breaking the hstack.
    sz, wd, _ = np.array(image.shape) - np.array(imgA.shape)
    imgA = np.pad(imgA, ((sz // 2, sz - sz // 2), (wd // 2, wd - wd // 2), (0, 0)))

    plt.imsave(f'Albumentations/images/{idx}.png',
               np.hstack((imgA.astype('uint8'), image)))