|
| 1 | +""" |
| 2 | +Works with the chess pieces dataset. |
| 3 | +""" |
| 4 | +import csv |
| 5 | +import functools |
| 6 | +import os |
| 7 | +import shutil |
| 8 | +from random import shuffle |
| 9 | + |
| 10 | +import pandas as pd |
| 11 | + |
| 12 | +from lc2fen.fen import PIECE_TYPES |
| 13 | + |
# Maps each piece letter to the numeric class used by the classifier.
# '_' (class 0) is followed by the lowercase letters b..r (presumably the
# black pieces, per FEN convention — confirm) and then the uppercase ones,
# in the same order. Enumerating the ordered string reproduces the exact
# letter -> number mapping of the original literal.
PIECES_TO_CLASSNUM = {
    piece: classnum for classnum, piece in enumerate('_bknpqrBKNPQR')
}
| 29 | + |
| 30 | + |
def create_dataset_csv(dataset_dir, csv_name, frac=1, validate=0.2, test=0.1):
    """
    Deprecated, not currently in use.

    Creates the csv for the dataset. Each written row is
    [TRAIN|VAL|TEST, image_path, class_number]; the split label is
    assigned by row position after shuffling.

    :param dataset_dir: Directory of the dataset.
    :param csv_name: Name of the output csv (created inside dataset_dir).
    :param frac: Fraction of images to load. Default 1.
    :param validate: Fraction of images to label as VAL. Default 0.2.
    :param test: Fraction of images to label as TEST. Default 0.1.
    :return: Number of loaded images.
    """
    def load_dataset_images(directory, fraction):
        """
        Returns a shuffled DataFrame of (image_name, label) rows.

        :param directory: Directory of the dataset.
        :param fraction: Fraction of images to load.
        """
        # os.path.join works whether or not `directory` carries a
        # trailing separator; the previous raw string concatenation
        # produced broken paths when the trailing '/' was missing, and
        # was inconsistent with the rest of this module.
        labeled_images = []
        for piece_type in PIECE_TYPES:
            piece_dir = os.path.join(directory, piece_type)
            labeled_images.extend(
                (os.path.join(piece_dir, image), piece_type)
                for image in os.listdir(piece_dir))

        data_frame = pd.DataFrame(labeled_images,
                                  columns=["image_name", "label"])
        # Shuffle rows
        return data_frame.sample(frac=fraction).reset_index(drop=True)

    data_frame = load_dataset_images(dataset_dir, frac)
    total_rows = len(data_frame.index)

    with open(os.path.join(dataset_dir, csv_name), 'w', newline='',
              encoding='utf-8') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|',
                               quoting=csv.QUOTE_MINIMAL)
        # After shuffling, the first rows are TRAIN, then VAL, then TEST.
        start_test = 1.0 - test
        start_validate = start_test - validate

        for i, row in data_frame.iterrows():
            percentage = i / total_rows
            if percentage >= start_test:
                set_str = 'TEST'
            elif percentage >= start_validate:
                set_str = 'VAL'
            else:
                set_str = 'TRAIN'

            filename, label = row
            csvwriter.writerow([set_str, filename,
                                PIECES_TO_CLASSNUM[label]])
    return total_rows
| 86 | + |
| 87 | + |
def randomize_dataset(dataset_dir):
    """
    Randomizes the order of the images in the subdirectories of
    dataset_dir. Renames them to <number>.jpg.

    :param dataset_dir: Directory of the dataset.
    """
    subdirs = [d for d in os.listdir(dataset_dir)
               if os.path.isdir(os.path.join(dataset_dir, d))]
    for subdir in subdirs:
        subdir_path = os.path.join(dataset_dir, subdir)
        files = [f for f in os.listdir(subdir_path)
                 if os.path.isfile(os.path.join(subdir_path, f))]
        shuffle(files)

        # Two-phase rename. Renaming straight to "<i>.jpg" could clobber
        # a not-yet-processed file that already had that name (e.g. the
        # shuffle maps "a.jpg" to "3.jpg" while the old "3.jpg" is still
        # there) -- os.rename silently overwrites on POSIX, losing data.
        # Phase 1: move everything to unique temporary names.
        temp_paths = []
        for i, name in enumerate(files):
            temp_path = os.path.join(subdir_path, "__tmp_" + str(i))
            os.rename(os.path.join(subdir_path, name), temp_path)
            temp_paths.append(temp_path)

        # Phase 2: assign the final sequential names.
        for i, temp_path in enumerate(temp_paths):
            os.rename(temp_path, os.path.join(subdir_path,
                                              str(i) + ".jpg"))
| 106 | + |
| 107 | + |
def split_dataset(dataset_dir, train_dir, validation_dir, train_perc=0.8):
    """
    Splits dataset_dir into train_dir and validation_dir given
    train_perc.

    :param dataset_dir: Directory of the whole dataset.
    :param train_dir: Train directory. Recreated from scratch.
    :param validation_dir: Validation directory. Recreated from scratch.
    :param train_perc: Percentage of training images. Default 0.8.
    """
    # Same piece order the original hard-coded: empty square, black
    # pieces, then white pieces.
    piece_types = ('_', 'r', 'n', 'b', 'q', 'k', 'p',
                   'R', 'N', 'B', 'Q', 'K', 'P')

    # Recreate both output trees. ignore_errors lets this work when the
    # directories do not exist yet (the bare rmtree used to crash then).
    for out_dir in (train_dir, validation_dir):
        shutil.rmtree(out_dir, ignore_errors=True)
        os.mkdir(out_dir)
        for piece_type in piece_types:
            os.mkdir(os.path.join(out_dir, piece_type))

    subdirs = [d for d in os.listdir(dataset_dir)
               if os.path.isdir(os.path.join(dataset_dir, d))]
    for subdir in subdirs:
        files = os.listdir(os.path.join(dataset_dir, subdir))
        # The first train_perc of the listing goes to train, the rest to
        # validation (os.listdir order, as before).
        num_train_files = len(files) * train_perc
        for i, name in enumerate(files):
            path = os.path.join(dataset_dir, subdir, name)
            if os.path.isfile(path):
                dest_root = train_dir if i < num_train_files \
                    else validation_dir
                shutil.copy(path, os.path.join(dest_root, subdir, name))
0 commit comments