-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathpreprocessingofdata.py
More file actions
264 lines (195 loc) · 7.74 KB
/
preprocessingofdata.py
File metadata and controls
264 lines (195 loc) · 7.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
import os
import json
import music21 as m21
import numpy as np
import tensorflow.keras as keras
KERN_DATASET_PATH = "deutschl/test"   # folder of .krn (kern) files to preprocess
SAVE_DIR = "dataset"                  # per-song encoded text files are written here
SINGLE_FILE_DATASET = "file_dataset"  # all encoded songs collated into one file
MAPPING_PATH = "mapping.json"         # symbol -> integer vocabulary mapping (JSON)
SEQUENCE_LENGTH = 64                  # time steps per training sequence

# durations are expressed in quarter length
ACCEPTABLE_DURATIONS = [
    0.25,  # 16th note
    0.5,   # 8th note
    0.75,  # dotted 8th note
    1.0,   # quarter note
    1.5,   # dotted quarter note
    2,     # half note
    3,     # dotted half note
    4      # whole note
]
def load_songs_in_kern(dataset_path):
    """Loads all kern pieces in dataset using music21.

    :param dataset_path (str): Path to dataset
    :return songs (list of m21 streams): List containing all pieces
    """
    songs = []

    # walk the whole tree so .krn files in nested folders are found too
    for path, subdirs, files in os.walk(dataset_path):
        for file in files:
            # consider only kern files; the original check `file[-3:] == "krn"`
            # also matched names like "datakrn" that merely end in those letters
            if file.endswith(".krn"):
                song = m21.converter.parse(os.path.join(path, file))
                songs.append(song)
    return songs
def has_acceptable_durations(song, acceptable_durations):
    """Return True when every note/rest in the piece has an allowed duration.

    :param song (m21 stream): piece to check
    :param acceptable_durations (list): allowed durations in quarter lengths
    :return (bool):
    """
    # all() short-circuits on the first offending duration, exactly like the
    # original early-return loop; an empty piece is vacuously acceptable
    return all(
        event.duration.quarterLength in acceptable_durations
        for event in song.flat.notesAndRests
    )
def transpose(song):
    """Transposes song to C maj/A min

    :param song (m21 stream): Piece to transpose
    :return transposed_song (m21 stream):
    :raises ValueError: if the piece's key mode is neither major nor minor
    """
    # get key from the song; when encoded in the score it sits at index 4
    # of the first measure of the first part
    parts = song.getElementsByClass(m21.stream.Part)
    measures_part0 = parts[0].getElementsByClass(m21.stream.Measure)
    key = measures_part0[0][4]

    # fall back to music21's key estimation when no key is encoded
    if not isinstance(key, m21.key.Key):
        key = song.analyze("key")

    # get interval for transposition. E.g., Bmaj -> Cmaj
    if key.mode == "major":
        interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("C"))
    elif key.mode == "minor":
        interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("A"))
    else:
        # the original fell through here with `interval` unbound, which
        # raised a confusing NameError on the transpose call below
        raise ValueError(f"Unsupported key mode: {key.mode}")

    # transpose song by calculated interval
    transposed_song = song.transpose(interval)
    return transposed_song
def encode_song(song, time_step=0.25):
    """Converts a score into a time-series-like music representation. Each item
    in the encoded list represents 'min_duration' quarter lengths. The symbols
    used at each step are: integers for MIDI notes, 'r' for representing a rest,
    and '_' for representing notes/rests that are carried over into a new time
    step. Here's a sample encoding:

        ["r", "_", "60", "_", "_", "_", "72" "_"]

    :param song (m21 stream): Piece to encode
    :param time_step (float): Duration of each time step in quarter length
    :return (str): space-separated encoded song
    """
    tokens = []

    for event in song.flat.notesAndRests:

        # notes are encoded as their MIDI number, rests as "r"
        if isinstance(event, m21.note.Note):
            symbol = event.pitch.midi  # e.g. 60 for middle C
        elif isinstance(event, m21.note.Rest):
            symbol = "r"

        # how many time steps this event spans
        num_steps = int(event.duration.quarterLength / time_step)

        # first step carries the symbol itself, every following step a "_"
        # (events shorter than one time step contribute nothing, as before)
        if num_steps > 0:
            tokens.append(str(symbol))
            tokens.extend(["_"] * (num_steps - 1))

    # collapse the token list into a single space-separated string
    return " ".join(tokens)
def preprocess(dataset_path):
    """Load, filter, transpose, encode, and save all songs in the dataset.

    :param dataset_path (str): Path to the folder of kern files
    :return: None; writes one encoded text file per accepted song to SAVE_DIR
    """
    # load folk songs
    print("Loading songs...")
    songs = load_songs_in_kern(dataset_path)
    print(f"Loaded {len(songs)} songs.")

    # make sure the output directory exists before writing into it —
    # the original crashed with FileNotFoundError on a fresh checkout
    os.makedirs(SAVE_DIR, exist_ok=True)

    for i, song in enumerate(songs):

        # filter out songs that have non-acceptable durations
        if not has_acceptable_durations(song, ACCEPTABLE_DURATIONS):
            continue

        # transpose songs to Cmaj/Amin
        song = transpose(song)

        # encode songs with music time series representation
        encoded_song = encode_song(song)

        # save songs to text file, one file per song named by its index
        save_path = os.path.join(SAVE_DIR, str(i))
        with open(save_path, "w") as fp:
            fp.write(encoded_song)

        if i % 10 == 0:
            print(f"Song {i} out of {len(songs)} processed")
def load(file_path):
    """Read and return the full contents of an encoded-song text file.

    :param file_path (str): Path to the file
    :return (str): file contents
    """
    with open(file_path, "r") as fp:
        return fp.read()
def create_single_file_dataset(dataset_path, file_dataset_path, sequence_length):
    """Generates a file collating all the encoded songs and adding new piece delimiters.

    :param dataset_path (str): Path to folder containing the encoded songs
    :param file_dataset_path (str): Path to file for saving songs in single file
    :param sequence_length (int): # of time steps to be considered for training
    :return songs (str): String containing all songs in dataset + delimiters
    """
    new_song_delimiter = "/ " * sequence_length

    # read every encoded song under dataset_path (recursively) into memory
    pieces = []
    for root, _, filenames in os.walk(dataset_path):
        for filename in filenames:
            with open(os.path.join(root, filename), "r") as fp:
                pieces.append(fp.read())

    # each song is followed by a space and the delimiter run
    songs = "".join(piece + " " + new_song_delimiter for piece in pieces)

    # remove empty space from last character of string
    songs = songs[:-1]

    # save string that contains all the dataset
    with open(file_dataset_path, "w") as fp:
        fp.write(songs)

    return songs
def create_mapping(songs, mapping_path):
    """Creates a json file that maps the symbols in the song dataset onto integers

    :param songs (str): String with all songs
    :param mapping_path (str): Path where to save mapping
    :return:
    """
    # identify the vocabulary; sorted() makes the symbol -> int assignment
    # deterministic across runs (list(set(...)) gave a different mapping each
    # time, breaking reproducibility between preprocessing and training runs)
    vocabulary = sorted(set(songs.split()))

    # create mappings
    mappings = {symbol: i for i, symbol in enumerate(vocabulary)}

    # save vocabulary to a json file
    with open(mapping_path, "w") as fp:
        json.dump(mappings, fp, indent=4)
def convert_songs_to_int(songs):
    """Translate a space-separated song string into a list of integer ids.

    :param songs (str): String with all songs
    :return (list of int): one integer per symbol, per the saved mapping
    """
    # load the symbol -> int vocabulary produced by create_mapping
    with open(MAPPING_PATH, "r") as fp:
        mappings = json.load(fp)

    # look each whitespace-separated symbol up in the mapping
    return [mappings[symbol] for symbol in songs.split()]
def generate_training_sequences(sequence_length):
    """Create input and output data samples for training. Each sample is a sequence.

    :param sequence_length (int): Length of each sequence. With a quantisation at 16th notes, 64 notes equates to 4 bars
    :return inputs (ndarray): Training inputs
    :return targets (ndarray): Training targets
    """
    # load songs and map them to int
    songs = load(SINGLE_FILE_DATASET)
    int_songs = convert_songs_to_int(songs)

    # slide a window of `sequence_length` symbols over the data; the symbol
    # right after each window is its prediction target
    num_sequences = len(int_songs) - sequence_length
    inputs = [int_songs[i:i + sequence_length] for i in range(num_sequences)]
    targets = [int_songs[i + sequence_length] for i in range(num_sequences)]

    # one-hot encode the sequences
    # inputs size: (# of sequences, sequence length, vocabulary size)
    vocabulary_size = len(set(int_songs))
    inputs = keras.utils.to_categorical(inputs, num_classes=vocabulary_size)
    targets = np.array(targets)

    print(f"There are {len(inputs)} sequences.")

    return inputs, targets
def main():
    """Run the full preprocessing pipeline: load/encode songs, collate them
    into a single file, build the symbol mapping, and generate sequences."""
    preprocess(KERN_DATASET_PATH)
    songs = create_single_file_dataset(SAVE_DIR, SINGLE_FILE_DATASET, SEQUENCE_LENGTH)
    create_mapping(songs, MAPPING_PATH)
    # the returned (inputs, targets) pair was never used here, so the
    # unused unpacking was dropped; the call still reports the sequence count
    generate_training_sequences(SEQUENCE_LENGTH)


if __name__ == "__main__":
    main()