-
Notifications
You must be signed in to change notification settings - Fork 10
/
Copy path: canon-gen.py
428 lines (349 loc) · 14.1 KB
/
canon-gen.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
import music21
import scamp
import copy
import random
# define some readability constants
# method selectors: transform based on a single note, or on a note pair
SINGLE_NOTE = 0
DOUBLE_NOTE = 1
methods = [SINGLE_NOTE, DOUBLE_NOTE]
# rounding strategies passed to median() as if_even_length_use_upper_element
ODD_UPPER = True
ODD_LOWER = False
# voice indices used as keys in the transposition map
VOICE1 = 0
VOICE2 = 1
VOICE3 = 2
VOICE4 = 3
VOICE5 = 4
def pairwise(iterable):
    """
    Take a list and return a new list containing the elements pairwise
    with overlap, terminated by a (last_element, None) sentinel pair.

    s -> (s[0], s[1]), (s[1], s[2]), (s[2], s[3]), ..., (s[Last], None)

    An empty input returns an empty list (the original raised IndexError
    on the trailing ``iterable[-1]`` access).
    """
    if not iterable:
        return []
    return list(zip(iterable, iterable[1:])) + [(iterable[-1], None)]
def median(lst, if_even_length_use_upper_element=False):
    """
    Return the middle element of *lst* (assumed already ordered).

    For an odd-length list the middle element is well-defined. For an
    even-length list the caller chooses: the lower-middle element by
    default, or the upper-middle one when
    *if_even_length_use_upper_element* is True. An empty list yields
    None.
    """
    n = len(lst)
    if n == 0:
        return None
    if n % 2 == 0 and if_even_length_use_upper_element:
        # even length, upper-middle requested
        return lst[n // 2]
    # odd length, or even length with lower-middle default
    return lst[(n - 1) // 2]
def realize_chord(chordstring, numofpitch=3, baseoctave=4, direction="ascending"):
    """
    Given a chord symbol like "Am7", return a list of *numofpitch*
    pitches realizing it, starting in octave *baseoctave*, ascending.

    If direction == "descending", the list of pitches is reversed
    before being returned.
    """
    pitches = music21.harmony.ChordSymbol(chordstring).pitches
    octave_correction = baseoctave - pitches[0].octave
    result = []
    # cycle through the chord tones, bumping the octave on each pass,
    # until the requested number of pitches has been collected
    while len(result) < numofpitch:
        for p in pitches:
            if len(result) >= numofpitch:
                break
            shifted = copy.deepcopy(p)
            shifted.octave = shifted.octave + octave_correction
            result.append(shifted)
        octave_correction += 1
    if direction != "ascending":
        result.reverse()
    return result
class Identity(object):
    """
    Transformation that maps a note onto itself.

    When transformations are picked at random, having identity appear
    several times keeps the generated score from becoming too busy.
    """

    def __init__(self):
        pass

    def transform(self, scale, note, note2=None):
        """Return a new stream containing *note* unchanged."""
        result = music21.stream.Stream()
        result.append(note)
        return result
class OneToThree(object):
    """
    Transformation that randomly splits one note into three notes:
    * total duration is kept
    * first note and last note equal the original note
    * the middle note is a scalewise neighbour note
    """

    def __init__(self):
        pass

    def transform(self, scale, note):
        head = copy.deepcopy(note)
        # rests are passed through untouched
        if head.isRest:
            passthrough = music21.stream.Stream()
            passthrough.append(head)
            return passthrough
        duration_patterns = [  # [ 1.0/3, 1.0/3, 1.0/3],
            [0.5, 0.25, 0.25],
            [0.25, 0.5, 0.25],
            [0.25, 0.25, 0.5],
        ]
        step_directions = [
            "ascending",
            "descending",
        ]
        durations = random.choice(duration_patterns)
        step = random.choice(step_directions)
        result = music21.stream.Stream()
        head.quarterLength = durations[0] * note.quarterLength
        result.append(head)
        # middle note: one scale degree away from the original pitch
        neighbour = music21.note.Note()
        neighbour.pitch = scale.next(head.pitch, direction=step)
        neighbour.quarterLength = durations[1] * note.quarterLength
        result.append(neighbour)
        # tail note: same pitch as the original note again
        tail = copy.deepcopy(head)
        tail.quarterLength = durations[2] * note.quarterLength
        result.append(tail)
        return result
class TwoToThree(object):
    """
    Transformation that looks at the current and next note and
    interpolates a note in between (a generalization of the concept
    of a "passing" note).

    Total duration doesn't change: the current note's duration is
    spread over a copy of the current note and the interpolated note.
    """

    def __init__(self):
        pass

    def transform(self, scale, note1, note2):
        first = copy.deepcopy(note1)
        # last note of the part: nothing to interpolate towards
        if note2 is None:
            tail_stream = music21.stream.Stream()
            tail_stream.insert(0, first)
            return tail_stream
        # rests are passed through untouched
        if first.isRest:
            passthrough = music21.stream.Stream()
            passthrough.append(first)
            return passthrough
        span = scale.getPitches(first.pitch, note2.pitch)
        rounding = random.choice([ODD_UPPER, ODD_LOWER])
        duration_patterns = [  # [ 1.0/3, 2.0/3],
            # [ 2.0/3, 1.0/3],
            [0.5, 0.5],
            [0.75, 0.25],
            # [ 0.25, 0.75 ]
        ]
        durations = random.choice(duration_patterns)
        first.quarterLength = durations[0] * note1.quarterLength
        result = music21.stream.Stream()
        result.append(first)
        # passing note: the median pitch of the scale span between the two notes
        passing = copy.deepcopy(first)
        passing.pitch = median(span, rounding)
        passing.quarterLength = durations[1] * note1.quarterLength
        result.append(passing)
        return result
class TwoToFour(object):
    """
    Transformation that looks at the next note, creates notes that
    oscillate a single scale degree above and below that next note,
    and uses those notes in the current beat (kind of cambiata?).
    """

    def __init__(self):
        pass

    def transform(self, scale, note1, note2):
        first = copy.deepcopy(note1)
        # last note of the part: nothing to lean towards
        if note2 is None:
            tail_stream = music21.stream.Stream()
            tail_stream.insert(0, first)
            return tail_stream
        # rests are passed through untouched
        if first.isRest:
            passthrough = music21.stream.Stream()
            passthrough.append(first)
            return passthrough
        duration_patterns = [
            [0.5, 0.25, 0.25],
            [0.25, 0.5, 0.25],
        ]
        durations = random.choice(duration_patterns)
        direction = random.choice(["ascending", "descending"])
        opposite = "descending" if direction == "ascending" else "ascending"
        first.quarterLength = durations[0] * note1.quarterLength
        result = music21.stream.Stream()
        result.append(first)
        # neighbour of the next note, one scale degree in the chosen direction
        lean1 = copy.deepcopy(note2)
        lean1.pitch = scale.next(note2.pitch, direction=direction)
        lean1.quarterLength = durations[1] * note1.quarterLength
        result.append(lean1)
        # neighbour of the next note, one scale degree the other way
        lean2 = copy.deepcopy(note2)
        lean2.pitch = scale.next(note2.pitch, direction=opposite)
        lean2.quarterLength = durations[2] * note1.quarterLength
        result.append(lean2)
        return result
# list of transformations that transform a single note to a series of new notes
# identity is listed more than once to increase the chance of it getting chosen
single_note_transformers = [Identity,
                            Identity,
                            OneToThree
                            ]
# list of transformations that transform a single note based on both current and next note
# identity is listed more than once to give it more chance of being chosen
double_note_transformers = [Identity,
                            Identity,
                            TwoToThree,
                            TwoToFour,
                            ]
def spiceup_streams(streams, scale, repetitions=1):
    """
    Take a stream of parts and spice up every part using the
    Identity, OneToThree, TwoToThree, TwoToFour, ... transformations.

    * requires a scale in which to interpret the streams
    * can create "repetitions" spiced sequences of the given stream

    Returns a new stream with one spiced-up substream per input part.
    """
    newtotalstream = music21.stream.Stream()
    # the index from the original enumerate() and the repetition counter
    # were never used; iterate plainly instead
    for part in streams:
        newstream = music21.stream.Stream()
        for _ in range(repetitions):
            for note, nextnote in pairwise(part.notesAndRests):
                new_note = copy.deepcopy(note)
                new_nextnote = copy.deepcopy(nextnote)
                method = random.choice(methods)
                if method == SINGLE_NOTE:
                    trafo = random.choice(single_note_transformers)()
                    newstream.append(trafo.transform(scale, new_note).flat.elements)
                elif method == DOUBLE_NOTE:
                    trafo = random.choice(double_note_transformers)()
                    newstream.append(trafo.transform(scale, new_note, new_nextnote).flat.elements)
        newtotalstream.insert(0, newstream)
    return newtotalstream
def serialize_stream(stream, repeats=1):
    """
    Take a stream of parallel parts and return a tuple
    (new_stream, length): new_stream contains all parts sequenced one
    after the other (the whole sequence is emitted len(stream) times),
    and length is the quarterLength duration of one part (the last one
    visited), used by the caller as the canon entry delay.

    NOTE(review): the *repeats* parameter is currently unused — the
    number of passes is driven by len(stream); kept for compatibility.
    """
    new_stream = music21.stream.Stream()
    # initialize length so an empty input no longer raises NameError
    length = 0
    copies = len(stream)
    for _ in range(copies):
        for part in reversed(stream):
            length = part.duration.quarterLength
            new_stream.append(copy.deepcopy(part.flat.elements))
    return new_stream, length
def notate_voice(part, initial_rest, notesandrests):
    """
    Perform one canon voice on *part* (a scamp instrument).

    Waits *initial_rest* beats first (so voices enter staggered), then
    plays each note — at fixed velocity 0.7 — or waits each rest in
    *notesandrests* for its quarterLength duration.
    """
    if initial_rest:
        scamp.wait(initial_rest)
    for event in notesandrests:
        # isinstance replaces the type(x) == type(...) anti-pattern;
        # Note is tested first since Rest is a separate class
        if isinstance(event, music21.note.Note):
            part.play_note(event.pitch.midi, 0.7, event.quarterLength)
        elif isinstance(event, music21.note.Rest):
            scamp.wait(event.quarterLength)
def canon(serialized_stream, delay, voices, extra_transposition_map=None, tempo=120):
    """
    Take a serialized stream and sequence it against itself *voices*
    times, each voice entering *delay* beats after the previous one.

    extra_transposition_map maps a voice index to an extra interval
    applied to that voice's copy of the stream; voices without an
    entry get 0. Returns the scamp performance transcription.
    """
    # None default avoids the mutable-default-argument pitfall;
    # .get(v, 0) avoids the KeyError the original raised when the
    # caller requested more voices than the map covered
    if extra_transposition_map is None:
        extra_transposition_map = {}
    s = scamp.Session(tempo=tempo)
    s.fast_forward_in_beats(10000)
    parts = [s.new_part("piano") for _ in range(voices)]
    s.start_transcribing()
    initial_rests = [i * delay for i in range(voices)]
    for v in range(voices):
        interval = extra_transposition_map.get(v, 0)
        scamp.fork(notate_voice, args=(
            parts[v], initial_rests[v],
            copy.deepcopy(serialized_stream).transpose(interval).flat.notesAndRests))
    s.wait_for_children_to_finish()
    performance = s.stop_transcribing()
    return performance
if __name__ == "__main__":
    ############################################################################
    #
    # START OF USER EDITABLE CODE
    #
    ############################################################################
    # define a chord progression that serves as basis for the canon (change this!)
    path_to_musescore = ''  # change as needed; leave empty to use default settings
    chords = "C F Am Dm G C"
    # scale in which to interpret these chords
    scale = music21.scale.MajorScale("C")
    # realize the chords using the given number of voices (e.g. 4)
    voices = 5
    # realize the chords in octave 4 (e.g. 4)
    octave = 4
    # realize the chords using half notes (e.g. 1 for a whole note)
    quarterLength = 2
    # number of times to spice-up the streams (e.g. 2)
    spice_depth = 1
    # how many instances of the same chords to stack (e.g. 2)
    stacking = 1
    # define extra transpositions for different voices (e.g. +12, -24, ...)
    # note that the currently implemented method only gives good results with multiples of 12
    voice_transpositions = {VOICE1: 0, VOICE2: 0, VOICE3: -12, VOICE4: -24, VOICE5: -12}
    ############################################################################
    #
    # END OF USER EDITABLE CODE
    #
    ############################################################################
    # prepare some streams: one per voice
    # all bass notes of each chord form one voice
    # all 2nd notes of each chord form a second voice
    # ...
    # convert chords to notes and stuff into a stream
    streams = {}
    splitted_chords = chords.split(" ")
    for v in range(voices):
        streams[v] = music21.stream.Stream()
    # split each chord into a separate voice
    for c in splitted_chords:
        pitches = realize_chord(c, voices, octave, direction="descending")
        for v in range(voices):
            note = music21.note.Note(pitches[v])
            note.quarterLength = quarterLength
            streams[v].append(note)
    # combine all voices to one big stream
    totalstream = music21.stream.Stream()
    for r in range(stacking):
        for s in streams:
            totalstream.insert(0, copy.deepcopy(streams[s]))
    # add some spice to the boring chords. sugar and spice is always nice
    spiced_streams = [totalstream]
    for s in range(spice_depth):
        # each iteration spices up the stream that was already spiced up in the previous iteration,
        # leading to spicier and spicier streams
        spiced_streams.append(spiceup_streams(spiced_streams[s], scale))
    # debug code: visualize the spiced up chords, and allow the user to abort
    # canon generation if the result is too horrible
    if path_to_musescore:
        # point music21 at the user's MuseScore binary for notation display
        music21.environment.set('musicxmlPath', path_to_musescore)
    spiced_streams[-1].show("musicxml")
    answer = None
    while answer not in ['y', 'Y', 'n', 'N']:
        answer = input("continue to generate canon from this spiced up chord progression? [y/n]: ")
    if answer in ['y', 'Y']:
        # unfold the final spiced up chord progression into a serialized stream
        ser, delay = serialize_stream(spiced_streams[-1])
        # ser.show('musicxml')
        # and turn it into a canon. Add extra transpositions to some voices to create some diversity
        # NOTE(review): voice_transpositions has 5 entries while voices * stacking
        # may exceed 5 if stacking > 1 — verify the transposition map covers all voices
        canonized = canon(ser, delay, voices * stacking, voice_transpositions)
        # show the final product
        canonized.to_score(title="Canon", composer="canon-generator.py", max_divisor=16).show_xml()