Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
>>> timespan
>
>>> timespan.part
>>> timespan = scoreTree.findNextPitchedTimespanInSameStreamByClass(timespan)
>>> timespan
>
>>> timespan.part
'''
from music21 import stream
if classList is None:
classList = (stream.Part,)
if not isinstance(pitchedTimespan, spans.PitchedTimespan):
message = 'PitchedTimespan {!r}, must be an PitchedTimespan'.format(pitchedTimespan)
raise TimespanTreeException(message)
verticality = self.getVerticalityAt(pitchedTimespan.offset)
while verticality is not None:
verticality = verticality.nextVerticality
if verticality is None:
return None
for nextPitchedTimespan in verticality.startTimespans:
if (nextPitchedTimespan.getParentageByClass(classList) is
pitchedTimespan.getParentageByClass(classList)):
return nextPitchedTimespan
def rhythmLine(baseNote=None, minLength=8.0, maxProbability=0.5):
    # Build a Part of notes copied from ``baseNote``, randomly altering each
    # copy's rhythm.  The chance that a note's duration is altered rises
    # linearly from 0 toward ``maxProbability`` as the stream's total length
    # approaches ``minLength`` quarter notes.
    #
    # baseNote: model note to copy (defaults to a quarter note).
    # minLength: target total length of the stream, in quarter lengths.
    # maxProbability: upper bound on the per-note alteration probability.
    if baseNote is None:
        baseNote = note.Note(type='quarter')
    newStream = stream.Part()
    while newStream.duration.quarterLength < minLength:
        # alteration probability grows with how much of the stream is built
        currentProbability = (newStream.duration.quarterLength / minLength) * maxProbability
        newNote = copy.deepcopy(baseNote)
        x = random.random()
        while x < currentProbability:
            # print(x, currentProbability)
            # alterRhythm presumably returns a modified duration — each
            # successive alteration is 25% less likely than the last
            newNote.duration = alterRhythm(newNote.duration)
            currentProbability *= 0.75
            x = random.random()
        y = random.random()
        z = random.random()
        if z < 0.5:
            direction = 1
        # NOTE(review): fragment appears truncated here — the z >= 0.5
        # branch and the uses of `y` and `direction` are not visible.
startingPitch = 'C1'
):
totalLoops = totalLoops * 1.01
jMax = loopLength * totalLoops
p = pitch.Pitch(startingPitch)
if isinstance(scaleType, scale.Scale):
octo = scaleType
else:
octo = scaleType(p)
s = stream.Score()
s.metadata = metadata.Metadata()
s.metadata.title = 'Pendulum Waves'
s.metadata.composer = 'inspired by http://www.youtube.com/watch?v=yVkdfJ9PkRQ'
parts = [stream.Part(), stream.Part(), stream.Part(), stream.Part()]
parts[0].insert(0, clef.Treble8vaClef())
parts[1].insert(0, clef.TrebleClef())
parts[2].insert(0, clef.BassClef())
parts[3].insert(0, clef.Bass8vbClef())
for i in range(totalParts):
j = 1.0
while j < (jMax + 1.0):
ps = p.ps
if ps > 84:
active = 0
elif ps >= 60:
active = 1
elif ps >= 36:
active = 2
elif ps < 36:
active = 3
def testMicrotonalOutputD(self):
    '''
    Check MIDI channel and program-change assignments for parts that
    contain microtonal pitches (e.g. 'G~3').
    '''
    from music21 import instrument, stream, note
    instrumentClasses = [instrument.Harpsichord, instrument.Viola,
                         instrument.ElectricGuitar, instrument.Flute]
    # per part: (note count, quarterLength, pitch names cycled through)
    partParams = [(8, 1, ['C6']), (4, 2, ['G3', 'G~3']),
                  (2, 4, ['E4', 'E5']), (6, 1.25, ['C5'])]
    score = stream.Score()
    for instClass, (count, ql, pitchNames) in zip(instrumentClasses, partParams):
        part = stream.Part()
        part.insert(0, instClass())  # must call instrument to create instance
        for idx in range(count):
            part.append(note.Note(pitchNames[idx % len(pitchNames)],
                                  quarterLength=ql))
        score.insert(0, part)
    # score.show('midi')
    mts = streamHierarchyToMidiTracks(score)
    # print(mts[0])
    self.assertEqual(mts[0].getChannels(), [1])
    self.assertEqual(mts[0].getProgramChanges(), [6])
    self.assertEqual(mts[1].getChannels(), [2, 5])
    self.assertEqual(mts[1].getProgramChanges(), [41])
m3.append(DaCapo())
s.append([m1, m2, m3])
ex = repeat.Expander(s)
self.assertEqual(ex.isExpandable(), True)
# missing segno
s = stream.Part()
m1 = stream.Measure()
m2 = stream.Measure()
m3 = stream.Measure()
m3.append(DalSegno())
s.append([m1, m2, m3])
ex = repeat.Expander(s)
self.assertEqual(ex.isExpandable(), False)
s = stream.Part()
m1 = stream.Measure()
m2 = stream.Measure()
m2.append(Segno())
m3 = stream.Measure()
m3.append(DalSegno())
s.append([m1, m2, m3])
ex = repeat.Expander(s)
self.assertEqual(ex.isExpandable(), True)
# dc al fine
s = stream.Part()
m1 = stream.Measure()
m2 = stream.Measure()
m2.append(Fine())
m3 = stream.Measure()
m3.append(DaCapoAlFine())
def testPartReductionE(self):
    '''Artificially create test cases.
    '''
    from music21 import dynamics, analysis
    score = stream.Score()
    upperPart = stream.Part()
    upperPart.id = 0
    lowerPart = stream.Part()
    lowerPart.id = 1
    # a False entry stands for a two-quarter rest in both parts
    for ql in [2, 2, False, 2, False, 2]:
        for part in (upperPart, lowerPart):
            if ql:
                part.append(note.Note(quarterLength=ql))
            else:
                part.append(note.Rest(quarterLength=2))
    # contrasting dynamic profiles for the two parts
    for offset, mark in [(0, 'p'), (2, 'fff'), (6, 'ppp')]:
        upperPart.insert(offset, dynamics.Dynamic(mark))
    for offset, mark in [(0, 'mf'), (2, 'f'), (6, 'mf')]:
        lowerPart.insert(offset, dynamics.Dynamic(mark))
    upperPart.makeMeasures(inPlace=True)
    lowerPart.makeMeasures(inPlace=True)
    score.insert(0, upperPart)
    score.insert(0, lowerPart)
environLocal.printDebug('*** processing a ')
# That's an outright lie. We're also processing , , and other elements!
# Get a tuple of all the @n attributes for the tags in this score. Each tag
# corresponds to what will be a music21 Part.
allPartNs = allPartsPresent(elem)
# This is the actual processing.
parsed = sectionScoreCore(elem, allPartNs, slurBundle=slurBundle)[0]
# Convert the dict to a Score
# We must iterate here over "allPartNs," which preserves the part-order found in the MEI
# document. Iterating the keys in "parsed" would not preserve the order.
environLocal.printDebug('*** making the Score')
theScore = [stream.Part() for _ in range(len(allPartNs))]
for i, eachN in enumerate(allPartNs):
for eachObj in parsed[eachN]:
theScore[i].append(eachObj)
theScore = stream.Score(theScore)
# put slurs in the Score
theScore.append(slurBundle.list)
# TODO: when all the Slur objects are are at the end, they'll only be outputted properly if the
# whole Score is outputted. show()-ing one Part or Measure won't display the slurs.
return theScore
bassLine.append(copy.deepcopy(bassNote))
rhPitches = possibA[0:-1]
rhChord = chord.Chord(rhPitches)
rhChord.quarterLength = self._segmentList[segmentIndex].quarterLength
rightHand.append(rhChord)
rightHand.insert(0.0, clef.TrebleClef())
rightHand.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
if r is not None:
rightHand[0].pop(3)
rightHand[0].padAsAnacrusis()
else: # Chorale-style output
upperParts = []
for partNumber in range(len(possibilityProgression[0]) - 1):
fbPart = stream.Part()
sol.insert(0.0, fbPart)
fbPart.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
if r is not None:
fbPart.append(copy.deepcopy(r))
upperParts.append(fbPart)
for segmentIndex in range(len(self._segmentList)):
possibA = possibilityProgression[segmentIndex]
bassNote = self._segmentList[segmentIndex].bassNote
bassLine.append(copy.deepcopy(bassNote))
for partNumber in range(len(possibA) - 1):
n1 = note.Note(possibA[partNumber])
n1.quarterLength = self._segmentList[segmentIndex].quarterLength
upperParts[partNumber].append(n1)
def rhythmLine(baseNote=None, minLength=8.0, maxProbability=0.5):
    # Build a Part of notes copied from ``baseNote``, randomly altering each
    # copy's rhythm; the alteration probability rises linearly from 0 toward
    # ``maxProbability`` as the stream approaches ``minLength`` quarter notes.
    #
    # baseNote: model note to copy (defaults to a quarter note).
    # minLength: target total length of the stream, in quarter lengths.
    # maxProbability: upper bound on the per-note alteration probability.
    if baseNote is None:
        baseNote = note.Note(type='quarter')
    newStream = stream.Part()
    while newStream.duration.quarterLength < minLength:
        # probability proportional to how much of the stream exists so far
        currentProbability = (newStream.duration.quarterLength / minLength) * maxProbability
        newNote = copy.deepcopy(baseNote)
        x = random.random()
        while x < currentProbability:
            # print(x, currentProbability)
            # each successive alteration is 25% less likely than the last
            newNote.duration = alterRhythm(newNote.duration)
            currentProbability *= 0.75
            x = random.random()
        y = random.random()
        z = random.random()
        if z < 0.5:
            direction = 1
        # NOTE(review): fragment appears truncated here — the z >= 0.5
        # branch and the uses of `y` and `direction` are not visible.
def realizeFiguredBass(figuredBassList, scaleValue, scaleMode = 'major'):
    # Realize a figured bass line as four voices (soprano/alto/tenor/bass).
    #
    # figuredBassList: list of (bassNote, notation) pairs; the first pair
    #     seeds the starting possibilities and the remainder are consumed
    #     in order to extend the possibility chain.
    # scaleValue, scaleMode: tonic and mode for the FiguredBassScale used
    #     to resolve each figure.
    #
    # NOTE(review): this definition continues beyond the visible fragment.
    sopranoLine = stream.Part()
    altoLine = stream.Part()
    tenorLine = stream.Part()
    bassLine = stream.Part()
    fbScale = figuredBassScale.FiguredBassScale(scaleValue, scaleMode)
    # WARNING: pop(0) mutates the caller's list (and is O(n) per call)
    (firstBassNote, firstNotation) = figuredBassList.pop(0)
    startPossibilities = getStartingPitches(fbScale, firstBassNote.pitch, firstNotation)
    #startPossibilities = [[pitch.Pitch('E5'), pitch.Pitch('G4'), pitch.Pitch('C4'), pitch.Pitch('C3')]]
    allPossibilities = [startPossibilities]
    allPossibleMovements = []
    prevPossibilities = startPossibilities
    # walk the remaining bass notes, expanding possibilities step by step
    for (nextBassNote, nextNotation) in figuredBassList:
        nextBass = nextBassNote.pitch
        (nextPossibilities, nextMovements) = getNextPossibilities(fbScale, prevPossibilities, nextBass, nextNotation)