def __init__(self):
    persistent_attrs_init(self)
    self.synth = Fluidsynth()
    self.devicechains = [DeviceChain(self, i) for i in range(16)]
    self.grids = []
    self.cpu = CPU(self)
    self.clock = Clock(self)
    self.looper = Looper(self)
    self.mixer = Mixer(self)
    self.detect_devices()
    # FIXME: probably make this configurable somehow (env var...?)
    if False:
        from termpad import ASCIIGrid
        self.grids.append(ASCIIGrid(self, 0, 1))
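The `if False:` block above is dead code waiting on the FIXME; a minimal sketch of the env-var gate the comment hints at, as a drop-in for that block (the variable name TERMPAD_ASCII_GRID is an assumption, not something the project defines):

import os

# Hypothetical env var; the FIXME only says "env var...?", so the name
# TERMPAD_ASCII_GRID is an assumption. Any non-empty value enables the grid.
if os.environ.get("TERMPAD_ASCII_GRID"):
    from termpad import ASCIIGrid
    self.grids.append(ASCIIGrid(self, 0, 1))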
_, batched_tgt_sentences = decoder.feed_dict(tgt_sentences, len(src_sentences), batch_size, feed_dicts)
feed_dropout_and_train(feed_dicts, dropout_placeholder,
                       args.dropout_keep_prob, training_placeholder, train)
if tgt_sentences:
    postprocessed_tgt = [[postedit(s) for s in batch] for batch in batched_tgt_sentences]
else:
    postprocessed_tgt = None
return feed_dicts, batched_src_sentences, postprocessed_tgt
trainer = CrossEntropyTrainer(decoder, args.l2_regularization)
if args.mixer:
    xent_calls, moving_calls = args.mixer
    trainer = Mixer(decoder, trainer, xent_calls, moving_calls)

log("Initializing the TensorFlow session.")
sess = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=4,
                                        intra_op_parallelism_threads=4))
# initialize_all_variables() is the pre-1.0 TensorFlow spelling of
# global_variables_initializer().
sess.run(tf.initialize_all_variables())

val_feed_dicts, batched_val_src_sentences, batched_val_tgt_sentences = \
    get_feed_dicts(val_src_sentences, val_tgt_sentences,
                   1 if args.beamsearch else args.batch_size, train=False)
train_feed_dicts, batched_train_src_sentences, batched_train_tgt_sentences = \
    get_feed_dicts(train_src_sentences, train_tgt_sentences, args.batch_size, train=True)
if args.test_output_file:
    test_feed_dicts, batched_test_src_sentences, _ = \
        get_feed_dicts(test_src_sentences, None,
                       # arguments assumed to mirror the validation call above
                       1 if args.beamsearch else args.batch_size, train=False)
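A minimal sketch of how the batched feed dicts would be consumed in a pre-1.0 TensorFlow training loop; `train_op`, `loss`, and `args.epochs` are hypothetical names, since the snippet does not show what CrossEntropyTrainer or Mixer actually expose:

# Sketch only: train_op and loss are hypothetical tensors, and
# args.epochs is a hypothetical flag; the trainer's real API is not shown.
for epoch in range(args.epochs):
    for feed_dict in train_feed_dicts:
        _, batch_loss = sess.run([train_op, loss], feed_dict=feed_dict)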
kwargs = get_mixer_args(subject, xfm, types)

if hasattr(data, "get_affine"):
    # this is a nibabel file -- it has the nifti headers intact!
    if isinstance(xfm, str):
        kwargs['coords'] = db.surfs.getCoords(subject, xfm, hemisphere=hemisphere, magnet=data.get_affine())
    data = data.get_data()
elif isinstance(xfm, np.ndarray):
    ones = np.ones(len(interp[0](0)))
    coords = [np.dot(xfm, np.hstack([i(0), ones]).T)[:3].T for i in interp]
    kwargs['coords'] = [c.round().astype(np.uint32) for c in coords]

kwargs['data'] = data

import mixer
m = mixer.Mixer(**kwargs)
m.edit_traits()
return m
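The `np.ndarray` branch treats `xfm` as a 4x4 affine applied to homogeneous coordinates; a self-contained rerun of that arithmetic with an identity transform:

import numpy as np

xfm = np.eye(4)                       # identity 4x4 affine
pts = np.array([[1.0, 2.0, 3.0]])     # one (x, y, z) point
ones = np.ones(len(pts))
# Append a homogeneous 1 to each point, apply the affine, and keep the
# first three rows -- the same arithmetic as the list comprehension above.
vox = np.dot(xfm, np.hstack([pts, ones[:, None]]).T)[:3].T
print(vox.round().astype(np.uint32))  # -> [[1 2 3]]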
def incIgainVolume(self):
    self.igainVolume += 5
    if self.igainVolume > 100: self.igainVolume = 100
    os.system('aumix -i+5 &> /dev/null &')

def setOgainVolume(self, volume):
    """For Ogain on SB Live Cards"""
    if volume > 100: volume = 100
    elif volume < 0: volume = 0
    self.ogainVolume = volume
    os.system('aumix -o%s &> /dev/null &' % volume)

# Simple test...
if __name__ == '__main__':
    mixer = Mixer()
    mixer.setPcmVolume(50)
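`&> /dev/null` is a bash redirection, and `os.system` runs its command through `/bin/sh`, which may parse it differently; a sketch of the same clamp-and-shell-out logic using `subprocess` instead, assuming Python 3 and that fire-and-forget behavior is still wanted:

import subprocess

def set_ogain_volume(volume):
    # Same 0..100 clamp as setOgainVolume above.
    volume = max(0, min(100, volume))
    # No shell, so no bash-only redirection; output is discarded directly.
    subprocess.Popen(['aumix', '-o%d' % volume],
                     stdout=subprocess.DEVNULL,
                     stderr=subprocess.DEVNULL)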
"display" : "Min Tempo",
"optional" : True,
"description": "the minimum tempo. If omitted, there is no minimum"
},
"max_tempo": {
"type" : "number",
"display" : "Max Tempo",
"optional" : True,
"description": "the maximum tempo. If omitted, there is no maxiumum"
}
}
},
{
"name" : "Mixer",
"display": "mixer",
"class": mixer.Mixer,
"type" : "combiner",
"description": "Mixes input tracks while maintaining a set of rules.",
"help" : """ This component will mix tracks from the various input
streams, while maintaining a set of rules that govern how the tracks
will be ordered.
<br>
Input streams are on the <b> green </b> port, banned tracks
are on the <b> red</b> port and banned artists are on
the <b> orange </b> port. If <b> fail fast </b> is set, then the
order of the input tracks is guaranteed to be preserved and the
mixer will stop producing tracks when it is no longer able to
guarantee the contraints. If <b> fail fast </b> is not set, then
the mixer will find the next best track on the next input stream
that best fits the current constraints and will continue to produce
tracks as long as any stream is producing tracks.
""",
def get_singleton():
    global _singleton
    # One-time init
    if _singleton is None:
        _singleton = Mixer()
    return _singleton
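The lazy init above races if two threads call `get_singleton` before `_singleton` is set; a lock-guarded variant, sketched on the assumption that multi-threaded callers are possible:

import threading

_singleton = None
_singleton_lock = threading.Lock()

def get_singleton():
    global _singleton
    if _singleton is None:             # fast path once initialized
        with _singleton_lock:
            if _singleton is None:     # re-check under the lock
                _singleton = Mixer()
    return _singleton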
Daemon()

# Iterate over a copy: removeHandler() mutates the handlers list, and
# removing entries while iterating the live list would skip handlers.
for handler in list(logging.root.handlers):
    logging.root.removeHandler(handler)
logging.root.addHandler(customlog.MultiprocessingStreamHandler())

log = logging.getLogger(config.log_name)
log.info("Starting %s...", config.app_name)

track_queue = multiprocessing.Queue(1)
log.info("Initializing read queue to hold %2.2f seconds of audio.",
         config.frontend_buffer)
v2_queue = BufferedReadQueue(int(config.frontend_buffer / SECONDS_PER_FRAME))
info_queue = multiprocessing.Queue()

mixer = Mixer(iqueue=track_queue,
              oqueues=(v2_queue.raw,),
              infoqueue=info_queue)
mixer.start()

if stream:
    import brain
    Hotswap(track_queue.put, brain).start()

Hotswap(InfoHandler.add, info, 'generate', info_queue, first_frame).start()
Hotswap(MonitorSocket.update,
        statistician, 'generate',
        lambda: StreamHandler.relays,
        InfoHandler.stats,
        mp3_queue=v2_queue).start()

tornado.ioloop.PeriodicCallback(
    lambda: restart.check('restart.txt',
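`multiprocessing.Queue(1)` gives `track_queue` a capacity of one, so the producer wired up through `Hotswap` blocks on `put` until the mixer has consumed the previous track. A standalone sketch of that backpressure behavior (names are illustrative):

import multiprocessing
import time

def producer(q):
    # With maxsize=1, each put() blocks until the consumer takes the
    # previous item, so the producer can never run ahead of the mixer.
    for i in range(3):
        q.put(i)
        print("queued", i)

if __name__ == '__main__':
    q = multiprocessing.Queue(1)
    p = multiprocessing.Process(target=producer, args=(q,))
    p.start()
    for _ in range(3):
        time.sleep(0.5)
        print("consumed", q.get())
    p.join()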