# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
import time
import logging
from ledfx.effects import Effect
from threading import Thread
import voluptuous as vol
_LOGGER = logging.getLogger(__name__)
DEFAULT_RATE = 1.0 / 60.0
@Effect.no_registration
class TemporalEffect(Effect):
    """Base class for effects driven by a periodic background thread.

    Subclasses implement effect_loop(), which is invoked repeatedly while
    the effect is active. The loop is paced from DEFAULT_RATE (one frame
    at 60 Hz) scaled by the 'speed' config option.
    """

    _thread_active = False
    _thread = None

    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('speed', default = 1.0, description="Speed of the effect"): vol.Coerce(float)
    })

    def thread_function(self):
        """Run effect_loop repeatedly until the effect is deactivated."""
        while self._thread_active:
            startTime = time.time()

            # Treat the return value of the effect loop as a speed modifier
            # such that effects that are naturally faster or slower can have
            # a consistent feel.
            sleepInterval = self.effect_loop()
            if sleepInterval is None:
                sleepInterval = 1.0
            sleepInterval = sleepInterval * DEFAULT_RATE

            # Sleep only for the remainder of the frame period (scaled by
            # the configured speed), compensating for the time already
            # spent inside effect_loop().
            timeToSleep = (sleepInterval / self._config['speed']) - (time.time() - startTime)
            if timeToSleep > 0:
                time.sleep(timeToSleep)

    def effect_loop(self):
        """Called periodically by the effect thread.

        Subclasses override this. May return a float speed modifier
        (None is treated as 1.0) that scales the per-frame sleep.
        """
        pass
# TODO: Rationalize
# Process-wide singleton; created lazily on first request.
_legacy_melbank_source = None


def get_legacy_melbank_input_source(ledfx):
    """Return the shared LegacyMelbankInputSource, creating it on first use.

    The source is built once from the 'audio' section of the ledfx
    configuration and reused by every subsequent caller.
    """
    global _legacy_melbank_source

    if _legacy_melbank_source is not None:
        return _legacy_melbank_source

    audio_config = ledfx.config.get('audio', {})
    _legacy_melbank_source = LegacyMelbankInputSource(ledfx, audio_config)
    return _legacy_melbank_source
@Effect.no_registration
class LegacyAudioReactiveEffect(Effect):
    """
    Base for audio reactive effects. This simply subscribes to the shared
    melbank input source and forwards audio updates to subclasses. It can
    be expanded to host the common r/g/b filters.
    """

    def activate(self, channel):
        """Activate the effect and start receiving melbank updates."""
        super().activate(channel)
        source = get_legacy_melbank_input_source(self._ledfx)
        source.subscribe(self._audio_data_updated)

    def deactivate(self):
        """Stop receiving melbank updates, then deactivate the effect."""
        source = get_legacy_melbank_input_source(self._ledfx)
        source.unsubscribe(self._audio_data_updated)
        super().deactivate()
from ledfx.effects.temporal import TemporalEffect
from ledfx.color import COLORS, GRADIENTS
from ledfx.effects import Effect
import voluptuous as vol
import numpy as np
import logging
_LOGGER = logging.getLogger(__name__)
@Effect.no_registration
class GradientEffect(Effect):
    """
    Simple effect base class that supplies gradient functionality. This
    is intended for effects which, instead of outputting exact colors,
    output colors based upon some configured color palette.
    """

    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('gradient_name', description='Preset gradient name', default = 'Spectral'): vol.In(list(GRADIENTS.keys())),
        vol.Optional('gradient_roll', description='Amount to shift the gradient', default = 0): vol.Coerce(int),
    })

    # Lazily-built gradient lookup table (populated elsewhere in the class).
    _gradient_curve = None

    def _comb(self, N, k):
        """Return the binomial coefficient C(N, k) as an exact integer.

        Used for Bezier-style weighting when building the gradient curve.
        Returns 0 when k is outside [0, N]. (The previous body returned an
        unrelated melbank-filter expression referencing attributes this
        class does not define — clearly corrupted; restored here.)
        """
        N = int(N)
        k = int(k)
        if k < 0 or k > N:
            return 0
        # Multiplicative formula, iterating over the smaller of k and N-k;
        # the running product is always divisible at each step.
        result = 1
        for i in range(min(k, N - k)):
            result = result * (N - i) // (i + 1)
        return result

    # NOTE(review): the two methods below operate on melbank audio data and
    # look misplaced on a gradient base class — presumably leaked in from an
    # audio-reactive base. Kept byte-equivalent for compatibility; confirm.
    def sample_melbank(self, hz):
        """Samples the melbank curve at a given frequency"""
        return np.interp(hz, self.melbank_frequencies, self.melbank())

    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # every instance alive for the cache lifetime; consider cached_property
    # or a per-instance cache. Left unchanged to preserve behavior.
    @lru_cache(maxsize=32)
    def interpolated_melbank(self, size, filtered = True):
        """Returns a melbank curve interpolated up to a given size"""
        if filtered is True:
            return math.interpolate(self.melbank_filtered(), size)
        return math.interpolate(self.melbank(), size)
@Effect.no_registration
class AudioReactiveEffect(Effect):
"""
Base for audio reactive effects. This really just subscribes
to the melbank input source and forwards input along to the
subclasses. This can be expanded to do the common r/g/b filters.
"""
    def activate(self, channel):
        """Activate the effect and subscribe to the shared melbank source.

        Ensures ledfx.audio holds a MelbankInputSource (replacing it when
        it is missing or of a different class), caches it on self.audio,
        and subscribes _audio_data_updated to its updates.
        """
        _LOGGER.info('Activating AudioReactiveEffect.')
        super().activate(channel)
        # Replace the global audio source unless it is already exactly a
        # MelbankInputSource (id-of-class comparison, i.e. exact type match,
        # not isinstance).
        if not self._ledfx.audio or id(MelbankInputSource) != id(self._ledfx.audio.__class__):
            self._ledfx.audio = MelbankInputSource(self._ledfx, self._ledfx.config.get('audio', {}))
        # Keep a local handle so the effect can read audio data directly.
        self.audio = self._ledfx.audio
        self._ledfx.audio.subscribe(
            self._audio_data_updated)