def __init__(self,
             host: Optional[str] = None,
             port: int = 14322,
             clip: Optional[VideoNode] = None,
             threads: int = 0,
             log_level: Union[str, LL] = 'info') -> None:
    """ Constructor for Server. """
    # Accept the log level as a plain string or as an LL enum member.
    self.log_level = Util().as_enum(log_level) if isinstance(log_level, str) else log_level
    if not isinstance(clip, VideoNode):
        Util().message('crit', 'argument "clip" has wrong type.')
        sys.exit(2)
    self.clip = clip
    # Fall back to the VapourSynth core's thread count if none was requested.
    self.threads = core.num_threads if threads == 0 else threads
    self.frame_queue_buffer = dict()
    self.client_connected = False
    # Choose IPv4 or IPv6 from the host string, then open a TCP socket.
    self.soc = socket.socket(Util().get_proto_version(host), socket.SOCK_STREAM)
    self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    Util().message('info', 'socket created.')
    try:
        self.soc.bind((host, port))
        Util().message('info', 'socket bind complete.')
    except socket.error:
        Util().message('crit', f'bind failed. Error: {sys.exc_info()}')
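# Usage sketch (hypothetical names; assumes the module-level VapourSynth "core"
# used above): bind a server for a blank test clip on the default port.
test_clip = core.std.BlankClip(width=640, height=360, length=240)
server = Server(host='127.0.0.1', clip=test_clip)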
def __init__(self, clip):
    self.core = vs.get_core()
    # Validate the input before caching any of its properties.
    if not isinstance(clip, vs.VideoNode):
        raise TypeError(MODULE_NAME + ': clip is invalid.')
    self.clip = clip
    self.clip_width = clip.width
    self.clip_height = clip.height
    self.clip_bits = clip.format.bits_per_sample
    self.clip_color_family = clip.format.color_family
    self.clip_sample_type = clip.format.sample_type
    self.clip_id = clip.format.id
    self.clip_subsample_w = clip.format.subsampling_w
    self.clip_subsample_h = clip.format.subsampling_h
    self.clip_is_gray = clip.format.num_planes == 1
    # Register a format id for GRAY10 (single-plane, 10-bit integer).
    vs.GRAY10 = self.core.register_format(vs.GRAY, vs.INTEGER, 10, 0, 0).id
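# Usage sketch (hypothetical clip; classic get_core()-era API as above): once
# registered, vs.GRAY10 works like any built-in format id.
core = vs.get_core()
luma = core.std.ShufflePlanes(clip, planes=0, colorfamily=vs.GRAY)
luma10 = core.resize.Point(luma, format=vs.GRAY10)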
# Bit-depth/range converter (the function name and the src_* locals are
# assumed here; the dict helpers below supply the 'limited'/'full' lookups).
def Depth(src, bits, dither_type='error_diffusion', range=None, range_in=None):
    src_f = src.format
    src_cf = src_f.color_family
    src_bits = src_f.bits_per_sample
    src_sw = src_f.subsampling_w
    src_sh = src_f.subsampling_h
    # Anything below 32 bits stays integer; 32-bit output becomes float.
    dst_st = vs.INTEGER if bits < 32 else vs.FLOAT
    # Accept 'limited'/'full' strings as well as the numeric constants.
    if isinstance(range, str):
        range = RANGEDICT[range]
    if isinstance(range_in, str):
        range_in = RANGEDICT[range_in]
    # Nothing to convert if depth and range already match the target.
    if (src_bits, range_in) == (bits, range):
        return src
    out_f = core.register_format(src_cf, dst_st, bits, src_sw, src_sh)
    return core.resize.Point(src, format=out_f.id, dither_type=dither_type,
                             range=range, range_in=range_in)
TYPEDICT = {vs.VideoNode: 'a clip', int: 'an int', float: 'a float', bool: 'a bool', str: 'a str', list: 'a list'}
RANGEDICT = {'limited': 0, 'full': 1}
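# Usage sketch for the converter above (hypothetical 8-bit source clip):
# promote to 16 bits while keeping limited range on both sides.
src16 = Depth(src8, bits=16, range='limited', range_in='limited')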
def TempLinearApproximateMC(clip, meclip=None, radius=2, planes=None, subpel=4, subpelinterp=None,
                            dct=None, refine=False, blocksize=None, overlap=None, search=None,
                            searchparam=None, pelsearch=None, chromamotion=True, truemotion=True,
                            _lambda=None, lsad=None, pnew=None, plevel=None, globalmotion=True,
                            thsad=None, thscd1=None, thscd2=None, badrange=None, isse=None, gamma=None):
    #---------------------------------------------------------------------------------------------
    core = vs.get_core()
    if not isinstance(clip, vs.VideoNode):
        raise ValueError('MCDenoise.TempLinearApproximate: "clip" must be a clip!')
    if clip.format.id != vs.YUV420P8:
        raise ValueError('MCDenoise.TempLinearApproximate: Input format must be YUV420P8.')
    if not isinstance(radius, int) or radius < 1:
        raise ValueError("MCDenoise.TempLinearApproximate: 'radius' must be an int of at least 1.")
    #---------------------------------------------------------------------------------------------
    # Group keyword arguments by the plugin call they are forwarded to.
    tla_arguments = {"radius": radius, "planes": planes, "gamma": gamma}
    super_arguments = {"pel": subpel, "sharp": subpelinterp}
    compensate_arguments = {"thsad": thsad, "thscd1": thscd1, "thscd2": thscd2}
    recalculate_arguments = {"chroma": chromamotion, "search": search, "searchparam": searchparam,
                             "truemotion": truemotion, "lambda": _lambda}
    analyze_arguments = {
        "chroma": chromamotion,
        "search": search,
        "searchparam": searchparam,
        "truemotion": truemotion,
        "lambda": _lambda,
    }
def _init_registry(self):
    from vapoursynth import VideoNode, VideoFrame
    from yuuno.vs.clip import VapourSynthClip, VapourSynthFrame
    from yuuno.vs.clip import VapourSynthAlphaClip

    registry = Registry()
    registry.register(VapourSynthClip, VideoNode)
    registry.register(VapourSynthFrame, VideoFrame)
    registry.register(VapourSynthAlphaClip, AlphaOutputClip)
    if is_version(43):
        # Required so that IPython automatically supports alpha outputs
        from vapoursynth import AlphaOutputTuple
        registry.register(VapourSynthAlphaClip, AlphaOutputTuple)
    return registry
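# Illustrative sketch only (not yuuno's actual Registry class): the pattern
# above maps native types to wrapper classes so the correct wrapper can be
# chosen when an object is displayed.
class SimpleRegistry:
    def __init__(self):
        self._wrappers = {}

    def register(self, wrapper, native_type):
        self._wrappers[native_type] = wrapper

    def wrap(self, obj):
        # Walk the MRO so subclasses of a registered type match as well.
        for t in type(obj).__mro__:
            if t in self._wrappers:
                return self._wrappers[t](obj)
        raise TypeError('no wrapper registered for {!r}'.format(type(obj)))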
def YUVPreview(clips, matrix_s, writelist=None, matrix=None, full=None, depth=None,
               dither=None, kernel=None, a1=None, a2=None, prefer_props=None,
               drawtext=True, alignment=7):
    # Set function name for error messages
    funcName = "YUVPreview"

    if not isinstance(clips, list):
        raise TypeError(funcName + ': "clips" must be a list of clips!')
    # Avoid a mutable default argument and pad the write list to match "clips".
    if writelist is None or len(writelist) != len(clips):
        writelist = [None] * len(clips)

    # Remove all Nones from the clip list, keeping writelist aligned.
    tlist = []
    n = 0
    for c in clips:
        if c is None:
            del writelist[n]
        else:
            if not isinstance(c, vs.VideoNode):
                raise TypeError(funcName + ': "clips" must be a list of clips!')
            tlist.append(c)
            n = n + 1
    clips = tlist
    ref = clips[0]

    # Get properties of output clip
    if depth is None:
        depth = 10
    elif not isinstance(depth, int):
        raise TypeError(funcName + ': "depth" must be an int!')
    if depth >= 32:
        sample = vs.FLOAT
    else:
        sample = vs.INTEGER
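# Usage sketch (hypothetical clips; "matrix_s" takes the resizer's matrix
# strings): preview two YUV clips with BT.709 coefficients at 10-bit depth.
previews = YUVPreview([clip_a, clip_b], matrix_s='709', depth=10)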
    # "frame" and "compat" are produced by the enclosing method: the source
    # frame and a compat-format conversion of the clip used for display.
    return VapourSynthFrameWrapper(frame=frame, compat_frame=compat.get_frame(0))
class VapourSynthClip(VapourSynthClipMixin, HasTraits):
    clip: VideoNode = Instance(VideoNode)

    def __init__(self, clip):
        super(VapourSynthClip, self).__init__(clip=clip)


class VapourSynthFrame(VapourSynthClipMixin, HasTraits):
    frame: VideoFrame = Instance(VideoFrame)
    clip: VideoNode = Instance(VideoNode, allow_none=True)

    def __init__(self, frame):
        HasTraits.__init__(self, frame=frame)

    @observe("frame")
    def _frame_observe(self, value):
        # Re-wrap the clip whenever the observed frame trait changes.
        self.clip = self._wrap_frame(value['new'])


class VapourSynthAlphaFrameWrapper(HasTraits):
    clip: VapourSynthFrameWrapper = Instance(VapourSynthFrameWrapper)
    alpha: VapourSynthFrameWrapper = Instance(VapourSynthFrameWrapper)

    _cache: Image.Image = Instance(Image.Image, allow_none=True)
def __init__(self, main: AbstractMainWindow) -> None:
    super().__init__(main, 'Pipette')
    self.setup_ui()

    # Update the pixel readout whenever the cursor moves over the video view.
    self.main.graphics_view.mouseMoved.connect(self.mouse_moved)

    # Format strings and the maximum value for the source-plane readouts.
    self.pos_fmt = '{},{}'
    self.src_hex_fmt = '{:2X}'
    self.src_max_val: Union[int, float] = 2**8 - 1
    self.src_dec_fmt = '{:3d}'
    self.src_norm_fmt = '{:0.5f}'

    self.outputs: Dict[Output, vs.VideoNode] = {}

    set_qobject_names(self)
@inlines.register(VideoNode, format="image/png")
@inlines.register(VideoFrame, format="image/png")
def video_converter(obj):
    # Render frame 0 as an image, then hand back PNG bytes plus the display
    # metadata the frontend needs.
    img = converters.convert(obj, frame_no=0)
    return image_to_bytes(img), {
        'width': img.width,
        'height': img.height,
        'unconfined': True
    }
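# Usage sketch (assumes an IPython/Jupyter session with the converters above
# registered): evaluating a clip displays it inline, since video_converter is
# bound to VideoNode with MIME type "image/png".
clip = core.std.BlankClip(format=vs.RGB24, color=[128, 64, 192], length=1)
clip  # the registered converter renders frame 0 as an inline PNG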
def GetMatrix(clip, matrix=None, dIsRGB=None, id=False):
    # Set VS core and function name
    core = vs.get_core()
    funcName = 'GetMatrix'

    if not isinstance(clip, vs.VideoNode):
        raise TypeError(funcName + ': "clip" must be a clip!')

    # Get properties of input clip
    sFormat = clip.format

    sColorFamily = sFormat.color_family
    sIsRGB = sColorFamily == vs.RGB
    sIsYUV = sColorFamily == vs.YUV
    sIsGRAY = sColorFamily == vs.GRAY
    sIsYCOCG = sColorFamily == vs.YCOCG
    if sColorFamily == vs.COMPAT:
        raise ValueError(funcName + ': Color family *COMPAT* is not supported!')

    # Get properties of output clip
    if dIsRGB is None:
        dIsRGB = not sIsRGB
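# Usage sketch (hypothetical HD YUV clip): with "matrix" unset, GetMatrix
# picks a default from the clip's properties; id=True returns the numeric
# matrix id instead of the string name.
matrix_name = GetMatrix(clip)
matrix_id = GetMatrix(clip, id=True)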