import numpy as np

# assumed imports: in PlenoptiCam, yuv_conv and PlenopticamStatus live in the misc package and
# Normalizer handles dtype/range normalization (paths given here to make the snippets self-contained)
from plenopticam import misc
from plenopticam.misc import Normalizer


def correct_luma_outliers(img, n=2, perc=.2, sta=None):

    # status init
    sta = sta if sta is not None else misc.PlenopticamStatus()
    sta.status_msg('Hot pixel removal', True)

    # luma channel conversion
    luma = misc.yuv_conv(img.copy())[..., 0]

    for i in range(n, luma.shape[0]-n):
        for j in range(n, luma.shape[1]-n):
            win = luma[i-n:i+n+1, j-n:j+n+1]

            # hot pixel detection: few neighbours reach (1-perc) of the centre luma
            num_hi = len(win[win > luma[i, j]*(1-perc)])
            # dead pixel detection: few neighbours fall below (1+perc) of the centre luma
            num_lo = len(win[win < luma[i, j]*(1+perc)])

            if num_hi < win.size/5 or num_lo < win.size/5:
                # replace outlier by average of all directly adjacent pixels
                img[i, j, :] = (sum(sum(img[i-1:i+2, j-1:j+2, :]))-img[i, j, :])/8.

            # progress update

    return img
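
# Hedged usage sketch (synthetic data, not from the original source): inject an artificial hot
# pixel into a flat RGB image and let the filter replace it with the mean of its 8 neighbours.
demo = np.random.default_rng(0).uniform(0.4, 0.6, (32, 32, 3))
demo[16, 16, :] = 1.0
cleaned = correct_luma_outliers(demo.copy(), n=2, perc=.2)
print(demo[16, 16, 0], '->', cleaned[16, 16, 0])   # ~1.0 -> ~0.5
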
def michelson_contrast(img_tile):
    """ https://colorusage.arc.nasa.gov/luminance_cont.php """

    lum_tile = misc.yuv_conv(img_tile)[..., 0]

    c_m = (lum_tile.max() - lum_tile.min()) / (lum_tile.max() + lum_tile.min())

    return c_m
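
# Worked example (illustrative values, assuming yuv_conv uses standard luma weights that sum to one):
# a tile whose darkest and brightest luma are 0.1 and 0.9 yields (0.9 - 0.1) / (0.9 + 0.1) = 0.8.
tile = np.full((4, 4, 3), 0.1)
tile[0, 0, :] = 0.9
print(michelson_contrast(tile))   # ~0.8
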
def auto_contrast(img_arr, p_lo=0.001, p_hi=0.999, ch=0):
    ''' according to Adi Shavit on https://stackoverflow.com/questions/9744255/instagram-lux-effect/9761841#9761841 '''

    # estimate contrast and brightness parameters (by default: achromatic "luma" channel only)
    val_lim = 2**16-1
    img_yuv = misc.yuv_conv(img_arr)
    h = np.histogram(img_yuv[..., ch], bins=np.arange(val_lim))[0]
    H = np.cumsum(h)/float(np.sum(h))

    try:
        # luma values at which the normalized cumulative histogram reaches the lower and upper percentiles
        # (find_x_given_y is a helper defined alongside this function in the original source)
        px_lo = find_x_given_y(p_lo, np.arange(val_lim), H)
        px_hi = find_x_given_y(p_hi, np.arange(val_lim), H)
    except Exception:
        px_lo = 0
        px_hi = 1

    # solve the 2x2 system that maps px_lo to 0 and px_hi to val_lim
    A = np.array([[px_lo, 1], [px_hi, 1]])
    b = np.array([0, val_lim])
    contrast, brightness = np.dot(np.linalg.inv(A), b)

    return contrast, brightness
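
# Worked check of the two-point solve above (the percentile positions 1000 and 60000 are made up):
# the closed form is contrast = val_lim / (px_hi - px_lo) and brightness = -contrast * px_lo,
# e.g. 65535 / 59000 ~ 1.1107 and ~ -1110.8, so px_lo maps to 0 and px_hi maps to val_lim.
A = np.array([[1000, 1], [60000, 1]])
b = np.array([0, 2**16 - 1])
print(np.dot(np.linalg.inv(A), b))   # ~ [1.1107, -1110.8]
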
def robust_awb(self, t=0.3, max_iter=1000):
    ''' inspired by Jun-yan Huo et al. and http://web.stanford.edu/~sujason/ColorBalancing/Code/robustAWB.m '''

    img = Normalizer(self.central_view).type_norm(dtype='float16', lim_min=0, lim_max=1.0)
    ref_pixel = img[0, 0, :].copy()
    u = .01     # gain step size
    a = .8      # double step threshold
    b = .001    # convergence threshold
    gains_adj = np.array([1., 1., 1.])

    # sRGB to XYZ conversion matrix (row-major form commented out, transposed form kept active)
    #sRGBtoXYZ = [[0.4124564, 0.3575761, 0.1804375], [0.2126729, 0.7151522, 0.0721750], [0.0193339, 0.1191920, 0.9503041]]
    sRGBtoXYZ = [[0.4124564, 0.2126729, 0.0193339], [0.3575761, 0.7151522, 0.1191920], [0.1804375, 0.0721750, 0.9503041]]

    for i in range(max_iter):
        img_yuv = misc.yuv_conv(img)

        # gray candidates: pixels whose combined chroma magnitude is small relative to their luma
        f = (abs(img_yuv[..., 1]) + abs(img_yuv[..., 2])) / img_yuv[..., 0]
        grays = np.zeros(img_yuv.shape)
        grays[f < t] = img_yuv[f < t]
        if np.sum(f < t) == 0:
            self.sta.status_msg('No valid gray pixels found.', self.cfg.params[self.cfg.opt_prnt])
            break

        u_bar = np.mean(grays[..., 1])  # estimated mean U chroma of the gray candidates
        v_bar = np.mean(grays[..., 2])  # estimated mean V chroma of the gray candidates
        # rgb_est = misc.yuv_conv(np.array([100, u_bar, v_bar]), inverse=True)  # convert average gray from YUV to RGB

        # U > V: blue needs adjustment, otherwise red is treated
        err, ch = (u_bar, 2) if abs(u_bar) > abs(v_bar) else (v_bar, 0)

        # the feedback step below was truncated in the captured snippet; it is restored here following
        # the referenced robustAWB scheme (larger steps when far off, stop once the cast is negligible)
        if abs(err) >= a:
            delta = 2 * np.sign(err) * u    # accelerate the gain step for a strong color cast
        elif abs(err) < b:
            break                           # convergence reached
        else:
            delta = err * u

        # negative feedback: lower the gain of the offending channel and re-apply it to the image
        gains_adj[ch] -= delta
        img = np.dot(img, np.diag(gains_adj))
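
# Illustration of the gray-pixel criterion used in robust_awb (YUV values are made up):
# f = (|U| + |V|) / Y has to stay below the threshold t for a pixel to count as gray.
for y_, u_, v_ in [(0.5, 0.03, -0.02), (0.5, 0.20, 0.10)]:
    f_val = (abs(u_) + abs(v_)) / y_
    print(f_val, 'gray candidate' if f_val < 0.3 else 'rejected')
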
def correct_contrast(img_arr, contrast=1, brightness=0, ch=0):

    # color model conversion
    img_yuv = misc.yuv_conv(img_arr)

    # convert to float
    f = img_yuv[..., ch].astype(np.float32)

    # perform auto contrast (by default: "value" channel only)
    img_yuv[..., ch] = contrast * f + brightness

    # clip to input extrema to remove contrast outliers
    img_yuv[..., ch][img_yuv[..., ch] < img_arr.min()] = img_arr.min()
    img_yuv[..., ch][img_yuv[..., ch] > img_arr.max()] = img_arr.max()

    # color model conversion
    img = misc.yuv_conv(img_yuv, inverse=True)

    return img
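
# Hedged usage sketch (illustrative values, assuming misc.yuv_conv accepts 16-bit RGB input):
# apply a mild linear luma stretch to a synthetic image with the function above.
raw = np.random.default_rng(1).uniform(5000, 50000, (64, 64, 3)).astype(np.uint16)
enhanced = correct_contrast(raw, contrast=1.1, brightness=-1000, ch=0)
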