import os
import shutil

from skimage import io
from skimage.color import gray2rgb, rgba2rgb

for tp in types:
    print('process {0} ...'.format(tp))
    for img in os.listdir(os.path.join(cfg['root'], tp, 'IMAGES')):
        filename = os.path.join(cfg['root'], tp, 'IMAGES', img)
        print(filename)
        # strip query strings such as '.jpg?raw=true' left over from downloads
        if '.jpg?' in filename:
            dst = filename.split('?')[0]
            shutil.move(filename, dst)
            filename = dst
        if filename.endswith(('.jpg', '.png', '.jpeg')):
            try:
                image = io.imread(filename)
                if image.ndim < 3:
                    # grayscale: replicate the single channel to RGB
                    image = gray2rgb(image)
                elif image.shape[2] == 4:
                    # RGBA: blend the alpha channel away
                    image = rgba2rgb(image)
                io.imsave(filename, image)
            except Exception:
                # unreadable or corrupt file: drop it
                os.remove(filename)
        else:
            # unsupported extension: drop it
            os.remove(filename)
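A quick standalone check (not from the project above) of what these two conversions do to array shapes; the image sizes are arbitrary:

import numpy as np
from skimage.color import gray2rgb, rgba2rgb

gray = np.zeros((64, 64), dtype=np.uint8)      # single-channel image
rgba = np.zeros((64, 64, 4), dtype=np.uint8)   # image with an alpha channel
rgba[..., 3] = 255                             # fully opaque alpha

print(gray2rgb(gray).shape)   # (64, 64, 3): the gray channel is replicated
print(rgba2rgb(rgba).shape)   # (64, 64, 3): alpha is blended against a white background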
import skimage.io
import skimage.color


def read_rgb_image(img_path):  # NOTE: the original header is cut off; this function name is assumed
    """
    Args:
        img_path: Full path to the image file to be read
    Returns:
        An MxNx3 array corresponding to the contents of an MxN image in RGB format.
        Returns None in case of errors
    """
    try:
        if img_path:
            # read image
            img = skimage.io.imread(img_path)
            # if RGBA, drop the alpha channel
            if len(img.shape) > 2 and img.shape[2] == 4:
                img = img[:, :, :3].copy()
            # if only one channel, convert to RGB
            if len(img.shape) == 2:
                img = skimage.color.gray2rgb(img)
            return img
    except Exception as e:
        print(e)
    return None
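Hedged usage of the helper above (the name read_rgb_image is itself an assumption and the path is a placeholder):

img = read_rgb_image('photos/example.jpg')  # placeholder path
if img is not None:
    print(img.shape)                        # (M, N, 3) for grayscale, RGB and RGBA inputs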
        numCh = 1
    else:
        # FIXME: check this point, the number of channels can also be the last element of the array...
        numCh = self.batcherLMDB.shapeImg[0]
    # check the number of channels of the input image
    if len(pimg.shape) < 3:
        numChImg = 1
    else:
        numChImg = 3
    # if the input image's channel count differs from the train database,
    # convert the input image to the database shape
    if numCh != numChImg:
        if numCh == 1:
            # FIXME: potential bug: rgb2gray silently rescales the value range from (0, 255) to (0, 1)
            pimg = skcolor.rgb2gray(pimg.astype(np.float32))
        else:
            pimg = skcolor.gray2rgb(pimg)
    timg = sktransform.resize(pimg.astype(np.float32) * self.batcherLMDB.scaleFactor,
                              self.batcherLMDB.shapeImg[1:])
    if numCh == 1:
        # single channel: add an explicit channel axis -> (1, H, W)
        timg = timg.reshape([1] + list(timg.shape))
    else:
        # HWC -> CHW layout
        timg = timg.transpose((2, 0, 1))
    if self.batcherLMDB.isRemoveMean:
        # FIXME: check this point: the mean-removal type should come from one config
        # (shared by the train and inference stages)
        timg -= self.batcherLMDB.meanChImage
    return timg
def inferListImagePath(self, listPathToImages, batchSizeInfer=None):
def my_label2rgboverlay(labels, colors, image, bglabel=None,
                        bg_color=(0., 0., 0.), alpha=0.2):
    image_float = gray2rgb(img_as_float(rgb2gray(image)))
    label_image = my_label2rgb(labels, colors, bglabel=bglabel,
                               bg_color=bg_color)
    output = image_float * alpha + label_image * (1 - alpha)
    return output
import numpy as np
import random as rd
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
from skimage import img_as_ubyte
from skimage.color import gray2rgb, rgba2rgb


def crf(original_image, annotated_image):
    rd.seed(123)
    # make sure the reference image is 3-channel RGB, uint8
    if len(original_image.shape) < 3:
        original_image = gray2rgb(original_image)
    if len(original_image.shape) == 3 and original_image.shape[2] == 4:
        original_image = rgba2rgb(original_image)
    original_image = img_as_ubyte(original_image)
    # move the class axis first and make the softmax output C-contiguous
    annotated_image = np.moveaxis(annotated_image, -1, 0)
    annotated_image = annotated_image.copy(order='C')
    d = dcrf.DenseCRF2D(original_image.shape[1], original_image.shape[0], 3)
    U = unary_from_softmax(annotated_image)
    d.setUnaryEnergy(U)
    d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
                          normalization=dcrf.NORMALIZE_SYMMETRIC)
    d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=original_image,
                           compat=10,
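The crf snippet is cut off inside the addPairwiseBilateral call. Purely as an illustrative, self-contained sketch of how a pydensecrf pipeline of this shape is typically finished (all sizes, kernel choices and the iteration count below are guesses, not taken from the original source):

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

# hypothetical inputs: a random RGB image and a random 3-class softmax map
H, W, n_labels = 120, 160, 3
rgb = np.ascontiguousarray((np.random.rand(H, W, 3) * 255).astype(np.uint8))
probs = np.ascontiguousarray(np.random.dirichlet(np.ones(n_labels), size=(H, W)).transpose(2, 0, 1))

d = dcrf.DenseCRF2D(W, H, n_labels)
d.setUnaryEnergy(unary_from_softmax(probs))
d.addPairwiseGaussian(sxy=(3, 3), compat=3)
d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=rgb, compat=10)
Q = d.inference(5)                             # run 5 mean-field iterations (count is a guess)
labels = np.argmax(Q, axis=0).reshape(H, W)    # MAP label per pixel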
def load_image(self, image_id):
    """Load the specified image and return a [H,W,3] Numpy array.
    """
    # Load image
    image = skimage.io.imread(self.image_info[image_id]['path'])
    # If grayscale, convert to RGB for consistency.
    if image.ndim != 3:
        image = skimage.color.gray2rgb(image)
    # If the image has an alpha channel, remove it for consistency.
    if image.shape[-1] == 4:
        image = image[..., :3]
    return image
def _process_batch(X, batchsize=100):
    '''
    Process a batch of images for training
    Args:
        X: a batch of RGB images
    '''
    grayscaled_rgb = gray2rgb(rgb2gray(X))  # convert to 3-channel grayscale images
    lab_batch = rgb2lab(X)  # convert to LAB colorspace
    X_batch = lab_batch[:, :, :, 0]  # extract L from LAB
    X_batch = X_batch.reshape(X_batch.shape + (1,))  # reshape into (batch, IMAGE_SIZE, IMAGE_SIZE, 1)
    X_batch = 2 * X_batch / 100 - 1.  # normalize L to [-1, 1]
    Y_batch = lab_batch[:, :, :, 1:] / 127  # extract AB from LAB and scale to roughly [-1, 1]
    features = _extract_features(grayscaled_rgb, batchsize)  # extract features from the grayscale images
    return ([X_batch, features], Y_batch)
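For reference, a minimal sketch (not part of the snippet above; the helper name lab_to_rgb is hypothetical) of how the scaling used in _process_batch can be inverted to turn the L channel and predicted AB channels back into RGB images:

import numpy as np
from skimage.color import lab2rgb

def lab_to_rgb(x_batch, y_batch):
    # undo the scaling above: L was mapped to [-1, 1], AB was divided by 127
    L = (x_batch[..., 0] + 1.) * 50.
    AB = y_batch * 127.
    lab = np.concatenate([L[..., np.newaxis], AB], axis=-1)
    return np.stack([lab2rgb(img) for img in lab])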
def my_label2rgboverlay(labels, cmap, image, bglabel=None,
                        bg_color=(0., 0., 0.), alpha=0.2):
    '''Superimpose a mask over an image.

    Convert a label mask to RGB by applying a color map and superimposing it
    over an image as a transparent overlay.'''
    image_float = gray2rgb(img_as_float(rgb2gray(image)))
    label_image = my_label2rgb(labels, cmap, bglabel=bglabel,
                               bg_color=bg_color)
    output = image_float * alpha + label_image * (1 - alpha)
    return output
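A self-contained sketch of the same alpha-blend overlay, substituting skimage.color.label2rgb for the project-specific my_label2rgb helper (the random inputs are placeholders):

import numpy as np
from skimage import img_as_float
from skimage.color import gray2rgb, rgb2gray, label2rgb

image = np.random.rand(64, 64, 3)                  # stand-in RGB image
labels = np.random.randint(0, 3, size=(64, 64))    # stand-in label mask

alpha = 0.2
image_float = gray2rgb(img_as_float(rgb2gray(image)))      # desaturated copy of the image
label_image = label2rgb(labels, bg_label=0)                 # colorize the label mask
overlay = image_float * alpha + label_image * (1 - alpha)   # transparent overlay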
    width = int(height * 1.0 / oldheight * oldwidth)
elif width and not height:
    height = int(width * 1.0 / oldwidth * oldheight)
d_s = "Video scaled to (M, N, T) = (%d, %d, %d)." % (height, width, length)
# Reshape and scale the data
savedata = np.zeros((length, height, width, 3), dtype=np.float32)
for i in range(length):
    if is_ndarray:
        frame = skimage.img_as_float(data[i, ...])
    elif is_timeseries:
        frame = data.data[..., i] / float(data.data.max())
        frame = skimage.img_as_float(frame)
    # resize wants the data to be between 0 and 1
    frame = sic.gray2rgb(sit.resize(frame, (height, width),
                                    mode='reflect'))
    savedata[i, ...] = frame * 255.0
# Set the input frame rate and frame size
inputdict = {}
inputdict['-r'] = str(int(fps))
inputdict['-s'] = '%dx%d' % (width, height)
# Set the output frame rate
outputdict = {}
outputdict['-r'] = str(int(fps))
# Save data to file
svio.vwrite(filename, savedata, inputdict=inputdict, outputdict=outputdict,
            backend=backend)
logging.getLogger(__name__).info("Saved video to file '%s'." % filename)
def prepare_input_image_batch(X, batchsize=100):
    '''
    This is a helper function which does the same as _preprocess_batch,
    but it is meant to be used with images during testing, not training.
    Args:
        X: a batch of grayscale images (pixel values in 0-255)
    '''
    X_processed = X / 255.  # normalize to [0, 1]
    X_grayscale = gray2rgb(rgb2gray(X_processed))  # ensure a 3-channel grayscale batch
    X_features = _extract_features(X_grayscale, batchsize)
    X_lab = rgb2lab(X_grayscale)[:, :, :, 0]  # keep only the L channel
    X_lab = X_lab.reshape(X_lab.shape + (1,))
    X_lab = 2 * X_lab / 100 - 1.  # rescale L to [-1, 1]
    return X_lab, X_features
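A rough usage sketch for the helper above (the 224x224 size and the synthetic input are assumptions, and _extract_features must be available from the surrounding project):

import numpy as np
from skimage.transform import resize

img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)   # stand-in for a loaded image
img = resize(img, (224, 224), anti_aliasing=True) * 255.      # resize returns floats in [0, 1]
batch = np.stack([img], axis=0)                                # shape (1, 224, 224, 3)
X_lab, X_features = prepare_input_image_batch(batch)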