from sklearn import cross_validation

cv = cross_validation.KFold(len(ifeatures), 5, shuffle=True, random_state=123)
scores0 = cross_validation.cross_val_score(
    clf, ifeatures, labels, cv=cv)
print('Accuracy (5 fold x-val) with Logistic Regression [image features]: {:.1%}'.format(
    scores0.mean()))
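# Hedged sketch (not from the original source): the `sklearn.cross_validation`
# module was removed in later scikit-learn releases; the equivalent 5-fold
# evaluation with `sklearn.model_selection`, assuming the same `clf`,
# `ifeatures`, and `labels`:
from sklearn.model_selection import KFold, cross_val_score
cv = KFold(n_splits=5, shuffle=True, random_state=123)
scores0 = cross_val_score(clf, ifeatures, labels, cv=cv)
print('Accuracy (5 fold x-val) with Logistic Regression [image features]: {:.1%}'.format(
    scores0.mean()))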
from sklearn.cluster import KMeans
from mahotas.features import surf

print('Computing SURF descriptors...')
alldescriptors = []
for im, _ in images():
    im = mh.imread(im, as_grey=True)
    im = im.astype(np.uint8)
    # To use dense sampling, you can try the following line:
    # alldescriptors.append(surf.dense(im, spacing=16))
    alldescriptors.append(surf.surf(im, descriptor_only=True))
print('Descriptor computation complete.')

k = 256
km = KMeans(k)

concatenated = np.concatenate(alldescriptors)
print('Number of descriptors: {}'.format(
    len(concatenated)))
# Use only every 64th descriptor to keep the clustering step fast
concatenated = concatenated[::64]
print('Clustering with K-means...')
km.fit(concatenated)
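# Hedged sketch (assumes `km`, `alldescriptors`, and `k` from above): project
# each image's SURF descriptors onto the learned visual words to obtain a
# bag-of-visual-words histogram per image.
sfeatures = []
for d in alldescriptors:
    c = km.predict(d)
    sfeatures.append(np.bincount(c, minlength=k))
sfeatures = np.array(sfeatures, dtype=float)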

def read_bw(fname, options):
    '''Read image `fname` as greyscale

    Parameters
    ----------
    fname : str, file-name
    options : argparse result

    Returns
    -------
    image : ndarray
        Two dimensional ndarray
    '''
    im = mh.imread(fname)
    if im.ndim == 2:
        return im
    if im.ndim == 3:
        if options.convert_to_bw == 'max' or im.ptp(2).max() == 0:
            # This is a greyscale image, saved as colour
            return im.max(2)
        if options.convert_to_bw == 'yes':
            return mh.colors.rgb2grey(im, dtype=np.uint8)
    print_error("{} is not a greyscale image (and --convert-to-bw was not specified)".format(fname), not options.no_color)
    sys.exit(1)
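# Hedged usage sketch (file name and flag values are hypothetical): `read_bw`
# expects an argparse-style options object carrying the attributes it checks.
import argparse
opts = argparse.Namespace(convert_to_bw='yes', no_color=False)
im = read_bw('building05.jpg', opts)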

def features_for(images):
    fs = []
    for im in images:
        im = mh.imread(im, as_grey=True).astype(np.uint8)
        # Average the four directional Haralick texture matrices
        fs.append(mh.features.haralick(im).mean(0))
    return np.array(fs)
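# Hedged sketch (file names and labels are hypothetical): the Haralick texture
# features returned by `features_for` can be fed directly to a scikit-learn
# classifier such as logistic regression.
from sklearn.linear_model import LogisticRegression
train_images = ['building00.jpg', 'text00.jpg', 'scene00.jpg']
train_labels = [0, 1, 2]
clf = LogisticRegression()
clf.fit(features_for(train_images), train_labels)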
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
import mahotas as mh
image = mh.imread('../SimpleImageDataset/building05.jpg')
image = mh.colors.rgb2gray(image)
# Compute Gaussian filtered versions with increasing kernel widths
im8 = mh.gaussian_filter(image, 8)
im16 = mh.gaussian_filter(image, 16)
im32 = mh.gaussian_filter(image, 32)
# We now build a composite image with three panels:
#
# [ IM8 | | IM16 | | IM32 ]
h, w = im8.shape
canvas = np.ones((h, 3 * w + 256), np.uint8)
canvas *= 255
canvas[:, :w] = im8
canvas[:, w + 128:2 * w + 128] = im16
canvas[:, -w:] = im32
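# Hedged sketch (output file name is hypothetical, not from the original):
# write the three-panel composite to disk with mahotas.
mh.imwrite('gaussian_filtered_panels.jpg', canvas)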

def readImagesFromPaths(input_df):
    """
    Reads in input_df with the image paths and updates it with the image content.

    Parameters: input_df - input file and target file paths
    """
    image_content_column = []
    target_image_content_column = []
    for index, row in input_df.iterrows():
        image_content = mh.imread(row['input'], as_grey=True)
        image_content = image_content.astype(np.float32, copy=False)
        # Add a trailing channel axis so the array is (height, width, 1)
        image_content = np.reshape(image_content, (image_content.shape[0], image_content.shape[1], 1))
        target_content = mh.imread(row['target'])
        target_content = target_content.astype(np.float32, copy=False)
        target_content = np.reshape(target_content, (target_content.shape[0], target_content.shape[1], 1))
        image_content_column.append(image_content)
        target_image_content_column.append(target_content)
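    # Hedged completion (column names are assumptions, not in the original
    # snippet): per the docstring, attach the collected arrays back onto the
    # DataFrame and return it.
    input_df['image_content'] = image_content_column
    input_df['target_content'] = target_image_content_column
    return input_df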

def chist(fname):
    from features import chist as color_histogram
    im = mh.imread(fname)
    return color_histogram(im)

import skimage
from skimage import exposure

def equalize(image):
    image = mh.imread(image)
    print(image.shape)
    image = exposure.equalize_adapthist(image)
    image = skimage.img_as_ubyte(image)
    print(image.dtype)
    return image
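# Hedged usage sketch (file names are hypothetical):
eq = equalize('building05.jpg')
mh.imwrite('building05_equalized.jpg', eq)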