How to use the matplotlib.pyplot.imshow function in matplotlib

To help you get started, we’ve selected a few `matplotlib.pyplot.imshow` examples, based on popular ways the function is used in public projects.

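Before the project snippets, here is a minimal, self-contained sketch of the basic call; the array contents, colormap, and interpolation are arbitrary choices for illustration:

import numpy as np
import matplotlib.pyplot as plt

# Synthetic 2D array; imshow maps each value to a color via the colormap.
data = np.random.rand(32, 32)

plt.imshow(data, cmap='viridis', interpolation='nearest')
plt.colorbar()  # show the value-to-color mapping
plt.show()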

github sassoftware / python-dlpy / dl_api / images.py
if nimages > ncol:
    nrow = nimages // ncol + 1
else:
    nrow = 1
    ncol = nimages
if figsize is None:
    figsize = (16, 16 // ncol * nrow)
fig = plt.figure(figsize=figsize)

for i in range(nimages):
    image = temp_tbl['Images']['Image'][i]
    label = temp_tbl['Images']['Label'][i]
    ax = fig.add_subplot(nrow, ncol, i + 1)
    ax.set_title('{}'.format(label))
    plt.imshow(image)
    plt.xticks([]), plt.yticks([])
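The trailing `plt.xticks([]), plt.yticks([])` is a tuple expression that clears both sets of tick marks on the current axes; since the loop already holds an `Axes` object, `ax.set_xticks([])` and `ax.set_yticks([])`, or simply `ax.axis('off')`, are the more idiomatic equivalents.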
github kwotsin / TensorFlow-ENet / test_enet.py
# Save the images
if save_images:
    if not os.path.exists(photo_dir):
        os.mkdir(photo_dir)

    # Save the image visualizations for the first 10 images.
    logging.info('Saving the images now...')
    predictions_val, annotations_val = sess.run([predictions, annotations])

    for i in range(10):  # xrange in the original Python 2 code
        predicted_annotation = predictions_val[i]
        annotation = annotations_val[i]

        # Show the predicted and ground-truth annotations side by side
        plt.subplot(1, 2, 1)
        plt.imshow(predicted_annotation)
        plt.subplot(1, 2, 2)
        plt.imshow(annotation)
        plt.savefig(photo_dir + "/image_" + str(i))
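A runnable reduction of this side-by-side pattern, with random label maps standing in for the network outputs (the titles and filename below are placeholders):

import numpy as np
import matplotlib.pyplot as plt

# Placeholder "prediction" and "ground truth" label maps.
predicted = np.random.randint(0, 12, size=(256, 256))
annotation = np.random.randint(0, 12, size=(256, 256))

plt.subplot(1, 2, 1)
plt.imshow(predicted)
plt.title('prediction')
plt.subplot(1, 2, 2)
plt.imshow(annotation)
plt.title('ground truth')
plt.savefig('image_0.png')  # call savefig before plt.show(); some backends clear the figure on show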
github slinderman / theano_pyglm / plotting / plot_results.py
def plot_stim_response(s_glm, s_glm_std=None, color=None):
    if 'stim_response_t' in s_glm['bkgd'] and \
       'stim_response_x' in s_glm['bkgd']:

        # Get the stimulus responses
        stim_x = s_glm['bkgd']['stim_response_x']
        stim_t = s_glm['bkgd']['stim_response_t']

        # Plot the spatial component of the stimulus response
        plt.subplot(1, 2, 1)
        if stim_x.ndim >= 2:
            px_per_node = 10
            stim_x_max = np.amax(np.abs(stim_x))
            # Upsample each node to a 10x10 pixel block and use a
            # symmetric color range so zero maps to the midpoint.
            plt.imshow(np.kron(stim_x, np.ones((px_per_node, px_per_node))),
                       vmin=-stim_x_max, vmax=stim_x_max,
                       extent=[0, 1, 0, 1],
                       interpolation='nearest')
            plt.colorbar()
        else:
            plt.plot(stim_x, color=color, linestyle='-')
            # plt.hold(True) was removed in matplotlib 3.0; axes now
            # retain existing artists by default.

            # If standard deviation is given, plot that as well
            if s_glm_std is not None:
                stim_x_std = s_glm_std['bkgd']['stim_response_x']
                plt.plot(stim_x + 2 * stim_x_std, color=color, linestyle='--')
                plt.plot(stim_x - 2 * stim_x_std, color=color, linestyle='--')

        plt.subplot(1, 2, 2)
        plt.plot(stim_t, color=color)
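The distinctive move above is the symmetric color scale: setting vmin and vmax to the same magnitude keeps zero at the midpoint of the colormap, so positive and negative weights get visually comparable intensities. A minimal sketch of just that technique, with random weights and a diverging colormap added for clarity:

import numpy as np
import matplotlib.pyplot as plt

weights = np.random.randn(8, 8)   # spatial weights centered on zero
vmax = np.abs(weights).max()      # symmetric color limit

# np.kron upsamples each weight into a 10x10 block of identical pixels.
upsampled = np.kron(weights, np.ones((10, 10)))

plt.imshow(upsampled, vmin=-vmax, vmax=vmax, cmap='RdBu_r',
           interpolation='nearest', extent=[0, 1, 0, 1])
plt.colorbar()
plt.show()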
github ming71 / toolbox / matplotlib / display / Superpix.py
out = mark_boundaries(img, segments, color=(255, 0, 0), mode='subpixel')

plt.subplot(131)
plt.title("n_segments=160")
plt.imshow(out)

start = time.perf_counter()  # time.clock() was removed in Python 3.8
segments2 = slic(img, n_segments=300, compactness=10)
elapsed = time.perf_counter() - start
print("2 Time used:", elapsed)
out2 = mark_boundaries(img, segments2, mode='subpixel')

plt.subplot(132)
plt.title("n_segments=300")
plt.imshow(out2)

start = time.perf_counter()
segments3 = slic(img, n_segments=200, compactness=20)
elapsed = time.perf_counter() - start
print("3 Time used:", elapsed)
out3 = mark_boundaries(img, segments3, mode='subpixel')

plt.subplot(133)
plt.title("n_segments=200, compactness=20")
plt.imshow(out3)


plt.show()
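Note that `plt.subplot(131)` is shorthand for `plt.subplot(1, 3, 1)`: a one-row, three-column grid, first panel. Each subsequent `imshow` draws into whichever panel was most recently selected.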
github RenatoBMLR / state-farm-distracted-driver-detection / src / plot_utils.py
def plot_confusion(results):
    mc = np.array(pd.crosstab(results['pred'], results['true']))
    path2save = './figures/distribution_classes.png'
    # keepdims=True keeps the row sums as a column vector, so each row
    # of the confusion matrix is normalized by its own total.
    plt.imshow(mc / mc.sum(axis=1, keepdims=True), cmap='jet')
    plt.colorbar()
    plt.axis('off')
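A self-contained version of the normalized display, with random counts standing in for the crosstab. The keepdims=True matters: without it, NumPy broadcasts the row sums across the wrong axis and the rows no longer sum to one:

import numpy as np
import matplotlib.pyplot as plt

mc = np.random.randint(0, 50, size=(10, 10)).astype(float)

# Normalize each row by its total so every row sums to 1.
mc_norm = mc / mc.sum(axis=1, keepdims=True)

plt.imshow(mc_norm, cmap='jet')
plt.colorbar()
plt.axis('off')
plt.show()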
github naivete5656 / WSISPDR / utils / for_review.py
    bou_list.extend(target[max_bou - 1, :])
    bou_list.extend(target[:, max_bou_h - 1])
    bou_list.extend(target[:, 0])
    bou_list = np.unique(bou_list)
    # Zero out any label that touches the image border
    for x in bou_list:
        target[target == x] = 0

    pred_mask = np.zeros(pred.shape)
# create mask
target_mask = np.zeros(target.shape)
target_mask[target == target_label] = 1
tar_x = target_mask
pre_x = pred_mask
if debug:
    plt.imshow(target_mask)
    plt.show()
    plt.imshow(pred_mask)
    plt.show()
pred_mask = pred_mask.flatten()
target_mask = target_mask.flatten()

tp = pred_mask.dot(target_mask)
# Note: with the usual definitions these two are swapped (false negatives
# come from the target, false positives from the prediction), but the IoU
# below is symmetric in fp and fn, so the final score is unaffected.
fn = pred_mask.sum() - tp
fp = target_mask.sum() - tp

tps += tp
fns += fn
fps += fp

iou = tp / (tp + fp + fn)
# Reset the boundary-label bookkeeping for the next iteration
bou_list = []
max_bou = tar_x.shape[0]
max_bou_h = tar_x.shape[1]
bou_list.extend(tar_x[0, :])
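The dot-product bookkeeping above reduces to a few lines on flattened binary masks. A toy example with made-up 2x2 masks, using the conventional fn/fp definitions:

import numpy as np

target_mask = np.array([[1, 1], [0, 0]], dtype=float).flatten()
pred_mask = np.array([[1, 0], [0, 0]], dtype=float).flatten()

tp = pred_mask.dot(target_mask)   # pixels where both masks are 1
fn = target_mask.sum() - tp       # target pixels the prediction missed
fp = pred_mask.sum() - tp         # predicted pixels not in the target
iou = tp / (tp + fp + fn)         # -> 0.5 for this toy example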
github CorkAI / Meetup1 / mnist_softmax_2_visualise.py
# Test on individual test examples, writing examples of
# successful and failed classifications to disk
if not os.path.exists(os.path.join(os.getcwd(), 'output_images')):
    os.makedirs(os.path.join(os.getcwd(), 'output_images'))
prediction = tf.argmax(y, 1)  # output the class that is predicted
num_each_to_store = 5
stored_correct = 0
stored_incorrect = 0
idx = 0
while (stored_correct < num_each_to_store or stored_incorrect < num_each_to_store) and idx < len(mnist.test.images):
    pred = sess.run(prediction, feed_dict={x: mnist.test.images[idx].reshape(1, 784)})
    real_label = np.argmax(mnist.test.labels[idx])
    correct = pred == real_label

    img = np.reshape(mnist.test.images[idx], [28, 28])
    plt.imshow(img, cmap='gray')

    if correct and stored_correct < num_each_to_store:
        stored_correct += 1
        plt.savefig("output_images/success_{}.png".format(real_label.astype(str)))
    elif not correct and stored_incorrect < num_each_to_store:
        stored_incorrect += 1
        plt.savefig("output_images/fail_{}_{}.png".format(real_label.astype(str), pred.astype(str)))
    idx += 1
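The `cmap='gray'` argument matters here: a 2D array passed to `imshow` without a colormap is rendered with matplotlib's default colormap (viridis in modern versions), not as a grayscale image.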
github gsurma / stereo_depth_estimator / stereo_depth_estimator_sgbm.py
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(80000)
wls_filter.setSigmaColor(1.2)

disparity_left = np.int16(left_matcher.compute(smooth_left, smooth_right))
disparity_right = np.int16(right_matcher.compute(smooth_right, smooth_left))

wls_image = wls_filter.filter(disparity_left, smooth_left, None, disparity_right)
# With NORM_MINMAX, OpenCV rescales to [min(alpha, beta), max(alpha, beta)],
# so this maps the disparities to 0..255.
wls_image = cv2.normalize(src=wls_image, dst=wls_image, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
wls_image = np.uint8(wls_image)

# Size the figure so the saved image matches the disparity map pixel-for-pixel.
fig = plt.figure(figsize=(wls_image.shape[1]/DPI, wls_image.shape[0]/DPI), dpi=DPI, frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.imshow(wls_image, cmap='jet')
plt.savefig(DATASET_DISPARITIES + name)
plt.close()
create_combined_output(left, right, name)
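The `Axes(fig, [0., 0., 1., 1.])` construction is a general recipe for saving an image with no borders, ticks, or padding. A minimal sketch, with a random array standing in for the disparity map and arbitrary DPI and filename:

import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(240, 320)
dpi = 100

# Size the figure so one array element maps to one pixel on disk.
fig = plt.figure(figsize=(img.shape[1] / dpi, img.shape[0] / dpi),
                 dpi=dpi, frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])  # axes fill the entire figure
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, cmap='jet')
fig.savefig('disparity.png')
plt.close(fig)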
github berkandemirel / attributes2classname / results.py
if master.DATASET == master.datasetList[1]:  # AwA dataset

    fig = plt.figure()
    plt.imshow(confusionMatrix, interpolation='nearest')
    plt.xticks(np.arange(0, 10), ['pers. cat', 'hippo.', 'leopard', 'hump. whale', 'seal', 'chimpanzee', 'rat', 'g. panda', 'pig', 'raccoon'], rotation=60)
    plt.yticks(np.arange(0, 10), ['pers. cat', 'hippo.', 'leopard', 'hump. whale', 'seal', 'chimpanzee', 'rat', 'g. panda', 'pig', 'raccoon'])
    plt.gcf().subplots_adjust(bottom=0.25)  # leave room for the rotated labels

    plt.savefig(__C.get('VISUAL_DATA') + 'confMatrix_' + str(__C.get('CURR_HIDDEN')) + '_' + timeStamp + '.pdf')
    plt.close(fig)

elif master.DATASET == master.datasetList[0]:  # aPaY dataset

    fig = plt.figure()
    plt.imshow(confusionMatrix, interpolation='nearest')
    plt.xticks(np.arange(0, 12), ['bag', 'build.', 'carr.', 'cent.', 'donkey', 'goat', 'jetski', 'monk.', 'mug', 'statue', 'wolf', 'zebra'], rotation=60)
    plt.yticks(np.arange(0, 12), ['bag', 'build.', 'carr.', 'cent.', 'donkey', 'goat', 'jetski', 'monk.', 'mug', 'statue', 'wolf', 'zebra'])
    plt.gcf().subplots_adjust(bottom=0.25)

    plt.savefig(__C.get('VISUAL_DATA') + 'confMatrix_' + str(__C.get('CURR_HIDDEN')) + '_' + timeStamp + '.pdf')
    plt.close(fig)

else:
    pass
github PacktPublishing / Mastering-OpenCV-4-with-Python / Chapter04 / 01-chapter-content / text_drawing_bounding_box.py
def show_with_matplotlib(img, title):
    """Shows an image using matplotlib capabilities"""

    # Convert BGR image to RGB:
    img_RGB = img[:, :, ::-1]

    # Show the image using matplotlib:
    plt.imshow(img_RGB)
    plt.title(title)
    plt.show()
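The `img[:, :, ::-1]` slice reverses the channel axis, which is what converts OpenCV's BGR ordering to the RGB ordering `imshow` expects; `cv2.cvtColor(img, cv2.COLOR_BGR2RGB)` is the equivalent explicit call.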