start = time.time()
task_path = assert_dirs(root, 'cifar10')
url = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/cifar10/cifar-10-binary.tar.gz'
rq.files(url, path_join(task_path, url.split('/')[-1]))
with tarfile.open(path_join(task_path, url.split('/')[-1])) as t:
    t.extractall(task_path)
noise_file = tf.io.gfile.listdir(task_path)  # the archive and raw .bin files, removed after conversion
count = 0  # running offset so PNG names stay unique across batches
for file in ['data_batch_1.bin', 'data_batch_2.bin', 'data_batch_3.bin', 'data_batch_4.bin', 'data_batch_5.bin']:
    with open(path_join(task_path, file), 'rb') as fin:
        # each record is 1 label byte followed by 3072 pixel bytes (3x32x32, CHW)
        data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3072 + 1)
        train = data[:, 1:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)  # CHW -> HWC
        train_label = data[:, 0].astype(np.int32)
        for i in set(train_label):
            tf.io.gfile.makedirs(path_join(task_path, 'train', str(i)))
        for idx in range(train.shape[0]):
            imageio.imsave(path_join(task_path, 'train', str(train_label[idx]), str(count + idx) + '.png'), train[idx])
        count += train.shape[0]
for file in ['test_batch.bin']:
    with open(path_join(task_path, file), 'rb') as fin:
        data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3072 + 1)
        test = data[:, 1:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
        test_label = data[:, 0].astype(np.int32)
        for i in set(test_label):
            tf.io.gfile.makedirs(path_join(task_path, 'test', str(i)))
        for idx in range(test.shape[0]):
            imageio.imsave(path_join(task_path, 'test', str(test_label[idx]), str(idx) + '.png'), test[idx])
for file in noise_file:
    tf.io.gfile.remove(path_join(task_path, file))
print('cifar10 dataset download completed, run time %d min %.2f sec' % divmod(time.time() - start, 60))
return task_path
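This snippet is a function body and leans on three helpers from its surrounding library (assert_dirs, path_join, rq.files) that are not shown. A minimal sketch of what they would need to do, assuming rq.files is a plain streaming HTTP downloader:

import os
import requests

def path_join(*parts):
    # plain os.path.join stand-in; the original may be gfile-aware
    return os.path.join(*parts)

def assert_dirs(root, name):
    # ensure <root>/<name> exists and return it
    task_path = os.path.join(root, name)
    os.makedirs(task_path, exist_ok=True)
    return task_path

class rq:
    @staticmethod
    def files(url, path, chunk_size=1 << 20):
        # stream the remote file to disk in 1 MiB chunks
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)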
truth_label_img = imageio.imread(os.path.join(gt_dir, file_name_truth))
iou = util_functions.iou_metric(truth_label_img, pred, divide_flag=True)  # (intersection, union) pair
iou_record.append(iou)
iou_return[tile_name] = iou
duration = time.time() - start_time
if verb:
    print('{} mean IoU={:.3f}, duration: {:.3f}'.format(tile_name, iou[0] / iou[1], duration))

# save results
if save_result:
    pred_save_dir = os.path.join(score_save_dir, 'pred')
    if not os.path.exists(pred_save_dir):
        os.makedirs(pred_save_dir)
    imageio.imsave(os.path.join(pred_save_dir, tile_name + '.png'), pred.astype(np.uint8))
    with open(os.path.join(score_save_dir, 'result.txt'), 'a+') as file:
        file.write('{} {}\n'.format(tile_name, iou))

if show_figure:
    plt.figure(figsize=(12, 4))
    ax1 = plt.subplot(121)
    ax1.imshow(truth_label_img)
    plt.title('Truth')
    ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
    ax2.imshow(pred)
    plt.title('Prediction')
    plt.suptitle('{} Results on {} IoU={:.3f}'.format(self.model_name, file_name_truth.split('_')[0], iou[0] / iou[1]))
    plt.show()

# aggregate over all tiles: sum of intersections / sum of unions
iou_record = np.array(iou_record)
mean_iou = np.sum(iou_record[:, 0]) / np.sum(iou_record[:, 1])
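The iou[0]/iou[1] arithmetic and the summed mean_iou only work if, with divide_flag=True, util_functions.iou_metric returns the raw (intersection, union) counts rather than their ratio. A minimal sketch of such a metric for binary masks (the real signature in util_functions is an assumption):

import numpy as np

def iou_metric(truth, pred, divide_flag=False):
    # treat nonzero pixels as foreground
    truth = truth > 0
    pred = pred > 0
    intersection = np.logical_and(truth, pred).sum()
    union = np.logical_or(truth, pred).sum()
    if divide_flag:
        return intersection, union  # let the caller aggregate before dividing
    return intersection / union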
start = time.time()  # the closing print below needs a start timestamp
task_path = os.path.join(root, 'mnist_tibetan')
url_list = ['https://raw.githubusercontent.com/Hourout/datasets/master/TibetanMNIST/TibetanMNIST_28_28_01.csv',
            'https://raw.githubusercontent.com/Hourout/datasets/master/TibetanMNIST/TibetanMNIST_28_28_02.csv']
if tf.gfile.Exists(task_path):
    tf.gfile.DeleteRecursively(task_path)
tf.gfile.MakeDirs(task_path)
data = pd.DataFrame()
for url in url_list:
    s = requests.get(url).content
    data = pd.concat([data, pd.read_csv(io.StringIO(s.decode('utf-8')), header=None, dtype='uint8')])
# column 0 is the label; columns 1..784 are the 28x28 pixel values
train = data.loc[:, 1:].values.reshape(-1, 28, 28)
train_label = data.loc[:, 0].values
for i in set(train_label):
    tf.gfile.MakeDirs(os.path.join(task_path, 'train', str(i)))
for idx in range(train.shape[0]):
    imageio.imsave(os.path.join(task_path, 'train', str(train_label[idx]), str(idx) + '.png'), train[idx])
print('mnist_tibetan dataset download completed, run time %d min %.2f sec' % divmod(time.time() - start, 60))
return task_path
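Like the CIFAR-10 snippet, this is a function body. The imports it relies on are standard; note that tf.gfile is the TF1-era filesystem API (TF2 moved it to tf.io.gfile):

import io
import os
import time

import imageio
import pandas as pd
import requests
import tensorflow as tf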
def pad_image(location1, location2, res):
    # Pad an image pair in place to res x res, or delete the pair.
    # Note the `or`: the pair is padded if *either* image fits within res,
    # although pad() itself assumes each image it receives actually fits.
    resolution = (res, res, 3)
    img1 = imageio.imread(location1, pilmode='RGB').astype(float)  # np.float was removed in NumPy 1.24
    img2 = imageio.imread(location2, pilmode='RGB').astype(float)
    if (img1.shape[0] <= res and img1.shape[1] <= res) or (img2.shape[0] <= res and img2.shape[1] <= res):
        padded = pad(img1, resolution)
        imageio.imsave(location1, padded)
        padded = pad(img2, resolution)
        imageio.imsave(location2, padded)
    else:
        os.remove(location1)
        os.remove(location2)
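The pad helper is not shown; the call sites imply it places an image on a res x res x 3 canvas. A minimal sketch under the assumption that it centers the image on a black background (cast back to uint8 so imageio.imsave writes a normal 8-bit PNG):

import numpy as np

def pad(img, resolution):
    # center the image on a zero (black) canvas of the target shape
    canvas = np.zeros(resolution, dtype=np.float64)
    y = (resolution[0] - img.shape[0]) // 2
    x = (resolution[1] - img.shape[1]) // 2
    canvas[y:y + img.shape[0], x:x + img.shape[1], :] = img
    return canvas.astype(np.uint8)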
def _encode_image(self, arr, cmap):
    buf = BytesIO()  # renamed from `io` to avoid shadowing the io module
    imsave(buf, arr, cmap=cmap)  # the cmap kwarg matches matplotlib.pyplot.imsave, which writes PNG by default
    buf.seek(0)
    img_str = base64.b64encode(buf.getvalue()).decode()
    return 'data:image/{};base64,{}'.format('png', img_str)
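The returned string is a data URI, so it can be embedded directly in an HTML <img> tag. A hypothetical usage (obj stands for whatever instance defines _encode_image; the array is dummy data):

import numpy as np

arr = np.random.rand(64, 64)  # dummy grayscale values in [0, 1]
uri = obj._encode_image(arr, 'viridis')
html = '<img src="{}" alt="heatmap"/>'.format(uri)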
dummy_episode = NavigationEpisode(
    goals=[goal],
    episode_id="dummy_id",
    scene_id="dummy_scene",
    start_position=agent_position,
    start_rotation=agent_rotation,
)
target_image = maps.pointnav_draw_target_birdseye_view(
    agent_position,
    agent_rotation,
    np.asarray(dummy_episode.goals[0].position),
    goal_radius=dummy_episode.goals[0].radius,
    agent_radius_px=25,
)
imageio.imsave(
    os.path.join(IMAGE_DIR, "pointnav_target_image.png"), target_image
)
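This fragment comes from the habitat-lab visualization examples; the names it uses would be imported roughly as below. Module paths have moved between habitat versions, so treat these as assumptions:

import os

import imageio
import numpy as np

from habitat.tasks.nav.nav import NavigationEpisode
from habitat.utils.visualizations import maps

IMAGE_DIR = os.path.join("examples", "images")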
# optimize mesh with silhouette reprojection error and
# geometry constraints
loss = neg_iou_loss(images_pred[:, 3], images_gt[:, 3]) + \
       0.03 * laplacian_loss + \
       0.0003 * flatten_loss  # channel 3 is the rendered alpha, i.e. the silhouette
loop.set_description('Loss: %.4f' % loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 100 == 0:
    image = images_pred.detach().cpu().numpy()[0].transpose((1, 2, 0))  # CHW -> HWC
    writer.append_data((255 * image).astype(np.uint8))
    imageio.imsave(os.path.join(args.output_dir, 'deform_%05d.png' % i),
                   (255 * image[..., -1]).astype(np.uint8))  # save the silhouette channel

# save optimized mesh
model(1)[0].save_obj(os.path.join(args.output_dir, 'plane.obj'), save_texture=False)
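neg_iou_loss is the silhouette term: one minus the soft IoU between predicted and ground-truth alpha channels. A PyTorch sketch, assuming batched [N, H, W] silhouettes with values in [0, 1]:

import torch

def neg_iou_loss(predict, target):
    dims = tuple(range(1, predict.ndimension()))  # reduce over all but the batch dim
    intersect = (predict * target).sum(dims)
    union = (predict + target - predict * target).sum(dims) + 1e-6
    return 1.0 - (intersect / union).mean()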
# crop the map to the occupied area, with a small proportional padding
range_x = np.where(np.any(top_down_map, axis=1))[0]
range_y = np.where(np.any(top_down_map, axis=0))[0]
padding = int(np.ceil(top_down_map.shape[0] / 125))
range_x = (
    max(range_x[0] - padding, 0),
    min(range_x[-1] + padding + 1, top_down_map.shape[0]),
)
range_y = (
    max(range_y[0] - padding, 0),
    min(range_y[-1] + padding + 1, top_down_map.shape[1]),
)
top_down_map = top_down_map[
    range_x[0] : range_x[1], range_y[0] : range_y[1]
]
top_down_map = recolor_map[top_down_map]  # palette lookup: cell ids -> RGB
imageio.imsave(os.path.join(IMAGE_DIR, "top_down_map.png"), top_down_map)
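recolor_map acts as a palette: indexing a [K, 3] uint8 color array with an integer map turns the (H, W) id grid into an (H, W, 3) image in one step. A stand-in with made-up colors:

import numpy as np

recolor_map = np.array(
    [[255, 255, 255],   # 0: unexplored / out of bounds
     [128, 128, 128],   # 1: free space
     [0, 0, 0]],        # 2: obstacle or border
    dtype=np.uint8,
)
colored = recolor_map[top_down_map]  # fancy indexing does the per-cell lookup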
if image.shape[0] < 256 or image.shape[1] < 256 or abs(image.shape[0] - image.shape[1]) > 5:
    continue  # skip images that are small or far from square
indexes.append(index)

image_paths = image_paths[indexes]
ages = ages[indexes].astype(np.float32).tolist()
genders = genders[indexes].tolist()

# Preprocess images and create JSON.
json_list = []
for image_path, age, gender in zip(image_paths, ages, genders):
    image = imageio.imread(os.path.join(dataset_base, image_path))
    image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),
                             preserve_range=True)
    if len(image.shape) == 2:
        image = color.gray2rgb(image)  # promote grayscale to 3-channel RGB
    image_name = os.path.basename(image_path)
    imageio.imsave(os.path.join(preprocessed_directory, image_name), image.astype(np.uint8))
    gender = {0: 'female', 1: 'male'}[gender]
    json_list.append([image_name, age, gender])
with open(os.path.join(preprocessed_directory, 'meta.json'), 'w+') as json_file:
    json.dump(json_list, json_file)
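The resize and gray2rgb calls are scikit-image's; the imports this fragment assumes are:

import json
import os

import imageio
import numpy as np
from skimage import color, transform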
try:
    data = request.json
    if 'image' in data:
        blob = io.BytesIO(base64.b64decode(data['image']))
        img = Image.open(blob).convert('RGB')
    elif 'url' in data:
        blob = io.BytesIO(requests.get(data['url']).content)
        img = Image.open(blob).convert('RGB')
    else:
        raise ValueError(
            f'No image source found in request fields: {data.keys()}')
    mask = segmentator.predict(img)
    mask = (mask * 255).astype(np.uint8)
    fmem = io.BytesIO()
    imsave(fmem, mask, 'png')  # encode the mask as PNG into the in-memory buffer
    fmem.seek(0)
    mask64 = base64.b64encode(fmem.read()).decode('utf-8')
    result['data'] = {'mask': mask64}
    result['success'] = True
except Exception as e:  # the opening try: was missing from the fragment
    logger.exception(e)
    result['message'] = str(e)
    status = HTTPStatus.INTERNAL_SERVER_ERROR
result['total'] = time.time() - start
return jsonify(result), status
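The fragment is the middle of a request handler: result, status, and start must be initialized before the try block. The surrounding route would look roughly like this (route path and setup names are assumptions):

from http import HTTPStatus
import time

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/segment', methods=['POST'])
def segment():
    start = time.time()
    status = HTTPStatus.OK
    result = {'success': False}
    # ... body as above: decode the image, run segmentator.predict,
    # base64-encode the PNG mask into result['data'] ...
    result['total'] = time.time() - start
    return jsonify(result), status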