if use_cuda:
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    torch.cuda.manual_seed(seed)

global model
model = Darknet(cfgfile)
# model.print_network()
init_width = model.width
init_height = model.height

kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
global test_loader
test_loader = torch.utils.data.DataLoader(
    dataset.listDataset(testlist, shape=(init_width, init_height),
                        shuffle=False,
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                        ]), train=False),
    batch_size=batch_size, shuffle=False, **kwargs)

if use_cuda:
    if ngpus > 1:
        model = torch.nn.DataParallel(model)
        model = model.module
model = model.to(torch.device("cuda" if use_cuda else "cpu"))

for w in FLAGS.weights:
    model.load_weights(w)
    logging('evaluating ... %s' % (w))
    test()
def create_losses(batchsize, imheight, imwidth, test=True):
    import gc
    gc.collect()
    nnabla_ext.cuda.clear_memory_cache()

    anchors = args.num_anchors
    classes = args.num_classes
    yolo_x = nnabla.Variable((batchsize, 3, imheight, imwidth))
    yolo_features = yolov2.yolov2(yolo_x, anchors, classes, test=test)
    return yolo_x, yolo_features

yolo_x_nnabla, yolo_features_nnabla = create_losses(
    args.valid_batchsize, args.height, args.width, test=True)
nnabla.load_parameters(weightfile)

valid_dataset = dataset.listDataset(valid_images, args,
                                    train=False,
                                    shape=(args.width, args.height), shuffle=False)
assert args.valid_batchsize > 1
def batch_iter(it, batch_size):
    def list2np(t):
        imgs, labels = zip(*t)
        retimgs = np.zeros((len(imgs),) + imgs[0].shape, dtype=np.float32)
        retlabels = np.zeros(
            (len(labels),) + labels[0].shape, dtype=np.float32)
        for i, img in enumerate(imgs):
            retimgs[i, :, :, :] = img
        for i, label in enumerate(labels):
            retlabels[i, :] = label
        return retimgs, retlabels

    retlist = []
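    # The excerpt ends here. A minimal sketch of how the rest of batch_iter
    # could work (an assumption, not the original implementation): accumulate
    # samples from `it` and emit numpy batches via list2np.
    for sample in it:
        retlist.append(sample)
        if len(retlist) == batch_size:
            yield list2np(retlist)
            retlist = []
    if retlist:
        yield list2np(retlist)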
def train(epoch):
    global processed_batches
    t0 = time.time()
    if ngpus > 1:
        cur_model = model.module
    else:
        cur_model = model

    train_loader = torch.utils.data.DataLoader(
        dataset.listDataset(trainlist, shape=(init_width, init_height),
                            shuffle=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                            ]),
                            train=True,
                            seen=cur_model.seen,
                            batch_size=batch_size,
                            num_workers=num_workers),
        batch_size=batch_size, shuffle=False, **kwargs)

    lr = adjust_learning_rate(optimizer, processed_batches)
    logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
    model.train()
    t1 = time.time()
    avg_time = torch.zeros(9)
    for batch_idx, (data, target) in enumerate(train_loader):
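        # The excerpt stops at the loop header. A minimal, hypothetical loop body
        # for a loader built on dataset.listDataset (assumes an `optimizer` and a
        # YOLO-style `loss_fn` criterion, neither of which is shown above):
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = loss_fn(output, target)
        loss.backward()
        optimizer.step()
        processed_batches += 1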
tmp_files = fp.readlines()
valid_files = [item.rstrip() for item in tmp_files]

# Specify model, load pretrained weights, pass to GPU and set the module in evaluation mode
model = Darknet(modelcfg)
model.print_network()
model.load_weights(weightfile)
model.cuda()
model.eval()
test_width = model.test_width
test_height = model.test_height
num_keypoints = model.num_keypoints
num_labels = num_keypoints * 2 + 3  # +2 for width, height, +1 for class label

# Build the dataset for the test images
valid_dataset = dataset.listDataset(valid_images,
                                    shape=(test_width, test_height),
                                    shuffle=False,
                                    transform=transforms.Compose([transforms.ToTensor(), ]))

# Specify the number of workers for multiprocessing, get the dataloader for the test dataset
kwargs = {'num_workers': 4, 'pin_memory': True}
test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs)

logging("   Testing {}...".format(name))
logging("   Number of test samples: %d" % len(test_loader.dataset))

# Iterate through test batches (batch size for test data is 1)
count = 0
for batch_idx, (data, target) in enumerate(test_loader):
    t1 = time.time()
    # Pass data to GPU
    data = data.cuda()
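    # Hypothetical continuation (the excerpt is truncated here): a forward pass
    # with gradients disabled; how `model(data)` is post-processed for pose
    # estimation is not shown in the snippet above.
    with torch.no_grad():
        output = model(data)
    t2 = time.time()
    count += 1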
def train(args, epoch, max_epochs, train_graph, yolo_solver,
          prefetch_iterator, on_memory_data):
    sample_iter = dataset.listDataset(
        args.train, args,
        shuffle=True,
        train=True,
        seen=train_graph.seen,
        image_sizes=args.size_aug,
        image_size_change_freq=args.batch_size * args.accum_times * 10,
        on_memory_data=on_memory_data,
        use_cv2=not args.disable_cv2)
    batch_iter = dataset.create_batch_iter(
        iter(sample_iter), batch_size=args.batch_size)

    total_loss = []
    epoch_seen = 0
    tic = time.time()
    for batch_idx, ((data_tensor, target_tensor), preprocess_time) \
            in enumerate(prefetch_iterator.create(batch_iter)):
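        # The excerpt stops at the loop header. A rough sketch of one nnabla
        # update step (an assumption: `train_graph` is taken to expose an input
        # variable `x` and a scalar `loss`, which the snippet above does not show):
        train_graph.x.d = data_tensor
        train_graph.loss.forward(clear_no_need_grad=True)
        yolo_solver.zero_grad()
        train_graph.loss.backward(clear_buffer=True)
        yolo_solver.update()
        total_loss.append(float(train_graph.loss.d))
        epoch_seen += data_tensor.shape[0]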