# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
for snapindex in snapindices:
dlc_cfg['init_weights'] = os.path.join(str(modelfolder),'train',Snapshots[snapindex]) #setting weights to corresponding snapshot.
trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1] #read how many training siterations that corresponds to.
# Name for deeplabcut net (based on its parameters)
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations)
notanalyzed,resultsfilename,DLCscorer=auxiliaryfunctions.CheckifNotEvaluated(str(evaluationfolder),DLCscorer,DLCscorerlegacy,Snapshots[snapindex])
print("Running ", DLCscorer, " with # of trainingiterations:", trainingsiterations)
if notanalyzed:
# Specifying state of model (snapshot / training state)
sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
Numimages = len(Data.index)
PredicteData = np.zeros((Numimages,3 * len(dlc_cfg['all_joints_names'])))
print("Analyzing data...")
for imageindex, imagename in tqdm(enumerate(Data.index)):
image = imread(os.path.join(cfg['project_path'],imagename),mode='RGB')
if scale!=1:
image = imresize(image, scale)
#image = skimage.color.gray2rgb(image)
image_batch = data_to_input(image)
# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref = predict.extract_cnn_output(outputs_np, dlc_cfg)
# Extract maximum scoring location from the heatmap, assume 1 person
pose = predict.argmax_pose_predict(scmap, locref, dlc_cfg.stride)
PredicteData[imageindex, :] = pose.flatten() # NOTE: thereby cfg_test['all_joints_names'] should be same order as bodyparts!
sess.close() #closes the current tf session
def GetPosesofFrames(cfg,dlc_cfg, sess, inputs, outputs,directory,framelist,nframes,batchsize,rgb):
''' Batchwise prediction of pose for frame list in directory'''
#from skimage.io import imread
from deeplabcut.utils.auxfun_videos import imread
print("Starting to extract posture")
if rgb:
im=imread(os.path.join(directory,framelist[0]),mode='RGB')
else:
im=imread(os.path.join(directory,framelist[0]))
ny,nx,nc=np.shape(im)
print("Overall # of frames: ", nframes," found with (before cropping) frame dimensions: ", nx,ny)
PredictedData = np.zeros((nframes, dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))
batch_ind = 0 # keeps track of which image within a batch should be written to
batch_num = 0 # keeps track of which batch you are at
if cfg['cropping']:
print("Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." %(cfg['x1'], cfg['x2'],cfg['y1'], cfg['y2']))
nx,ny=cfg['x2']-cfg['x1'],cfg['y2']-cfg['y1']
if nx>0 and ny>0:
pass
else:
raise Exception('Please check the order of cropping parameter!')
def make_batch(self, data_item, scale, mirror):
im_file = data_item.im_path
logging.debug('image %s', im_file)
logging.debug('mirror %r', mirror)
#print(im_file, os.getcwd())
#print(self.cfg.project_path)
image = imread(os.path.join(self.cfg.project_path,im_file), mode='RGB')
if self.has_gt:
joints = np.copy(data_item.joints)
if self.cfg.crop: #adapted cropping for DLC
if np.random.rand()
if counter%step==0:
pbar.update(step)
if cfg['cropping']:
frame= img_as_ubyte(im[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
else:
frame = img_as_ubyte(im)
pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
PredictedData[counter, :] = pose.flatten()
else:
frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte') # this keeps all the frames of a batch
for counter,framename in enumerate(framelist):
if rgb:
im=imread(os.path.join(directory,framename),mode='RGB')
else:
im=imread(os.path.join(directory,framename))
if counter%step==0:
pbar.update(step)
if cfg['cropping']:
frames[batch_ind] = img_as_ubyte(im[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
else:
frames[batch_ind] = img_as_ubyte(im)
if batch_ind==batchsize-1:
pose = predict.getposeNP(frames,dlc_cfg, sess, inputs, outputs)
PredictedData[batch_num*batchsize:(batch_num+1)*batchsize, :] = pose
batch_ind = 0
batch_num += 1
def GetPosesofFrames(cfg,dlc_cfg, sess, inputs, outputs,directory,framelist,nframes,batchsize,rgb):
''' Batchwise prediction of pose for frame list in directory'''
#from skimage.io import imread
from deeplabcut.utils.auxfun_videos import imread
print("Starting to extract posture")
if rgb:
im=imread(os.path.join(directory,framelist[0]),mode='RGB')
else:
im=imread(os.path.join(directory,framelist[0]))
ny,nx,nc=np.shape(im)
print("Overall # of frames: ", nframes," found with (before cropping) frame dimensions: ", nx,ny)
PredictedData = np.zeros((nframes, dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))
batch_ind = 0 # keeps track of which image within a batch should be written to
batch_num = 0 # keeps track of which batch you are at
if cfg['cropping']:
print("Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." %(cfg['x1'], cfg['x2'],cfg['y1'], cfg['y2']))
nx,ny=cfg['x2']-cfg['x1'],cfg['y2']-cfg['y1']
if nx>0 and ny>0:
pass
else:
raise Exception('Please check the order of cropping parameter!')
if cfg['x1']>=0 and cfg['x2']=0 and cfg['y2']
idx = np.random.choice(self.num_images)
scale = self.get_scale()
size = self.data[idx].im_size
target_size = np.ceil(size[1:3]*scale).astype(int)
if self.is_valid_size(target_size[1] * target_size[0]):
break
stride = self.cfg.stride
for i in range(self.batch_size):
data_item = self.data[img_idx[i]]
data_items.append(data_item)
im_file = data_item.im_path
logging.debug('image %s', im_file)
image = imread(os.path.join(self.cfg.project_path,im_file), mode='RGB')
if self.has_gt:
joints = np.copy(data_item.joints)
joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]
joint_points = [person_joints[:, 1:3] for person_joints in joints]
joint_ids.append(joint_id)
batch_joints.append(arr(joint_points)[0])
batch_images.append(image)
sm_size = np.ceil(target_size / (stride * 2)).astype(int) * 2
assert len(batch_images) == self.batch_size
return batch_images, joint_ids, batch_joints, data_items, sm_size, target_size
else:
raise Exception('Please check the order of cropping parameter!')
if cfg['x1']>=0 and cfg['x2']=0 and cfg['y2']
pbar.update(step)
if cfg['cropping']:
frame= img_as_ubyte(im[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
else:
frame = img_as_ubyte(im)
pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
PredictedData[counter, :] = pose.flatten()
else:
frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte') # this keeps all the frames of a batch
for counter,framename in enumerate(framelist):
if rgb:
im=imread(os.path.join(directory,framename),mode='RGB')
else:
im=imread(os.path.join(directory,framename))
if counter%step==0:
pbar.update(step)
if cfg['cropping']:
frames[batch_ind] = img_as_ubyte(im[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
else:
frames[batch_ind] = img_as_ubyte(im)
if batch_ind==batchsize-1:
pose = predict.getposeNP(frames,dlc_cfg, sess, inputs, outputs)
PredictedData[batch_num*batchsize:(batch_num+1)*batchsize, :] = pose
batch_ind = 0
batch_num += 1
else:
batch_ind+=1
def make_batch(self, data_item, scale, mirror):
im_file = data_item.im_path
logging.debug('image %s', im_file)
logging.debug('mirror %r', mirror)
#print(im_file, os.getcwd())
#print(self.cfg.project_path)
image = imread(os.path.join(self.cfg.project_path,im_file), mode='RGB')
if self.has_gt:
joints = np.copy(data_item.joints)
if self.cfg.crop: #adapted cropping for DLC
if np.random.rand()