Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a snapshot-index selection chain. The opening `if`
# branch and the body of the second `elif` are not visible in this chunk, and
# L4 below is syntactically incomplete (missing condition/colon) — the source
# appears truncated here; restore from the original file before use.
elif Snapindex == "all":
# "all" selects every available snapshot (by index) for processing.
snapindices = range(len(Snapshots))
elif Snapindex
# NOTE(review): mid-function fragment (enclosing def not visible). It decides
# whether a video pair must be (re-)analyzed/triangulated by comparing stored
# 3-D metadata against the current stereo calibration and scorer name.
# Load the metadata pickle written by a previous 3-D (triangulation) run.
pickle_file = str(output_filename+'_includingmetadata.pickle')
metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)
# Resolve the project's 3-D folder layout and locate the stereo calibration.
img_path,path_corners,path_camera_matrix,path_undistort=auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
path_stereo_file = os.path.join(path_camera_matrix,'stereo_params.pickle')
stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
# Key into the stereo params, e.g. "cam1-cam2" — assumes exactly two cameras.
cam_pair = str(cam_names[0]+'-'+cam_names[1])
if_video_analyzed = False # variable to keep track if the video was already analyzed
# Check for the camera matrix: if any stored stereo parameter differs from the
# current calibration file, flag the pair for re-triangulation.
for k in metadata_['stereo_matrix'].keys():
if np.all(metadata_['stereo_matrix'][k] == stereo_file[cam_pair][k]) :
pass
else:
run_triangulate = True
# Check for scorer names in the pickle file of 3d output
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations='unknown')
# Accept either the current or the legacy scorer name as "already analyzed".
if metadata_['scorer_name'][cam_names[j]] == DLCscorer: #TODO: CHECK FOR BOTH?
if_video_analyzed=True
elif metadata_['scorer_name'][cam_names[j]] == DLCscorerlegacy:
if_video_analyzed=True
else:
# Scorer changed since the last run: re-analyze and re-triangulate.
if_video_analyzed=False
run_triangulate = True
if if_video_analyzed:
print("This file is already analyzed!")
# Reuse the existing per-camera .h5 result rather than re-running inference.
dataname.append(os.path.join(destfolder,vname + DLCscorer + '.h5'))
scorer_name[cam_names[j]] = DLCscorer
else:
# Analyze video if score name is different
DLCscorer = predict_videos.analyze_videos(config_2d,[video],videotype=videotype,shuffle=shuffle,trainingsetindex=trainingsetindex,gputouse=gputouse,destfolder=destfolder)
The shuffle index of the training dataset. The extracted frames will be stored in the labeled-dataset for
the corresponding shuffle of the training dataset. Default is set to 1
trainingsetindex: int, optional
Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
save_as_csv: bool, optional
Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``
destfolder: string, optional
Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
folder also needs to be passed.
"""
# NOTE(review): mid-function fragment (enclosing def and its docstring tail are
# above). For each video, loads the per-frame predictions and derives per-bone
# measurements for every (bp1, bp2) pair listed in cfg['skeleton'].
# Load config file, scorer and videos
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
print("Processing %s"%(video))
if destfolder is None:
# Default output location: the folder containing the video itself.
destfolder= str(Path(video).parents[0])
vname=Path(video).stem
# notanalyzed is True when the '_skeleton' post-processing output does not
# exist yet for this video/scorer combination.
notanalyzed,outdataname,sourcedataname,scorer=auxiliaryfunctions.CheckifPostProcessing(destfolder,vname,DLCscorer,DLCscorerlegacy,suffix='_skeleton')
if notanalyzed:
Dataframe = pd.read_hdf(sourcedataname,'df_with_missing')
# Process skeleton: one entry per configured bone, keyed "bp1_bp2".
bones = {}
for bp1, bp2 in cfg['skeleton']:
name = "{}_{}".format(bp1, bp2)
bones[name] = analyzebone(Dataframe[scorer][bp1], Dataframe[scorer][bp2])
>>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
--------
If you want to create the labeled video for all the videos (as .avi extension) in a directory.
>>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])
--------
If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
>>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')
--------
"""
# NOTE(review): mid-function fragment (the docstring examples above belong to
# the enclosing create_labeled_video-style function). Resolves scorer name,
# the bodyparts to display, and optional skeleton overlay settings.
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg['TrainingFraction'][trainingsetindex]
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction) #automatically loads corresponding model (even training iteration based on snapshot index)
# Restrict to the intersection of configured bodyparts and the user's request.
bodyparts=auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(cfg,displayedbodyparts)
if draw_skeleton:
bodyparts2connect = cfg['skeleton']
skeleton_color = cfg['skeleton_color']
else:
# No skeleton overlay requested: downstream code treats None as "skip".
bodyparts2connect = None
skeleton_color = None
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
if destfolder is None:
videofolder= Path(video).parents[0] #where your folder with videos is.
else:
videofolder=destfolder
Windows example for extracting the frames with default settings
>>> deeplabcut.extract_outlier_frames('C:\\myproject\\reaching-task\\config.yaml',['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'])
--------
for extracting the frames with default settings
>>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'])
--------
for extracting the frames with kmeans
>>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'],extractionalgorithm='kmeans')
--------
for extracting the frames with kmeans and epsilon = 5 pixels.
>>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'],epsilon = 5,extractionalgorithm='kmeans')
--------
"""
# NOTE(review): mid-function fragment (extract_outlier_frames-style function;
# the examples above are the tail of its docstring). Verifies the video was
# analyzed, then loads predictions and computes the frame range of interest.
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
if destfolder is None:
videofolder = str(Path(video).parents[0])
else:
videofolder=destfolder
notanalyzed,dataname,DLCscorer=auxiliaryfunctions.CheckifNotAnalyzed(videofolder,str(Path(video).stem),DLCscorer,DLCscorerlegacy,flag='checking')
if notanalyzed:
print("It seems the video has not been analyzed yet, or the video is not found! You can only refine the labels after the a video is analyzed. Please run 'analyze_video' first. Or, please double check your video file path")
else:
Dataframe = pd.read_hdf(dataname,'df_with_missing')
scorer=Dataframe.columns.get_level_values(0)[0] #reading scorer from
nframes=np.size(Dataframe.index)
# extract min and max index based on start stop interval.
# cfg['start'] is a fraction of the video; clamp the start index at 0.
startindex=max([int(np.floor(nframes*cfg['start'])),0])
showfigures: bool, default false
If true then plots are also displayed.
destfolder: string, optional
Specifies the destination folder that was used for storing analysis data (default is the path of the video).
Example
--------
for labeling the frames
>>> deeplabcut.plot_trajectories('home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
--------
"""
# NOTE(review): mid-function fragment (plot_trajectories-style function; the
# docstring tail is above). Per video, checks that an analysis file exists for
# the resolved scorer before plotting. Truncated after L142's `else:`.
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg['TrainingFraction'][trainingsetindex]
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction) #automatically loads corresponding model (even training iteration based on snapshot index)
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
print(video)
if destfolder is None:
videofolder = str(Path(video).parents[0])
else:
videofolder=destfolder
videotype = str(Path(video).suffix)
vname = str(Path(video).stem)
print("Starting % ", videofolder, video)
notanalyzed, dataname, DLCscorer=auxiliaryfunctions.CheckifNotAnalyzed(videofolder,vname,DLCscorer,DLCscorerlegacy,flag='checking')
if notanalyzed:
print("The video was not analyzed with this scorer:", DLCscorer)
else:
deeplabcut.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtertype='arima',ARdegree=5,MAdegree=2)
Use median filter over 10bins:
deeplabcut.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,windowlength=10)
One can then use the filtered rather than the frame-by-frame predictions by calling:
deeplabcut.plot_trajectories('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
deeplabcut.create_labeled_video('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
--------
Returns filtered pandas array with the same structure as normal output of network.
"""
# NOTE(review): mid-function fragment (filterpredictions-style function; its
# docstring examples are above). Loads raw per-bodypart predictions so a
# smoothing filter can be applied; truncated after the x/y/likelihood unpack.
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(video,videotype)
if len(Videos)>0:
for video in Videos:
if destfolder is None:
destfolder = str(Path(video).parents[0])
print("Filtering with %s model %s"%(filtertype,video))
videofolder = destfolder
vname=Path(video).stem
# notanalyzed is True when no 'filtered' output exists yet for this video.
notanalyzed,outdataname,sourcedataname,scorer=auxiliaryfunctions.CheckifPostProcessing(destfolder,vname,DLCscorer,DLCscorerlegacy,suffix='filtered')
if notanalyzed:
Dataframe = pd.read_hdf(sourcedataname,'df_with_missing')
for bpindex,bp in tqdm(enumerate(cfg['bodyparts'])):
# Rebuild the (scorer, bodypart, coord) MultiIndex for the filtered output.
pdindex = pd.MultiIndex.from_product([[scorer], [bp], ['x', 'y','likelihood']],names=['scorer', 'bodyparts', 'coords'])
x,y,p=Dataframe[scorer][bp]['x'].values,Dataframe[scorer][bp]['y'].values,Dataframe[scorer][bp]['likelihood'].values
# NOTE(review): mid-function fragment (analyze_videos-style setup; truncated at
# both ends — the `if` matching L170's `else:` and the body after L196's
# `else:` are not visible). Configures batch size, dynamic cropping, and the
# TensorFlow inference mode before building the prediction session.
#update batchsize (based on parameters in config.yaml)
dlc_cfg['batch_size']=cfg['batch_size']
else:
# An explicit batchsize argument overrides both model and project config.
dlc_cfg['batch_size']=batchsize
cfg['batch_size']=batchsize
if dynamic[0]: #state=true
#(state,detectiontreshold,margin)=dynamic
print("Starting analysis in dynamic cropping mode with parameters:", dynamic)
# Dynamic cropping is incompatible with batching, multiple outputs, and the
# GPU inference path, so all three are forced to their single/CPU settings.
dlc_cfg['num_outputs']=1
TFGPUinference=False
dlc_cfg['batch_size']=1
print("Switching batchsize to 1, num_outputs (per animal) to 1 and TFGPUinference to False (all these features are not supported in this mode).")
# Name for scorer:
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations=trainingsiterations)
if dlc_cfg['num_outputs']>1:
if TFGPUinference:
print("Switching to numpy-based keypoint extraction code, as multiple point extraction is not supported by TF code currently.")
TFGPUinference=False
print("Extracting ", dlc_cfg['num_outputs'], "instances per bodypart")
# Column labels become x, y, likelihood, x2, y2, likelihood2, ... — the
# first instance keeps unsuffixed names for backwards compatibility.
xyz_labs_orig = ['x', 'y', 'likelihood']
suffix = [str(s+1) for s in range(dlc_cfg['num_outputs'])]
suffix[0] = '' # first one has empty suffix for backwards compatibility
xyz_labs = [x+s for s in suffix for x in xyz_labs_orig]
else:
xyz_labs = ['x', 'y', 'likelihood']
#sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
if TFGPUinference:
sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_cfg)
else: