>>> deeplabcut.extract_outlier_frames('C:\\myproject\\reaching-task\\config.yaml',['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'])
--------
for extracting the frames with default settings
>>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'])
--------
for extracting the frames with kmeans
>>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'],extractionalgorithm='kmeans')
--------
for extracting the frames with kmeans and epsilon = 5 pixels.
>>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'],epsilon = 5,extractionalgorithm='kmeans')
--------
"""
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
if destfolder is None:
videofolder = str(Path(video).parents[0])
else:
videofolder=destfolder
notanalyzed,dataname,DLCscorer=auxiliaryfunctions.CheckifNotAnalyzed(videofolder,str(Path(video).stem),DLCscorer,DLCscorerlegacy,flag='checking')
if notanalyzed:
print("It seems the video has not been analyzed yet, or the video is not found! You can only refine the labels after the a video is analyzed. Please run 'analyze_video' first. Or, please double check your video file path")
else:
Dataframe = pd.read_hdf(dataname,'df_with_missing')
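# Note (added for clarity): the stored table is indexed by frame number and carries a
# (scorer, bodyparts, coords) column MultiIndex with coords in {x, y, likelihood},
# matching the pdindex construction shown further below.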
scorer=Dataframe.columns.get_level_values(0)[0] # reading the scorer name from the dataframe's column MultiIndex
nframes=np.size(Dataframe.index)
# extract min and max index based on start stop interval.
startindex=max([int(np.floor(nframes*cfg['start'])),0])
stopindex=min([int(np.ceil(nframes*cfg['stop'])),nframes])
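# For example, with nframes=1000 and cfg['start']=0.25, cfg['stop']=0.75, only frames
# 250..750 are considered; the defaults start=0 and stop=1 cover the whole video.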
trainingsetindex: int, optional
Integer specifying which TrainingFraction to use. By default the first one is used (note that TrainingFraction is a list in config.yaml).
save_as_csv: bool, optional
Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``.
destfolder: string, optional
Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
folder also needs to be passed.
"""
# Load config file, scorer and videos
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
print("Processing %s"%(video))
if destfolder is None:
destfolder= str(Path(video).parents[0])
vname=Path(video).stem
notanalyzed,outdataname,sourcedataname,scorer=auxiliaryfunctions.CheckifPostProcessing(destfolder,vname,DLCscorer,DLCscorerlegacy,suffix='_skeleton')
if notanalyzed:
Dataframe = pd.read_hdf(sourcedataname,'df_with_missing')
# Process skeleton
bones = {}
for bp1, bp2 in cfg['skeleton']:
name = "{}_{}".format(bp1, bp2)
bones[name] = analyzebone(Dataframe[scorer][bp1], Dataframe[scorer][bp2])
skeleton = pd.concat(bones, axis=1)
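# A plausible continuation (sketch, not part of this excerpt): persist the bone table next to
# the pose data using the 'outdataname' returned by CheckifPostProcessing above, and optionally
# export a .csv copy when save_as_csv is set.
skeleton.to_hdf(outdataname, 'df_with_missing', format='table', mode='w')
if save_as_csv:
    skeleton.to_csv(outdataname.split('.h5')[0] + '.csv')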
--------
"""
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg['TrainingFraction'][trainingsetindex]
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction) #automatically loads corresponding model (even training iteration based on snapshot index)
bodyparts=auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(cfg,displayedbodyparts)
if draw_skeleton:
bodyparts2connect = cfg['skeleton']
skeleton_color = cfg['skeleton_color']
else:
bodyparts2connect = None
skeleton_color = None
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
if destfolder is None:
videofolder= Path(video).parents[0] #where your folder with videos is.
else:
videofolder=destfolder
os.chdir(str(videofolder))
videotype = Path(video).suffix
print("Starting % ", videofolder, videos)
vname = str(Path(video).stem)
#if notanalyzed:
#notanalyzed,outdataname,sourcedataname,DLCscorer=auxiliaryfunctions.CheckifPostProcessing(folder,vname,DLCscorer,DLCscorerlegacy,suffix='checking')
if filtered:
If ``True``, then plots are also displayed.
destfolder: string, optional
Specifies the destination folder that was used for storing analysis data (default is the path of the video).
Example
--------
for plotting the trajectories
>>> deeplabcut.plot_trajectories('/home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
--------
"""
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg['TrainingFraction'][trainingsetindex]
DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction) #automatically loads corresponding model (even training iteration based on snapshot index)
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
for video in Videos:
print(video)
if destfolder is None:
videofolder = str(Path(video).parents[0])
else:
videofolder=destfolder
videotype = str(Path(video).suffix)
vname = str(Path(video).stem)
print("Starting % ", videofolder, video)
notanalyzed, dataname, DLCscorer=auxiliaryfunctions.CheckifNotAnalyzed(videofolder,vname,DLCscorer,DLCscorerlegacy,flag='checking')
if notanalyzed:
print("The video was not analyzed with this scorer:", DLCscorer)
else:
#LoadData
#sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
if TFGPUinference:
sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_cfg)
else:
sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
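# With TFGPUinference=True the pose decoding runs inside the TensorFlow graph (typically
# faster on a GPU); otherwise the standard numpy-based predictor is used.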
pdindex = pd.MultiIndex.from_product([[DLCscorer],
dlc_cfg['all_joints_names'],
xyz_labs],
names=['scorer', 'bodyparts', 'coords'])
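# Illustrative use of pdindex (assumed names, not from this excerpt): the flat prediction
# array can be wrapped into a labelled table with one (scorer, bodypart, coord) per column, e.g.
#   DataMachine = pd.DataFrame(PredictedData[:nframes, :], columns=pdindex, index=range(nframes))
#   DataMachine.to_hdf(dataname, 'df_with_missing', format='table', mode='w')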
##################################################
# Datafolder
##################################################
Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
if len(Videos)>0:
#looping over videos
for video in Videos:
DLCscorer=AnalyzeVideo(video,DLCscorer,DLCscorerlegacy,trainFraction,cfg,dlc_cfg,sess,inputs, outputs,pdindex,save_as_csv, destfolder,TFGPUinference,dynamic)
os.chdir(str(start_path))
print("The videos are analyzed. Now your research can truly start! \n You can create labeled videos with 'create_labeled_video'.")
print("If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract any outlier frames!")
return DLCscorer #note: this is either DLCscorer or DLCscorerlegacy depending on what was used!
else:
print("No video/s found. Please check your path!")
return DLCscorer
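# Typical call of this routine (sketch; assumes the surrounding function is deeplabcut.analyze_videos,
# and the parameter values are purely illustrative):
# >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',
#                               ['/analysis/project/video/reachingvideo1.avi'], save_as_csv=True)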
Use a median filter over 10 bins:
deeplabcut.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,windowlength=10)
One can then use the filtered rather than the frame-by-frame predictions by calling:
deeplabcut.plot_trajectories('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
deeplabcut.create_labeled_video('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
--------
Returns a filtered pandas DataFrame with the same structure as the normal output of the network.
"""
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(video,videotype)
if len(Videos)>0:
for video in Videos:
if destfolder is None:
destfolder = str(Path(video).parents[0])
print("Filtering with %s model %s"%(filtertype,video))
videofolder = destfolder
vname=Path(video).stem
notanalyzed,outdataname,sourcedataname,scorer=auxiliaryfunctions.CheckifPostProcessing(destfolder,vname,DLCscorer,DLCscorerlegacy,suffix='filtered')
if notanalyzed:
Dataframe = pd.read_hdf(sourcedataname,'df_with_missing')
for bpindex,bp in tqdm(enumerate(cfg['bodyparts'])):
pdindex = pd.MultiIndex.from_product([[scorer], [bp], ['x', 'y','likelihood']],names=['scorer', 'bodyparts', 'coords'])
x,y,p=Dataframe[scorer][bp]['x'].values,Dataframe[scorer][bp]['y'].values,Dataframe[scorer][bp]['likelihood'].values
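# A plausible next step (sketch; mirrors the median-filter example documented above): smooth each
# coordinate with a running median while leaving the likelihoods untouched, e.g. with scipy:
#   from scipy.signal import medfilt
#   x_smoothed = medfilt(x, kernel_size=5)  # kernel_size must be odd for medfilt
#   y_smoothed = medfilt(y, kernel_size=5)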