# Hide these widgets; they are shown again once the video is loaded
self.start_frames_sizer.ShowItems(show=False)
self.end_frames_sizer.ShowItems(show=False)
self.widget_panel.SetSizer(widgetsizer)
self.widget_panel.SetSizerAndFit(widgetsizer)
self.widget_panel.Layout()
# Variables initialization
self.numberFrames = 0
self.currFrame = 0
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.drs = []
self.cfg = auxiliaryfunctions.read_config(config)
self.Task = self.cfg['Task']
self.start = self.cfg['start']
self.stop = self.cfg['stop']
self.date = self.cfg['date']
self.trainFraction = self.cfg['TrainingFraction']
self.trainFraction = self.trainFraction[0]
self.videos = self.cfg['video_sets'].keys()
self.bodyparts = self.cfg['bodyparts']
self.colormap = plt.get_cmap(self.cfg['colormap'])
self.colormap = self.colormap.reversed()
self.markerSize = self.cfg['dotsize']
self.alpha = self.cfg['alphavalue']
self.video_names = [Path(i).stem for i in self.videos]
self.config_path = Path(config)
self.extract_range_frame = False
self.extract_from_analyse_video = False
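# A minimal sketch (not part of the original GUI code) of how the colormap and
# marker settings read above are typically turned into one plot color per
# bodypart; the bodypart names below are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

example_bodyparts = ['snout', 'leftear', 'rightear', 'tailbase']
cmap = plt.get_cmap('jet').reversed()                     # same pattern as self.colormap above
colors = cmap(np.linspace(0, 1, len(example_bodyparts)))  # one RGBA color per bodypart
for bp, color in zip(example_bodyparts, colors):
    print(bp, color)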
"Scorer": DLCscorer,
"DLC-model-config file": dlc_cfg,
"fps": fps,
"batch_size": dlc_cfg["batch_size"],
"frame_dimensions": (ny, nx),
"nframes": nframes,
"iteration (active-learning)": cfg["iteration"],
"training set fraction": trainFraction,
"cropping": cfg['cropping'],
"cropping_parameters": coords
#"gpu_info": device_lib.list_local_devices()
}
metadata = {'data': dictionary}
print("Saving results in %s..." %(Path(video).parents[0]))
auxiliaryfunctions.SaveData(PredictedData[:nframes,:], metadata, dataname, pdindex, range(nframes),save_as_csv)
return DLCscorer
else:
return DLCscorer
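# A hedged sketch of how the results saved by SaveData above can be inspected
# afterwards; 'dataname' is assumed to be the .h5 output path passed to
# SaveData, and the 'df_with_missing' key matches how result files are read
# elsewhere in this codebase.
import pandas as pd

df = pd.read_hdf(dataname, 'df_with_missing')
print(df.head())  # MultiIndex columns: (scorer, bodypart, x / y / likelihood)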
userfeedback: bool, optional
If True, the user will be asked, for each folder in labeled-data, whether the csv file it contains shall be converted to hdf format.
scorer: string, optional
If a string is given, the scorer/annotator in all csv and hdf files that are changed will be overwritten with this name.
Examples
--------
Convert csv annotation files of the reaching-task project into hdf:
>>> deeplabcut.convertcsv2h5('/analysis/project/reaching-task/config.yaml')
--------
Convert csv annotation files of the reaching-task project into hdf while changing the scorer/annotator in all annotation files to Albert:
>>> deeplabcut.convertcsv2h5('/analysis/project/reaching-task/config.yaml',scorer='Albert')
--------
"""
cfg = auxiliaryfunctions.read_config(config)
videos = cfg['video_sets'].keys()
video_names = [Path(i).stem for i in videos]
folders = [Path(config).parent / 'labeled-data' /Path(i) for i in video_names]
if scorer is None:
    scorer = cfg['scorer']
for folder in folders:
try:
if userfeedback:
print("Do you want to convert the csv file in folder:", folder, "?")
askuser = input("yes/no")
else:
askuser="yes"
if askuser in ('y', 'yes', 'Ja', 'ha'): # multilanguage support :)
fn=os.path.join(str(folder),'CollectedData_' + cfg['scorer'] + '.csv')
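# A minimal sketch of the conversion step that follows the filename built
# above, assuming the usual CollectedData csv layout (three header rows:
# scorer / bodyparts / coords, image paths as the index); the real helper may
# handle more cases.
import pandas as pd

data = pd.read_csv(fn, header=[0, 1, 2], index_col=0)
data.to_hdf(fn.replace('.csv', '.h5'), key='df_with_missing', mode='w')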
def comparevideolistsanddatafolders(config):
"""
Auxiliary function that compares the folders in labeled-data and the ones listed under video_sets (in the config file).
Parameters
----------
config : string
String containing the full path of the config file in the project.
"""
cfg = auxiliaryfunctions.read_config(config)
videos = cfg['video_sets'].keys()
video_names = [Path(i).stem for i in videos]
alldatafolders = [fn for fn in os.listdir(Path(config).parent / 'labeled-data') if '_labeled' not in fn]
print("Config file contains:", len(video_names))
print("Labeled-data contains:", len(alldatafolders))
for vn in video_names:
    if vn not in alldatafolders:
        print(vn, " is missing as a folder!")
for vn in alldatafolders:
    if vn not in video_names:
        print(vn, " is missing in the config file!")
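# The same comparison could be written compactly with set differences; this is
# an equivalent sketch, not the code used above.
only_in_config = set(video_names) - set(alldatafolders)  # videos without a labeled-data folder
only_on_disk = set(alldatafolders) - set(video_names)    # folders not listed under video_sets
print("Missing folders:", sorted(only_in_config))
print("Missing config entries:", sorted(only_on_disk))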
ARIMA model:
deeplabcut.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtertype='arima',ARdegree=5,MAdegree=2)
Use a median filter over 10 bins:
deeplabcut.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,windowlength=10)
One can then use the filtered rather than the frame-by-frame predictions by calling:
deeplabcut.plot_trajectories('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
deeplabcut.create_labeled_video('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
--------
Returns a filtered pandas array with the same structure as the normal output of the network.
"""
cfg = auxiliaryfunctions.read_config(config)
DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
Videos=auxiliaryfunctions.Getlistofvideos(video,videotype)
if len(Videos)>0:
for video in Videos:
if destfolder is None:
destfolder = str(Path(video).parents[0])
print("Filtering with %s model %s"%(filtertype,video))
videofolder = destfolder
vname=Path(video).stem
notanalyzed,outdataname,sourcedataname,scorer=auxiliaryfunctions.CheckifPostProcessing(destfolder,vname,DLCscorer,DLCscorerlegacy,suffix='filtered')
if notanalyzed:
Dataframe = pd.read_hdf(sourcedataname,'df_with_missing')
for bpindex,bp in tqdm(enumerate(cfg['bodyparts'])):
pdindex = pd.MultiIndex.from_product([[scorer], [bp], ['x', 'y','likelihood']],names=['scorer', 'bodyparts', 'coords'])
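# A hedged sketch of the per-bodypart filtering this loop sets up, shown here
# for the 'median' filtertype with scipy; column access assumes the standard
# (scorer, bodypart, coord) MultiIndex of DeepLabCut result files.
from scipy import signal

windowlength = 5  # must be odd for a median filter
x_filtered = signal.medfilt(Dataframe[scorer][bp]['x'].values, kernel_size=windowlength)
y_filtered = signal.medfilt(Dataframe[scorer][bp]['y'].values, kernel_size=windowlength)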
def undistort_points(config,dataframe,camera_pair,destfolder):
cfg_3d = auxiliaryfunctions.read_config(config)
img_path,path_corners,path_camera_matrix,path_undistort=auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
'''
path_undistort = destfolder
filename_cam1 = Path(dataframe[0]).stem
filename_cam2 = Path(dataframe[1]).stem
#currently no interm. saving of this due to high speed.
# check if the undistorted files are already present
if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
else:
'''
if True:  # the cached-file check above is disabled (kept in the block comment), so the coordinates are always recomputed
# Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
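# A hedged sketch of the undistortion step this function performs, using
# OpenCV directly; mtx and dist stand in for the camera intrinsics and
# distortion coefficients stored under path_camera_matrix.
import numpy as np
import cv2

mtx = np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]])  # example intrinsics
dist = np.zeros(5)                                                    # example distortion coefficients
points = np.random.rand(10, 1, 2) * 100                               # ten example (x, y) detections
undistorted = cv2.undistortPoints(points, mtx, dist, P=mtx)           # mapped back to pixel coordinates
print(undistorted.shape)  # (10, 1, 2)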