Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
>>> trainIndexes, testIndexes=deeplabcut.mergeandsplit(config,trainindex=0,uniform=False)
returns the indices for the first video folder (as defined in config file) as testIndexes and all others as trainIndexes.
You can then create the training set by calling (e.g. defining it as Shuffle 3):
>>> deeplabcut.create_training_dataset(config,Shuffles=[3],trainIndexes=trainIndexes,testIndexes=testIndexes)
To freeze a (uniform) split:
>>> trainIndexes, testIndexes=deeplabcut.mergeandsplit(config,trainindex=0,uniform=True)
You can then create two model instances that have the identical trainingset. Thereby you can assess the role of various parameters on the performance of DLC.
>>> deeplabcut.create_training_dataset(config,Shuffles=[0],trainIndexes=trainIndexes,testIndexes=testIndexes)
>>> deeplabcut.create_training_dataset(config,Shuffles=[1],trainIndexes=trainIndexes,testIndexes=testIndexes)
--------
"""
# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
scorer = cfg['scorer']
project_path = cfg['project_path']
# Create path for training sets & store data there
trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg) #Path concatenation OS platform independent
auxiliaryfunctions.attempttomakefolder(Path(os.path.join(project_path,str(trainingsetfolder))),recursive=True)
fn=os.path.join(project_path,trainingsetfolder,'CollectedData_'+cfg['scorer'])
try:
Data= pd.read_hdf(fn+'.h5', 'df_with_missing')
except FileNotFoundError:
Data = merge_annotateddatasets(cfg,project_path,Path(os.path.join(project_path,trainingsetfolder)),windows2linux=windows2linux)
Data = Data[scorer] #extract labeled data
if uniform==True:
TrainingFraction = cfg['TrainingFraction']
self.widget_panel.SetSizer(widgetsizer)
self.widget_panel.SetSizerAndFit(widgetsizer)
# Variables initialization
self.numberFrames = 0
self.currFrame = 0
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.drs = []
self.extract_range_frame = False
self.firstFrame = 0
# self.cropping = False
# Read confing file
self.cfg = auxiliaryfunctions.read_config(config)
self.Task = self.cfg['Task']
self.start = self.cfg['start']
self.stop = self.cfg['stop']
self.date = self.cfg['date']
self.trainFraction = self.cfg['TrainingFraction']
self.trainFraction = self.trainFraction[0]
self.videos = self.cfg['video_sets'].keys()
self.bodyparts = self.cfg['bodyparts']
self.colormap = plt.get_cmap(self.cfg['colormap'])
self.colormap = self.colormap.reversed()
self.markerSize = self.cfg['dotsize']
self.alpha = self.cfg['alphavalue']
self.iterationindex=self.cfg['iteration']
self.cropping = self.cfg['cropping']
self.video_names = [Path(i).stem for i in self.videos]
self.config_path = Path(config)
self.load.Enable(False)
self.next.Enable(True)
self.save.Enable(True)
else:
dlg.Destroy()
self.Close(True)
dlg.Destroy()
# Enabling the zoom, pan and home buttons
self.zoom.Enable(True)
self.home.Enable(True)
self.pan.Enable(True)
self.lock.Enable(True)
# Reading config file and its variables
self.cfg = auxiliaryfunctions.read_config(self.config_file)
self.scorer = self.cfg['scorer']
self.bodyparts = self.cfg['bodyparts']
self.videos = self.cfg['video_sets'].keys()
self.markerSize = self.cfg['dotsize']
self.alpha = self.cfg['alphavalue']
self.colormap = plt.get_cmap(self.cfg['colormap'])
self.colormap = self.colormap.reversed()
self.project_path=self.cfg['project_path']
self.index =np.sort([fn for fn in glob.glob(os.path.join(self.dir,'*.png')) if ('labeled.png' not in fn)])
self.statusbar.SetStatusText('Working on folder: {}'.format(os.path.split(str(self.dir))[-1]))
self.relativeimagenames=['labeled'+n.split('labeled')[1] for n in self.index]#[n.split(self.project_path+'/')[1] for n in self.index]
# Reading the existing dataset,if already present
try:
self.dataFrame = pd.read_hdf(os.path.join(self.dir,'CollectedData_'+self.scorer+'.h5'),'df_with_missing')
self.dataFrame.sort_index(inplace=True)
def get_largestshuffle_index(config):
    """Return the largest shuffle index over all dlc-models of the current iteration.

    Parameter
    ----------
    config : string
        Full path of the config.yaml file in the project.

    Returns
    -------
    int
        Highest shuffle number found among the model directories of the
        current iteration (folders named ``...shuffle<N>``), or 0 if the
        iteration folder does not exist or contains no such folder.
    """
    cfg = auxiliaryfunctions.read_config(config)
    project_path = cfg['project_path']
    iterate = 'iteration-' + str(cfg['iteration'])
    dlc_model_path = os.path.join(project_path, 'dlc-models', iterate)
    max_shuffle_index = 0
    if os.path.isdir(dlc_model_path):
        # Parse the shuffle number directly from each folder name and take
        # the max. This is robust against an empty iteration folder, folders
        # without a numeric suffix, and mixed training fractions — unlike
        # sorting on all digits of the name concatenated together, which
        # also folds the trainset fraction into the key.
        for model in os.listdir(dlc_model_path):
            try:
                shuffle_index = int(model.split('shuffle')[-1])
            except ValueError:
                continue  # folder without a trailing shuffle number
            max_shuffle_index = max(max_shuffle_index, shuffle_index)
    return max_shuffle_index
def dropannotationfileentriesduetodeletedimages(config):
    """
    Drop entries for all deleted images in annotation files, i.e. for folders of the type: /labeled-data/*folder*/CollectedData_*scorer*.h5
    Will be carried out iteratively for all *folders* in labeled-data.
    Parameter
    ----------
    config : string
        String containing the full path of the config file in the project.
    """
    cfg = auxiliaryfunctions.read_config(config)
    # One labeled-data subfolder per video listed in the config file.
    videos = cfg['video_sets'].keys()
    video_names = [Path(i).stem for i in videos]
    folders = [Path(config).parent / 'labeled-data' /Path(i) for i in video_names]
    for folder in folders:
        fn=os.path.join(str(folder),'CollectedData_' + cfg['scorer'] + '.h5')
        DC = pd.read_hdf(fn, 'df_with_missing')
        dropped=False
        for imagename in DC.index:
            # Index entries are image paths relative to the project folder;
            # keep a row only if its image file still exists on disk.
            if os.path.isfile(os.path.join(cfg['project_path'],imagename)):
                pass
            else:
                print("Dropping...", imagename)
                DC = DC.drop(imagename)
                dropped=True
        if dropped==True:  # NOTE(review): body truncated in this view — presumably re-saves DC to .h5/.csv when rows were dropped; confirm against the full source.
def convertannotationdata_fromwindows2unixstyle(config,userfeedback=True):
    """
    Convert the paths stored in the annotation files (CollectedData_*scorer*.h5
    in each labeled-data/<video> folder) from Windows to Linux/unix style.
    This matters when data were labeled on Windows but should be
    re-labeled/checked on a Linux computer.

    For training on Linux with Windows-annotated data this is not needed, as
    paths are converted during training set creation.

    config : string
        Full path of the config.yaml file as a string.
    userfeedback: bool, optional
        If true, the user is asked per labeled-data folder whether its
        annotation data should be converted.
    """
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg['scorer']
    stems = [Path(video).stem for video in cfg['video_sets'].keys()]
    labeled_folders = [Path(config).parent / 'labeled-data' / Path(stem) for stem in stems]
    confirmations = ('y', 'yes', 'Ja', 'ha')
    for labeled_folder in labeled_folders:
        if userfeedback==True:
            print("Do you want to convert the annotationdata in folder:", labeled_folder, "?")
            reply = input("yes/no")
        else:
            reply = "yes"
        if reply in confirmations:
            base = os.path.join(str(labeled_folder), 'CollectedData_' + scorer)
            annotations = pd.read_hdf(base + '.h5', 'df_with_missing')
            convertpaths_to_unixstyle(annotations, base, cfg)
Parameter
----------
config : string
Full path of the config.yaml file as a string.
forceiterate: int, optional
If an integer is given the iteration variable is set to this value (this is only done if all datasets were labeled or refined)
Example
--------
>>> deeplabcut.merge_datasets('/analysis/project/reaching-task/config.yaml')
--------
"""
import yaml
cfg = auxiliaryfunctions.read_config(config)
config_path = Path(config).parents[0]
bf=Path(str(config_path/'labeled-data'))
allfolders = [os.path.join(bf,fn) for fn in os.listdir(bf) if "_labeled" not in fn] #exclude labeled data folders!
flagged=False
for findex,folder in enumerate(allfolders):
if os.path.isfile(os.path.join(folder,'MachineLabelsRefine.h5')): #Folder that was manually refine...
pass
elif os.path.isfile(os.path.join(folder,'CollectedData_'+cfg['scorer']+'.h5')): #Folder that contains human data set...
pass
else:
print("The following folder was not manually refined,...",folder)
flagged=True
pass #this folder does not contain a MachineLabelsRefine file (not updated...)
if flagged==False:
def edit_config(self, event):
    """
    Open the project's configuration file (``self.cfg``) with the platform's
    default application, then reload it into ``self.pose_cfg``.

    Does nothing when no config path is set; raises FileNotFoundError when
    the file could not be opened.
    """
    if self.cfg == "":
        return
    # For mac compatibility
    if platform.system() == 'Darwin':
        self.file_open_bool = subprocess.call(['open', self.cfg])
        # `open` returns 0 on success, so force the flag to True here.
        self.file_open_bool = True
    else:
        self.file_open_bool = webbrowser.open(self.cfg)
    if not self.file_open_bool:
        raise FileNotFoundError("File not found!")
    self.pose_cfg = auxiliaryfunctions.read_config(self.cfg)
--------
If you want to analyze multiple videos with shuffle = 2 and save results as an additional csv file too
>>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'], shuffle=2,save_as_csv=True)
--------
"""
if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
del os.environ['TF_CUDNN_USE_AUTOTUNE'] #was potentially set during training
if gputouse is not None: #gpu selection
os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)
tf.reset_default_graph()
start_path=os.getcwd() #record cwd to return to this directory in the end
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg['TrainingFraction'][trainingsetindex]
if cropping is not None:
cfg['cropping']=True
cfg['x1'],cfg['x2'],cfg['y1'],cfg['y2']=cropping
print("Overwriting cropping parameters:", cropping)
print("These are used for all videos, but won't be save to the cfg file.")
modelfolder=os.path.join(cfg["project_path"],str(auxiliaryfunctions.GetModelFolder(trainFraction,shuffle,cfg)))
path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
try:
dlc_cfg = load_config(str(path_test_config))
except FileNotFoundError:
raise FileNotFoundError("It seems the model for shuffle %s and trainFraction %s does not exist."%(shuffle,trainFraction))
# Check which snapshots are available and sort them by # iterations
augmenter_type: string
Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.
Example
--------
>>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
Windows:
>>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
--------
"""
from skimage import io
import scipy.io as sio
# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
scorer = cfg['scorer']
project_path = cfg['project_path']
# Create path for training sets & store data there
trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg) #Path concatenation OS platform independent
auxiliaryfunctions.attempttomakefolder(Path(os.path.join(project_path,str(trainingsetfolder))),recursive=True)
Data = merge_annotateddatasets(cfg,project_path,Path(os.path.join(project_path,trainingsetfolder)),windows2linux)
Data = Data[scorer] #extract labeled data
#loading & linking pretrained models
if net_type is None: #loading & linking pretrained models
net_type =cfg.get('default_net_type', 'resnet_50')
else:
if 'resnet' in net_type or 'mobilenet' in net_type:
pass
else: