How to use the filelock.SoftFileLock class in filelock

To help you get started, we’ve selected a few filelock.SoftFileLock examples based on popular ways it is used in public projects.

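Before looking at the project code below, here is a minimal sketch of the basic pattern (the path and do_work are placeholders, not taken from any project on this page). SoftFileLock is acquired as a context manager and raises filelock.Timeout when the lock cannot be acquired within the given timeout:

import filelock

lock = filelock.SoftFileLock("/tmp/example.lock", timeout=10)
try:
    with lock:
        do_work()  # placeholder for the code that must not run concurrently
except filelock.Timeout:
    print("another process is holding /tmp/example.lock")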

github izderadicka / mybookshelf2 / app / logic.py
def create_new_location(source, upload, move=False):
    base_dir = current_app.config['BOOKS_BASE_DIR']
    if isinstance(upload, model.Upload):
        new_file = os.path.join(current_app.config['UPLOAD_DIR'], upload.file)
    else:
        new_file = upload
    new_location = os.path.join(source.ebook.base_dir, os.path.basename(norm_file_name(source)))
    #if source.ebook.base_dir else norm_file_name(source) #TODO: Remove this WA
    ebook_dir = os.path.join(base_dir, os.path.split(new_location)[0])
    if not os.path.exists(ebook_dir):
        os.makedirs(ebook_dir, exist_ok=True)
    lock_file = os.path.join(ebook_dir, '.lock_this_dir')
    index = 1
    with filelock.SoftFileLock(lock_file, timeout=5):
        while os.path.exists(os.path.join(base_dir, new_location)):
            name, ext = os.path.splitext(new_location)
            new_location = name + '(%d)' % index + ext
            index += 1
        if move:
            shutil.move(new_file, os.path.join(base_dir, new_location))
        else:
            shutil.copy(new_file, os.path.join(base_dir, new_location))

    return new_location
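Because the lock above is created with timeout=5, acquisition gives up after five seconds and raises filelock.Timeout rather than blocking indefinitely. A caller could surface that to the user; a sketch, reusing the function above:

try:
    new_location = create_new_location(source, upload, move=True)
except filelock.Timeout:
    # .lock_this_dir was held by another process for more than 5 seconds
    current_app.logger.warning('books directory is locked, try again later')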
github nipype / pydra / pydra / engine / core.py
            if result is not None:
                return result
        # creating connections that were defined after adding tasks to the wf
        for task in self.graph.nodes:
            # if workflow has task_rerun=True and propagate_rerun=True,
            # it should be passed to the tasks
            if self.task_rerun and self.propagate_rerun:
                task.task_rerun = self.task_rerun
                # if the task is a wf, then propagate_rerun should also be set
                if is_workflow(task):
                    task.propagate_rerun = self.propagate_rerun
            task.cache_locations = task._cache_locations + self.cache_locations
            self.create_connections(task)
        # TODO add signal handler for processes killed after lock acquisition
        self.hooks.pre_run(self)
        with SoftFileLock(lockfile):
            # Let only one equivalent process run
            odir = self.output_dir
            if not self.can_resume and odir.exists():
                shutil.rmtree(odir)
            cwd = os.getcwd()
            odir.mkdir(parents=False, exist_ok=self.can_resume)
            self.audit.start_audit(odir=odir)
            result = Result(output=None, runtime=None, errored=False)
            self.hooks.pre_run_task(self)
            try:
                self.audit.monitor()
                await self._run_task(submitter, rerun=rerun)
                result.output = self._collect_outputs()
            except Exception as e:
                record_error(self.output_dir, e)
                result.errored = True
github nipype / pydra / pydra / engine / core.py
def _run(self, rerun=False, **kwargs):
        self.inputs = attr.evolve(self.inputs, **kwargs)
        self.inputs.check_fields_input_spec()
        checksum = self.checksum
        lockfile = self.cache_dir / (checksum + ".lock")
        # Eagerly retrieve cached - see scenarios in __init__()
        self.hooks.pre_run(self)
        # TODO add signal handler for processes killed after lock acquisition
        with SoftFileLock(lockfile):
            if not (rerun or self.task_rerun):
                result = self.result()
                if result is not None:
                    return result
            # Let only one equivalent process run
            odir = self.output_dir
            if not self.can_resume and odir.exists():
                shutil.rmtree(odir)
            cwd = os.getcwd()
            odir.mkdir(parents=False, exist_ok=self.can_resume)
            orig_inputs = attr.asdict(self.inputs)
            map_copyfiles = copyfile_input(self.inputs, self.output_dir)
            modified_inputs = template_update(self.inputs, map_copyfiles)
            if modified_inputs:
                self.inputs = attr.evolve(self.inputs, **modified_inputs)
            self.audit.start_audit(odir)
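Both core.py excerpts above rely on the same pattern: the lock file name is derived from a checksum of the task's inputs, so only processes running an equivalent task contend for the same lock, and whichever finishes first fills the cache for the rest. The TODO comments hint at SoftFileLock's main caveat: it only checks whether the lock file exists, so a process killed while holding the lock leaves a stale lock file behind. A stripped-down sketch of the checksum-keyed pattern (the names here are illustrative, not pydra's API):

import hashlib
from pathlib import Path
from filelock import SoftFileLock

def run_cached(cache_dir: Path, inputs: bytes, compute):
    # Equivalent inputs hash to the same key, hence the same lock and cache slot.
    checksum = hashlib.sha256(inputs).hexdigest()
    lockfile = cache_dir / (checksum + ".lock")
    result_file = cache_dir / (checksum + ".out")
    with SoftFileLock(lockfile):
        if result_file.exists():
            return result_file.read_bytes()  # another process already computed it
        result = compute(inputs)
        result_file.write_bytes(result)
        return result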
github nipype / pydra / pydra / engine / helpers.py
def save(task_path, result=None, task=None, name_prefix=None):
    """
    Save a task and/or a result as pickle files in task_path.

    Parameters
    ----------
    result : :obj:`Result`
        Result to pickle and write
    task : :class:`~pydra.engine.core.TaskBase`
        Task to pickle and write
    """

    if task is None and result is None:
        raise ValueError("Nothing to be saved")

    if not isinstance(task_path, Path):
        task_path = Path(task_path)
    task_path.mkdir(parents=True, exist_ok=True)
    if name_prefix is None:
        name_prefix = ""

    lockfile = task_path.parent / (task_path.name + "_save.lock")
    with SoftFileLock(lockfile):
        if result:
            if task_path.name.startswith("Workflow") and result.output is not None:
                # copy files to the workflow directory
                result = copyfile_workflow(wf_path=task_path, result=result)
            with (task_path / f"{name_prefix}_result.pklz").open("wb") as fp:
                cp.dump(result, fp)
        if task:
            with (task_path / f"{name_prefix}_task.pklz").open("wb") as fp:
                cp.dump(task, fp)
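One detail worth copying from this helper: the lock file is created next to task_path (in task_path.parent) rather than inside it, so clearing or rewriting the output directory can never delete a lock another writer still holds. Assuming this excerpt is pydra's save helper (pydra.engine.helpers.save), a call might look like:

from pydra.engine.helpers import save

save(task_path, result=result, name_prefix="checkpoint")  # writes checkpoint_result.pklz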

filelock

A platform-independent file lock.

License: Unlicense
Package Health Score: 94 / 100