else:
    self._fetch_tmp_dir_info = 'distfiles'
    distdir = self.config.options.distfiles

tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()

variables = {
    "DISTDIR": distdir,
    "URI": uri,
    "FILE": tmp_basename
}

self._fetch_tmp_file = os.path.join(distdir, tmp_basename)

try:
    os.unlink(self._fetch_tmp_file)
except OSError:
    pass

args = portage.util.shlex_split(default_fetchcommand)
args = [portage.util.varexpand(x, mydict=variables)
    for x in args]
args = [_unicode_encode(x,
    encoding=_encodings['fs'], errors='strict') for x in args]

null_fd = os.open(os.devnull, os.O_RDONLY)
fetcher = PopenProcess(background=self.background,
    proc=subprocess.Popen(args, stdin=null_fd,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
    scheduler=self.scheduler)
os.close(null_fd)
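# --- Illustrative sketch (not part of the original excerpt) ---
# The excerpt above expands a FETCHCOMMAND template into an argv list via
# portage.util.shlex_split() and portage.util.varexpand(). The standalone
# sketch below approximates that expansion with the standard library only;
# the command template and the values are made-up examples, not real config.
import shlex

_fetchcommand = 'wget -c -O "${DISTDIR}/${FILE}" "${URI}"'
_variables = {
    "DISTDIR": "/var/cache/distfiles",
    "URI": "https://example.org/src/foo-1.0.tar.gz",
    "FILE": "foo-1.0.tar.gz._emirrordist_fetch_.12345",
}
_args = shlex.split(_fetchcommand)
# varexpand() performs the ${VAR} substitution; plain str.replace() stands
# in for it here to keep the sketch self-contained.
for _name, _value in _variables.items():
    _args = [_arg.replace("${%s}" % _name, _value) for _arg in _args]
# _args is now:
# ['wget', '-c', '-O',
#  '/var/cache/distfiles/foo-1.0.tar.gz._emirrordist_fetch_.12345',
#  'https://example.org/src/foo-1.0.tar.gz']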
def _fetch_copier_exit(self, copier):
    self._assert_current(copier)

    try:
        os.unlink(self._fetch_tmp_file)
    except OSError:
        pass

    if self._was_cancelled():
        self.wait()
        return

    if copier.returncode == os.EX_OK:
        self._make_layout_links()
    else:
        # out of space?
        msg = "%s %s copy failed unexpectedly: %s" % \
            (self.distfile, self._fetch_tmp_dir_info, copier.future.exception())
        self.scheduler.output(msg + '\n', background=True,
            log_path=self._log_path)
        logging.error(msg)
def _success(self):
    if not self._previously_added:
        size = self.digests["size"]
        self.config.added_byte_count += size
        self.config.added_file_count += 1
        self.config.log_success("%s\t%s\tadded %i bytes" %
            (self.cpv, self.distfile, size))

    if self._log_path is not None:
        if not self.config.options.dry_run:
            try:
                os.unlink(self._log_path)
            except OSError:
                pass

    if self.config.options.recycle_dir is not None:
        recycle_file = os.path.join(
            self.config.options.recycle_dir, self.distfile)
        if self.config.options.dry_run:
            if os.path.exists(recycle_file):
                logging.info("dry-run: delete '%s' from recycle" %
                    (self.distfile,))
        else:
            try:
                os.unlink(recycle_file)
            except OSError:
    try:
        if hashname not in hashfunc_keys:
            raise portage.exception.DigestException(hashname + \
                " hash function not available (needs dev-python/pycrypto)")
        myhash, mysize = hashfunc_map[hashname].checksum_file(myfilename)
    except (OSError, IOError) as e:
        if e.errno in (errno.ENOENT, errno.ESTALE):
            raise portage.exception.FileNotFound(myfilename)
        elif e.errno == portage.exception.PermissionDenied.errno:
            raise portage.exception.PermissionDenied(myfilename)
        raise
    return myhash, mysize
finally:
    if prelink_tmpfile:
        try:
            os.unlink(prelink_tmpfile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            del e
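# --- Illustrative sketch (not part of the original excerpt) ---
# The excerpt above dispatches to one of portage's registered hash backends
# (hashfunc_map) and translates low-level OSErrors into portage exceptions.
# A minimal standard-library analogue of "hash the file and return
# (digest, size)" looks like this; the function name is made up.
import hashlib

def _sketch_checksum_file(path, hashname="MD5"):
    hasher = hashlib.new(hashname.lower())
    size = 0
    with open(path, "rb") as f:
        # Read in fixed-size blocks so large distfiles are not loaded
        # into memory at once.
        for block in iter(lambda: f.read(65536), b""):
            hasher.update(block)
            size += len(block)
    return hasher.hexdigest(), size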
def _start(self):
    pkg = self.pkg
    pretend = self.pretend
    bintree = pkg.root_config.trees["bintree"]
    settings = bintree.settings
    pkg_path = self.pkg_path

    exists = os.path.exists(pkg_path)
    resume = exists and os.path.basename(pkg_path) in bintree.invalids
    if not (pretend or resume):
        # Remove existing file or broken symlink.
        try:
            os.unlink(pkg_path)
        except OSError:
            pass

    # urljoin doesn't work correctly with
    # unrecognized protocols like sftp
    if bintree._remote_has_index:
        instance_key = bintree.dbapi._instance_key(pkg.cpv)
        rel_uri = bintree._remotepkgs[instance_key].get("PATH")
        if not rel_uri:
            rel_uri = pkg.cpv + ".tbz2"
        remote_base_uri = bintree._remotepkgs[
            instance_key]["BASE_URI"]
        uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
    else:
        uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
            "/" + pkg.pf + ".tbz2"
exitcode = 128 + signal.SIGINT

#  0  Success
#  1  Syntax or usage error
#  2  Protocol incompatibility
#  5  Error starting client-server protocol
# 35  Timeout waiting for daemon connection
if exitcode not in (0, 1, 2, 5, 35):
    # If the exit code is not among those listed above,
    # then we may have a partial/inconsistent sync
    # state, so our previously read timestamp as well
    # as the corresponding file can no longer be
    # trusted.
    timestamp = 0
    try:
        os.unlink(self.servertimestampfile)
    except OSError:
        pass
else:
    updatecache_flg = True
if exitcode in [0, 1, 3, 4, 11, 14, 20, 21]:
    is_synced = True
else:
    # Code 2 indicates protocol incompatibility, which is expected
    # for servers with protocol < 29 that don't support
    # --prune-empty-directories. Retry for a server that supports
    # at least rsync protocol version 29 (>=rsync-2.6.4).
    pass
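# --- Illustrative sketch (not part of the original excerpt) ---
# The exit-code handling above can be read as a small classifier: a handful
# of codes still count as "synced", code 2 signals a protocol mismatch worth
# retrying with different options, and anything outside (0, 1, 2, 5, 35)
# invalidates the cached server timestamp. A condensed helper expressing
# that reading (the name is made up):
def _sketch_classify_rsync_exit(exitcode):
    """Return (is_synced, retry_for_old_protocol)."""
    is_synced = exitcode in (0, 1, 3, 4, 11, 14, 20, 21)
    retry_for_old_protocol = exitcode == 2
    return is_synced, retry_for_old_protocol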
@return: a tuple of (removed, fstat_result), where removed is True if
    lock_path does not correspond to lock_fd, and False otherwise
"""
try:
    fstat_st = os.fstat(lock_fd)
except OSError as e:
    if e.errno not in (errno.ENOENT, errno.ESTALE):
        _raise_exc(e)
    return (True, None)

# Since stat is not reliable for removed files on NFS with the default
# file attribute cache behavior ('ac' mount option), create a temporary
# hardlink in order to prove that the file path exists on the NFS server.
hardlink_path = hardlock_name(lock_path)
try:
    os.unlink(hardlink_path)
except OSError as e:
    if e.errno not in (errno.ENOENT, errno.ESTALE):
        _raise_exc(e)

try:
    try:
        os.link(lock_path, hardlink_path)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            _raise_exc(e)
        return (True, None)

    hardlink_stat = os.stat(hardlink_path)
    if hardlink_stat.st_ino != fstat_st.st_ino or hardlink_stat.st_dev != fstat_st.st_dev:
        # Create another hardlink in order to detect whether or not
        # hardlink inode numbers are expected to match. For example,
        # inode numbers are not expected to match for sshfs.
def unhardlink_lockfile(lockfilename, unlinkfile=True):
    myhardlock = hardlock_name(lockfilename)
    if unlinkfile and hardlink_is_mine(myhardlock, lockfilename):
        # Make sure not to touch lockfilename unless we really have a lock.
        try:
            os.unlink(lockfilename)
        except OSError:
            pass
    try:
        os.unlink(myhardlock)
    except OSError:
        pass
                build_info = {
                    "BINPKGMD5": "%s\n" % pkg._metadata["MD5"],
                }
                if pkg.build_id is not None:
                    build_info["BUILD_ID"] = "%s\n" % pkg.build_id
                for k, v in build_info.items():
                    with io.open(_unicode_encode(
                        os.path.join(infoloc, k),
                        encoding=_encodings['fs'], errors='strict'),
                        mode='w', encoding=_encodings['repo.content'],
                        errors='strict') as f:
                        f.write(v)
    else:
        if "PORTAGE_BINPKG_TMPFILE" in mysettings:
            try:
                os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"])
            except OSError:
                pass
elif returnpid:
    writemsg("!!! doebuild: %s\n" %
        _("returnpid is not supported for phase '%s'\n" % mydo),
        noiselevel=-1)

if regular_actionmap_phase:
    # handled above
    pass
elif mydo == "qmerge":
    # check to ensure install was run. this *only* pops up when users
    # forget it and are using ebuild
    if not os.path.exists(
        os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
    if update_manifest:
        if myentries or not (self.thin or self.allow_missing):
            # If myentries is empty, don't write an empty manifest
            # when thin or allow_missing is enabled. Except for
            # thin manifests with no DIST entries, myentries is
            # non-empty for all currently known use cases.
            write_atomic(self.getFullname(), "".join("%s\n" %
                str(myentry) for myentry in myentries))
            self._apply_max_mtime(preserved_stats, myentries)
            rval = True
        else:
            # With thin manifest, there's no need to have
            # a Manifest file if there are no DIST entries.
            try:
                os.unlink(self.getFullname())
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
            rval = True

    if sign:
        self.sign()
except (IOError, OSError) as e:
    if e.errno == errno.EACCES:
        raise PermissionDenied(str(e))
    raise
return rval
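# --- Illustrative sketch (not part of the original excerpt) ---
# write_atomic() above replaces the Manifest in one step so readers never
# see a half-written file. The usual way to get that behavior with the
# standard library is to write a temporary file in the same directory and
# rename it over the target (rename is atomic within one filesystem). The
# helper below is a conceptual sketch with a made-up name, not portage's
# implementation.
import os
import tempfile

def _sketch_write_atomic(path, content):
    dirname = os.path.dirname(path) or "."
    fd, tmp_path = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, "w") as f:
            f.write(content)
        os.rename(tmp_path, path)
    except BaseException:
        # Clean up the temporary file if writing or renaming failed.
        os.unlink(tmp_path)
        raise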