Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): incomplete fragment -- this span begins in the middle of
# a method (no enclosing ``def`` is visible) and is cut off below inside
# the BuildLogger(...) call, so it is documented in place, not rewritten.
#
# Collect optional spawn keyword arguments from instance attributes:
# every attribute named in self._spawn_kwarg_names that is not None is
# forwarded to the spawn call.
kwargs = {}
for k in self._spawn_kwarg_names:
v = getattr(self, k)
if v is not None:
kwargs[k] = v
# The child uses the prepared fd_pipes mapping, and returnpid=True makes
# _spawn return pid(s) instead of a wait status.
kwargs["fd_pipes"] = fd_pipes
kwargs["returnpid"] = True
# Logging is handled by this process (via BuildLogger below), so do not
# let the spawn function open its own logfile.
kwargs.pop("logfile", None)
retval = self._spawn(self.args, **kwargs)
# Parent side: close descriptors that now belong to the child.
os.close(slave_fd)
if null_input is not None:
os.close(null_input)
if isinstance(retval, int):
# spawn failed
# An int return value is an error status; record it and finish
# asynchronously.
self.returncode = retval
self._async_wait()
return
# Otherwise retval holds pid(s); track the first child.
self.pid = retval[0]
stdout_fd = None
# In the foreground with logging enabled, duplicate the original stdout
# so output can be teed to the log as well.
if can_log and not self.background:
stdout_fd = os.dup(fd_pipes_orig[1])
# NOTE(review): truncated here -- the remaining BuildLogger arguments
# are outside this fragment.
build_logger = BuildLogger(env=self.env,
log_path=log_file_path,
log_filter_file=self.log_filter_file,
def _test_lock(fd, lock_path):
os.close(fd)
try:
with open(lock_path, 'a') as f:
fcntl.lockf(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
except EnvironmentError as e:
if e.errno == errno.EAGAIN:
# Parent process holds lock, as expected.
sys.exit(0)
# Something went wrong.
sys.exit(1)
# NOTE(review): incomplete fragment -- mid-method elog-pipe reader; the
# leading ``if``/``try`` lines that match the ``elif``/``except`` below
# are not visible here.
#
# Split the decoded output into lines, buffering any trailing partial
# line until more data arrives.
lines = _unicode_decode(output).split('\n')
if len(lines) == 1:
# No newline yet: keep accumulating the partial line.
self._buf += lines[0]
else:
# Complete the first line with previously buffered text and retain
# the trailing partial line for the next read.
lines[0] = self._buf + lines[0]
self._buf = lines.pop()
out = io.StringIO()
for line in lines:
# Each record is "funcname phase key msg"; msg may contain spaces,
# hence the maxsplit of 3.
funcname, phase, key, msg = line.split(' ', 3)
self._elog_keys.add(key)
# Dispatch to the named reporter -- assumes funcname names a valid
# attribute of portage.elog.messages; TODO confirm against the
# writer side of this pipe.
reporter = getattr(portage.elog.messages, funcname)
reporter(msg, phase=phase, key=key, out=out)
elif output is not None: # EIO/POLLHUP
# Writer closed its end: stop watching and release the descriptor.
self.scheduler.remove_reader(self._elog_reader_fd)
os.close(self._elog_reader_fd)
self._elog_reader_fd = None
return False
except OSError:
# NOTE(review): the matching ``try`` is outside this fragment;
# errors here appear to be deliberately ignored (best effort).
pass
# NOTE(review): incomplete fragment -- begins mid-method.  Launches the
# configured fetch command asynchronously and logs its output.
#
# Split the fetch command into argv form and expand ${...} variables.
args = portage.util.shlex_split(default_fetchcommand)
args = [portage.util.varexpand(x, mydict=variables)
for x in args]
# Encode argv in the filesystem encoding for exec.
args = [_unicode_encode(x,
encoding=_encodings['fs'], errors='strict') for x in args]
# Give the child /dev/null for stdin; Popen duplicates the descriptor,
# so the parent's copy can be closed immediately afterwards.
null_fd = os.open(os.devnull, os.O_RDONLY)
fetcher = PopenProcess(background=self.background,
proc=subprocess.Popen(args, stdin=null_fd,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
scheduler=self.scheduler)
os.close(null_fd)
# Tee the child's combined stdout/stderr into the log file.
fetcher.pipe_reader = PipeLogger(background=self.background,
input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
scheduler=self.scheduler)
self._start_task(fetcher, self._fetcher_exit)
# requirement, since that's not necessarily true for the
# default directory used by the tempfile module.
# NOTE(review): incomplete fragment -- the comment above continues one
# that starts before this span, and the trailing ``if`` below is cut
# off mid-statement.
if self.usersync_uid is not None:
# When syncing as a dedicated user, place the temp file under
# PORTAGE_TMPDIR/portage so that user can write to it.
tmpdir = os.path.join(self.settings['PORTAGE_TMPDIR'], 'portage')
ensure_dirs_kwargs = {}
if portage.secpass >= 1:
ensure_dirs_kwargs['gid'] = portage.portage_gid
ensure_dirs_kwargs['mode'] = 0o70
ensure_dirs_kwargs['mask'] = 0
portage.util.ensure_dirs(tmpdir, **ensure_dirs_kwargs)
else:
# use default dir from tempfile module
tmpdir = None
fd, tmpservertimestampfile = \
tempfile.mkstemp(dir=tmpdir)
# Only the path is needed here; rsync writes the file itself.
os.close(fd)
if self.usersync_uid is not None:
portage.util.apply_permissions(tmpservertimestampfile,
uid=self.usersync_uid)
# Fetch only the server's metadata/timestamp.chk into the temp file.
command = rsynccommand[:]
command.append('--inplace')
command.append(syncuri.rstrip("/") + \
"/metadata/timestamp.chk")
command.append(tmpservertimestampfile)
content = None
pids = []
try:
# Timeout here in case the server is unresponsive. The
# --timeout rsync option doesn't apply to the initial
# connection attempt.
try:
if self.rsync_initial_timeout:
# NOTE(review): truncated here -- the timeout/alarm handling
# continues outside this fragment.
# NOTE(review): incomplete fragment -- starts inside a print(...) call
# whose opening parenthesis is not visible.  Refetches metadata.xsd
# using the configured FETCHCOMMAND.
"%s the local copy of metadata.xsd "
"needs to be refetched, doing that now" % green("***"))
print()
# Prefer a scheme-specific FETCHCOMMAND_<SCHEME> override when set,
# falling back to the generic FETCHCOMMAND.
parsed_url = urlparse(metadata_xsd_uri)
setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
fcmd = repoman_settings.get(setting)
if not fcmd:
fcmd = repoman_settings.get('FETCHCOMMAND')
if not fcmd:
logging.error("FETCHCOMMAND is unset")
return False
destdir = repoman_settings["DISTDIR"]
# Download to a unique temp name inside DISTDIR so that a partial
# fetch never clobbers an existing metadata.xsd.
fd, metadata_xsd_tmp = tempfile.mkstemp(
prefix='metadata.xsd.', dir=destdir)
os.close(fd)
try:
if not portage.getbinpkg.file_get(
metadata_xsd_uri, destdir, fcmd=fcmd,
filename=os.path.basename(metadata_xsd_tmp)):
logging.error(
"failed to fetch metadata.xsd from '%s'" % metadata_xsd_uri)
return False
try:
# Best effort: adjusting group/permissions may fail for
# unprivileged users, which is acceptable here.
portage.util.apply_secpass_permissions(
metadata_xsd_tmp,
gid=portage.data.portage_gid, mode=0o664, mask=0o2)
except portage.exception.PortageException:
pass
# NOTE(review): truncated -- the matching outer ``try`` cleanup is
# outside this fragment.
def _unregister(self):
"""
Unregister from the scheduler and close open files.
"""
self._registered = False
if self._files is not None:
for f in self._files.values():
self.scheduler.remove_reader(f)
os.close(f)
self._files = None
def _unregister(self):
"""
Unregister from the scheduler and close the input pipe.

NOTE(review): this span may be truncated -- the sibling
implementation above also closes remaining entries in self._files
and resets it to None; confirm against the full file before
assuming this method ends here.
"""
self._registered = False
if self._files is not None:
try:
# 'pipe_in' may be absent (e.g. already consumed); treat that
# as a no-op rather than an error.
pipe_in = self._files.pop('pipe_in')
except KeyError:
pass
else:
# Stop watching before closing so the scheduler never polls a
# closed descriptor.
self.scheduler.remove_reader(pipe_in)
os.close(pipe_in)