"""
self._built_class = ebuild_built.fresh_built_package
format.build.__init__(self, domain, pkg, verified_files, observer)
domain_settings = self.domain.settings
ebd.__init__(self, pkg, initial_env=domain_settings, **kwargs)
self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files")
self.eclass_cache = eclass_cache
self.run_test = force_test or self.feat_or_bool("test", domain_settings)
self.allow_failed_test = self.feat_or_bool("test-fail-continue", domain_settings)
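# Tests are gated twice: a "test" entry in RESTRICT always disables them,
# otherwise they only run when the 'test' USE flag is enabled or force_test
# was passed in.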
if "test" in self.restrict:
self.run_test = False
elif not force_test and "test" not in pkg.use:
if self.run_test:
logger.warning(f"disabling test for {pkg} due to test use flag being disabled")
self.run_test = False
# XXX minor hack
path = self.env["PATH"].split(os.pathsep)
for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")):
b = (self.feat_or_bool(s, domain_settings) and
s not in self.restrict)
setattr(self, s.lower(), b)
if b:
# looks weird I realize, but
# pjoin("/foor/bar", "/barr/foo") == "/barr/foo"
# and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf"
self.env.setdefault(s + "_DIR", pjoin(self.domain.tmpdir, default))
# gentoo bug 355283
libdir = self.env.get("ABI")
if inherited:
mydata["_eclasses_"] = self._ecache.get_eclass_data(
inherited.split())
mydata['_chf_'] = chksum.LazilyHashedPath(pkg.path)
for x in wipes:
del mydata[x]
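# Store the fresh metadata in the first writable cache backend; a CacheError
# is logged and the next backend is tried instead.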
if self._cache is not None:
for cache in self._cache:
if not cache.readonly:
try:
cache[pkg.cpvstr] = mydata
except cache_errors.CacheError as e:
logger.warning("caught cache error: %s", e)
del e
continue
break
return mydata
def category_dirs(self):
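"""Return interned category directory names under ``base``, skipping hidden entries and known false categories."""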
try:
return frozenset(map(intern, filterfalse(
self.false_categories.__contains__,
(x for x in listdir_dirs(self.base) if not x.startswith('.')))))
except EnvironmentError as e:
logger.error(f"failed listing categories: {e}")
return ()
f"file {filename!r}: {raw_line!r} on line {lineno}: "
f"{src} was already moved to {moved[src.key]}, "
"this line is redundant")
continue
elif src.slot is not None:
logger.error(
f"file {filename!r}: {raw_line!r} on line {lineno}: "
"slotted atom makes no sense for slotmoves")
continue
src_slot = atom(f'{src}:{line[2]}')
trg_slot = atom(f'{src.key}:{line[3]}')
mods[src.key][1].append(('slotmove', src_slot, line[3]))
else:
logger.error(
f'file {filename!r}: {raw_line!r} on line {lineno}: unknown command')
def is_supported(self):
"""Check if an EAPI is supported."""
if EAPI.known_eapis.get(self._magic) is not None:
if not self.options.is_supported:
logger.warning(f"EAPI '{self}' isn't fully supported")
sys.stderr.flush()
return True
return False
def _parse_atom_negations(self, data):
"""Parse files containing optionally negated package atoms."""
neg, pos = [], []
for line, lineno, path in data:
if line[0] == '-':
line = line[1:]
if not line:
logger.error(f"{path!r}, line {lineno}: '-' negation without an atom")
continue
l = neg
else:
l = pos
try:
l.append(self.eapi_atom(line))
except ebuild_errors.MalformedAtom as e:
logger.error(f'{path!r}, line {lineno}: parsing error: {e}')
return tuple(neg), tuple(pos)
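# Illustrative only (hypothetical input, not from the original source):
# a file containing "dev-util/foo" and "-dev-util/bar" yields
# ((atom('dev-util/bar'),), (atom('dev-util/foo'),)).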
return pos
elif (interpret_level == COMMAND_PARSING and ch in ';\n') or \
(interpret_level == SPACE_PARSING and isspace(ch)):
return pos
elif ch == '\\':
pos += 1
elif ch == '<':
if (pos < end - 1 and buff[pos + 1] == '<' and
interpret_level == COMMAND_PARSING):
pos = walk_here_statement(buff, pos + 1)
# we continue immediately; walk_here deposits us at the end
# of the here op, not consuming the final delimiting char
# since it may be an endchar
continue
else:
logger.debug(f'noticed <, interpret_level={interpret_level}')
elif ch == '#':
if start == pos or isspace(buff[pos - 1]) or buff[pos - 1] == ';':
pos = walk_statement_pound(buff, pos)
continue
elif ch == '$':
pos = walk_dollar_expansion(buff, pos + 1, end, endchar)
continue
elif ch == '{':
pos = walk_command_escaped_parsing(buff, pos + 1, '}')
elif ch == '(' and interpret_level == COMMAND_PARSING:
pos = walk_command_escaped_parsing(buff, pos + 1, ')')
elif ch in '`"':
pos = walk_command_escaped_parsing(buff, pos + 1, ch)
elif ch == "'" and endchar != '"':
pos = walk_statement_no_parsing(buff, pos + 1, "'")
pos += 1
window_end = None
com_start = pos
ch = buff[pos]
if isspace(ch):
pos += 1
continue
# Ignore comments.
if ch == '#':
pos = walk_statement_pound(buff, pos, endchar)
continue
new_start, new_end, new_p = is_function(buff, pos)
if new_p is not None:
func_name = buff[new_start:new_end]
logger.debug(f'matched func name {func_name!r}')
new_p = process_scope(None, buff, new_p, None, None, '}',
func_callback=func_callback,
func_level=func_level+1)
logger.debug(f'ended processing {func_name!r}')
if func_callback is not None:
func_callback(func_level, func_name, buff[new_start:new_p])
if func_match is not None and func_match(func_name):
logger.debug(f'filtering func {func_name!r}')
window_end = com_start
pos = new_p
pos += 1
continue
# Check for env assignment.
new_start, new_end, new_p = is_envvar(buff, pos)
if new_p is None:
# Non env assignment.
suffix = 'S'
out.write(out.fg('yellow'), f"[{suffix}]")
elif op.desc == 'replace':
if op.pkg != op.old_pkg:
if op.pkg > op.old_pkg:
op_type = 'upgrade'
else:
op_type = 'downgrade'
out.write(
out.fg('yellow'),
f"[{op_type[0].upper()} {op.old_pkg.fullver}]")
else:
out.write(out.fg('yellow'), "[R]")
else:
# shouldn't reach here
logger.warning("unknown op type encountered: desc(%r), %r", op.desc, op)
self.visit_op(op_type)
red = out.fg('red')
green = out.fg('green')
flags = []
use = set(op.pkg.use)
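# Render the package's IUSE: enabled flags in green, disabled flags in red
# with a leading '-'.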
for flag in sorted(op.pkg.iuse_stripped):
if flag in use:
flags.extend((green, flag, ' '))
else:
flags.extend((red, '-', flag, ' '))
if flags:
out.write(' ')
# Throw away the final space.
out.write(*flags[:-1])
out.write('\n')
def update_mtime(path, timestamp=None):
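"""Set the atime/mtime of ``path``, defaulting to the current time; failures are logged rather than raised."""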
if timestamp is None:
timestamp = time.time()
logger.debug(f"updating vdb timestamp for {path!r}")
try:
os.utime(path, (timestamp, timestamp))
except EnvironmentError as e:
logger.error(f"failed updated vdb timestamp for {path!r}: {e}")