# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this fragment appears whitespace-mangled and spliced together
# from more than one source function: the try/except around
# doebuild_environment is followed by TWO `else:` branches (which cannot both
# belong to it), `start_ipc_daemon` is referenced but never defined here, and
# the function is cut off after the /dev/null fd is opened. Do not trust the
# control flow below without consulting the upstream Portage sources.
def _start(self):
# Locate this package's entry in the installed-packages (vartree) database.
vardb = self.pkg.root_config.trees["vartree"].dbapi
dbdir = vardb.getpath(self.pkg.cpv)
if not os.path.exists(dbdir):
# Apparently the package got uninstalled
# already, so we can safely return early.
self.returncode = os.EX_OK
self._async_wait()
return
self.settings.setcpv(self.pkg)
cat, pf = portage.catsplit(self.pkg.cpv)
myebuildpath = os.path.join(dbdir, pf + ".ebuild")
try:
# Prepare the build environment for the "prerm" phase from the
# installed package's saved ebuild.
portage.doebuild_environment(myebuildpath, "prerm",
settings=self.settings, db=vardb)
except UnsupportedAPIException:
# This is safe to ignore since this function is
# guaranteed to set PORTAGE_BUILDDIR even though
# NOTE(review): the comment above is truncated in this chunk.
yield self._build_dir.async_lock()
else:
self.settings.pop('PORTAGE_IPC_DAEMON', None)
# NOTE(review): a second `else:` after the same try/except -- splice point.
else:
# Since the IPC daemon is disabled, use a simple tempfile based
# approach to detect unexpected exit like in bug #190128.
self.settings.pop('PORTAGE_IPC_DAEMON', None)
if self.phase not in self._phases_without_builddir:
# presumably PORTAGE_BUILDDIR is set by this point -- TODO confirm
exit_file = os.path.join(
self.settings['PORTAGE_BUILDDIR'],
'.exit_status')
self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
try:
# Remove any stale exit-status file from a previous run.
os.unlink(exit_file)
except OSError:
if os.path.exists(exit_file):
# make sure it doesn't exist
raise
else:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
if start_ipc_daemon:
# NOTE(review): `start_ipc_daemon` is not defined anywhere in this fragment.
self.settings['PORTAGE_IPC_DAEMON'] = "1"
self._start_ipc_daemon()
if self.fd_pipes is None:
self.fd_pipes = {}
null_fd = None
# Attach /dev/null to stdin for non-interactive phases so the child
# process cannot block waiting for terminal input.
if 0 not in self.fd_pipes and \
self.phase not in self._phases_interactive_whitelist and \
"interactive" not in self.settings.get("PROPERTIES", "").split():
null_fd = os.open('/dev/null', os.O_RDONLY)
# NOTE(review): this function is truncated in this chunk -- it is cut off
# inside the `for path in porttrees:` loop; the rest is not visible here.
def action_metadata(settings, portdb, myopts, porttrees=None):
if porttrees is None:
porttrees = portdb.porttrees
portage.writemsg_stdout("\n>>> Updating Portage cache\n")
cachedir = os.path.normpath(settings.depcachedir)
# Refuse to proceed when the depcache dir points at a primary system
# directory: the cache update would write into (and could clobber) it.
if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
"/lib", "/opt", "/proc", "/root", "/sbin",
"/sys", "/tmp", "/usr", "/var"]:
print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
"ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
sys.exit(73)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
auxdbkeys = portdb._known_keys
# Small record type grouping the per-tree cache databases for one repo path.
class TreeData:
__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
def __init__(self, dest_db, eclass_db, path, src_db):
self.dest_db = dest_db
self.eclass_db = eclass_db
self.path = path
self.src_db = src_db
self.valid_nodes = set()
porttrees_data = []
for path in porttrees:
src_db = portdb._pregen_auxdb.get(path)
# NOTE(review): orphan fragment -- it begins mid-method (references `self`,
# `var_name`, `val`, `inforoot` with no enclosing `def` visible) and around
# the apply_secpass_permissions call it splices into a second, unrelated
# fragment (download/fetch code with an `except` whose `try` is not visible).
# Validate SLOT: an installed package must have a non-empty SLOT value.
slot = self.settings.get(var_name, '')
if not slot.strip():
showMessage(_("!!! SLOT is undefined\n"),
level=logging.ERROR, noiselevel=-1)
return 1
write_atomic(os.path.join(inforoot, var_name), slot + '\n')
# Warn (QA notice) when the recorded value disagrees with settings.
if val != self.settings.get(var_name, ''):
self._eqawarn('preinst',
[_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
{"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
def eerror(lines):
self._eerror("preinst", lines)
if not os.path.exists(self.dbcatdir):
ensure_dirs(self.dbcatdir)
# Collect the versions of every other installed instance of this cp.
otherversions = []
for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
otherversions.append(v.split("/")[1])
cp = self.mysplit[0]
slot_atom = "%s:%s" % (cp, slot)
# filter any old-style virtual matches
slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
if cpv_getkey(cpv) == cp]
if self.mycpv not in slot_matches and \
self.vartree.dbapi.cpv_exists(self.mycpv):
# handle multislot or unapplied slotmove
# NOTE(review): splice point -- the lines below belong to fetch-style
# code (download permission fixup / retry), not to the code above.
apply_secpass_permissions(download_path,
gid=portage_gid, mode=0o664, mask=0o2)
except FileNotFound:
pass
except PortageException as e:
if not os.access(download_path, os.R_OK):
writemsg(_("!!! Failed to adjust permissions:"
" %s\n") % str(e), noiselevel=-1)
del e
# If the file is empty then it's obviously invalid. Don't
# trust the return value from the fetcher. Remove the
# empty file and try to download again.
try:
mystat = os.lstat(download_path)
if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)):
os.unlink(download_path)
fetched = 0
continue
except EnvironmentError:
pass
# Verify the download against recorded digests when available.
if mydigests is not None and myfile in mydigests:
try:
mystat = os.stat(download_path)
except OSError as e:
# A missing/stale file is expected here; anything else is fatal.
if e.errno not in (errno.ENOENT, errno.ESTALE):
raise
del e
fetched = 0
else:
# NOTE(review): truncated fragment -- there is no return statement and the
# ImportError handler has no `else: raise`, so an unrelated ImportError would
# fall through with `db` unbound. Consult the upstream file before relying
# on this version.
def _open_shelve(self, db_file, db_desc):
# Read-only when dry-running; otherwise create the db if needed ("c").
if self.options.dry_run:
open_flag = "r"
else:
open_flag = "c"
# In dry-run mode a missing db file is fine -- use an empty dict.
if self.options.dry_run and not os.path.exists(db_file):
db = {}
else:
try:
db = shelve.open(db_file, flag=open_flag)
except ImportError as e:
# ImportError has different attributes for python2 vs. python3
if (getattr(e, 'name', None) == 'bsddb' or
getattr(e, 'message', None) == 'No module named bsddb'):
# Fall back to the bsddb3 package when the stdlib bsddb
# module is unavailable (removed in python3).
from bsddb3 import dbshelve
db = dbshelve.open(db_file, flags=open_flag)
if self.options.dry_run:
logging.warning("dry-run: %s db opened in readonly mode" % db_desc)
if not isinstance(db, dict):
# Copy into a plain in-memory dict so the file is not modified.
volatile_db = dict((k, db[k]) for k in db)
db.close()
# NOTE(review): the `def` line and the opening of this docstring are not
# visible in this chunk -- the stray ''' below closes a docstring that
# started before this fragment. The body walks upward from the working
# directory looking for VCS control dirs/files.
'''
retvcs = []
pathprep = cwd
# Walk up the directory tree; `depth is None` means unlimited ascent.
while depth is None or depth > 0:
for vcs_type in _FindVCS_data:
# Each VCS is detected either by a control directory (e.g. a
# dir_name entry) or, failing that, by a control file.
vcs_dir = os.path.join(pathprep, vcs_type.dir_name)
if os.path.isdir(vcs_dir):
logging.debug(
'FindVCS: found %(name)s dir: %(vcs_dir)s' % {
'name': vcs_type.name,
'vcs_dir': os.path.abspath(vcs_dir)})
retvcs.append(vcs_type.name)
elif vcs_type.file_name:
vcs_file = os.path.join(pathprep, vcs_type.file_name)
if os.path.exists(vcs_file):
logging.debug(
'FindVCS: found %(name)s file: %(vcs_file)s' % {
'name': vcs_type.name,
'vcs_file': os.path.abspath(vcs_file)})
retvcs.append(vcs_type.name)
# Stop ascending as soon as any VCS is found at this level.
if retvcs:
break
pathprep = os.path.join(pathprep, '..')
# Reached the filesystem root -- nothing more to scan.
if os.path.realpath(pathprep).strip('/') == '':
break
if depth is not None:
depth = depth - 1
return retvcs
def new(self, **kwargs):
    """Create the repository checkout directory and run an initial sync.

    Keyword arguments, when given, are stored via self._kwargs() exactly
    as update() would accept them.

    @rtype: tuple
    @return: (1, False) when the checkout directory cannot be created;
        otherwise the (exitcode, updatecache_flg) result of self.update().
    """
    if kwargs:
        self._kwargs(kwargs)
    try:
        if not os.path.exists(self.repo.location):
            os.makedirs(self.repo.location)
        # BUG FIX: the original called self.logger(self.self.xterm_titles, ...)
        # -- `self.self` does not exist and raised AttributeError, which the
        # IOError handler below would not catch.
        self.logger(self.xterm_titles,
            'Created New Directory %s ' % self.repo.location)
    except IOError:
        # Treat a failure to create the checkout dir as a sync failure
        # (IOError is an alias of OSError on Python 3, so makedirs
        # errors are caught here).
        return (1, False)
    return self.update()
def _remove_failed_dirs(self, failed_pkgs):
"""
Remove the directories of packages that failed to merge.
@param failed_pkgs: failed packages whose directories to remove
@type failed_pkg: dict
"""
for failed_pkg in failed_pkgs:
pkg_path = os.path.join(self._vardb_path, failed_pkg)
# delete failed merge directory if it exists (it might not exist
# if loaded from tracking file)
if os.path.exists(pkg_path):
shutil.rmtree(pkg_path)
# TODO: try removing package CONTENTS to prevent orphaned
def exists(self, **kwargs):
    '''Tests whether the repo actually exists'''
    # An SVN checkout is identified by its .svn control directory.
    svn_dir = os.path.join(self.repo.location, '.svn')
    return os.path.exists(svn_dir)