Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _write_metakey_to_restored_group_task(self, processor):
    """Build the task that writes the LRC metakey to the restored group.

    Returns a single-element list with a WriteMetaKeyTask targeting
    self.group; the key itself is composed from this job's lrc groups
    and scheme.
    """
    # Compose the uncoupled-LRC group metakey for this job's scheme.
    meta = storage.Group.compose_uncoupled_lrc_group_meta(
        lrc_groups=self.lrc_groups,
        scheme=storage.Lrc.make_scheme(self.scheme),
    )
    return [
        tasks.WriteMetaKeyTask.new(
            self,
            group=self.group,
            metakey=meta,
        )
    ]
def _generate_metakey(self, couple_id, groups, processor):
# NOTE(review): this definition is truncated in this view -- the call to
# processor._compose_groupset_metakey is never closed and neither `try`
# below has a visible `except`/`finally`. Do not rely on the tail here.
try:
# create dummy couple to construct metakey
couple = storage.Couple([storage.Group(couple_id)])
ns = self._get_namespace()
ns.add_couple(couple)
try:
# Groupset settings for the LRC scheme used by this job.
settings = {
'scheme': self.scheme,
'part_size': self.part_size,
}
Groupset = storage.groupsets.make_groupset_type(
type=storage.GROUPSET_LRC,
settings=settings,
)
# Delegate metakey composition to the processor (private helper --
# presumably cleaned up in the missing finally; verify in full file).
metakey = processor._compose_groupset_metakey(
groupset_type=Groupset,
groups=groups,
couple=couple,
settings=settings,
def _get_type(self, meta):
    """Classify this group from its parsed meta (or lack thereof).

    With meta present: validate the declared type against
    AVAILABLE_TYPES (unknown types are logged and reported as
    TYPE_UNKNOWN), defaulting to TYPE_DATA when no type is declared.
    Without meta: coupled groups are TYPE_DATA; otherwise the group is
    uncoupled, and becomes TYPE_UNCOUPLED_CACHE when any backend lives
    under the cache group path prefix.
    """
    if self.meta:
        declared_unknown = (
            'type' in self.meta and self.meta['type'] not in self.AVAILABLE_TYPES
        )
        if declared_unknown:
            logger.error('Unknown type "{type}" of group {group}'.format(
                group=self,
                type=self.meta['type'],
            ))
            return self.TYPE_UNKNOWN
        return self.meta.get('type', self.TYPE_DATA)

    if self.couple:
        return storage.Group.TYPE_DATA

    def backend_is_cache(nb):
        # An empty/unset prefix disables cache group detection entirely.
        if not storage.CACHE_GROUP_PATH_PREFIX:
            return False
        return nb.base_path.startswith(storage.CACHE_GROUP_PATH_PREFIX)

    if any(backend_is_cache(nb) for nb in self.node_backends):
        return self.TYPE_UNCOUPLED_CACHE
    return storage.Group.TYPE_UNCOUPLED
# weights
# NOTE(review): fragment of a larger method -- `res`, `weight_manager` and
# `self.statistics` come from context outside this view.
for ns_id in weight_manager.weights:
# Stringify couple-size keys so the published state is JSON-friendly.
res[ns_id]['weights'] = dict(
(str(k), v) for k, v in weight_manager.weights[ns_id].iteritems()
)
logger.info('Namespace {}: weights are updated by weight manager'.format(
ns_id
))
# statistics
for ns, stats in self.statistics.per_ns_statistics().iteritems():
res[ns]['statistics'] = stats
# removing internal namespaces that clients should not know about
res.pop(storage.Group.CACHE_NAMESPACE, None)
# Publish the accumulated per-namespace state to waiters.
self._namespaces_states.set_result(dict(res))
# NOTE(review): fragment -- the `if` matching the `else:` below and the
# enclosing method signature are outside this view.
uncoupled_groups = self.select_uncoupled_groups(group)
logger.info('Group {0} will be moved to uncoupled groups {1}'.format(
group.group_id, [g.group_id for g in uncoupled_groups]))
else:
dc, fsid = None, None
# Refresh backend status before validating the candidate groups.
self.node_info_updater.update_status(uncoupled_groups)
locked_hosts = manual_locker.get_locked_hosts()
for unc_group in uncoupled_groups:
# Only single-backend groups are currently supported as destinations.
if len(unc_group.node_backends) != 1:
raise ValueError(
'Group {0} has {1} node backends, currently '
'only groups with 1 node backend can be used'.format(
unc_group.group_id, len(unc_group.node_backends)))
is_good = infrastructure.is_uncoupled_group_good(
unc_group, locked_hosts, [storage.Group.TYPE_UNCOUPLED], max_node_backends=1)
if not is_good:
raise ValueError('Uncoupled group {0} is not applicable'.format(
unc_group.group_id))
nb = unc_group.node_backends[0]
try:
host_dc = nb.node.host.dc
except CacheUpstreamError:
raise RuntimeError('Failed to get dc for host {}'.format(
nb.node.host))
# All candidates must share the same dc and the same filesystem (hdd);
# the first group fixes the reference (dc, fsid) pair.
if not dc:
dc, fsid = host_dc, nb.fs.fsid
elif dc != host_dc or fsid != nb.fs.fsid:
raise ValueError(
'All uncoupled groups should be located on a single hdd on the same host')
def _required_group_types(self):
    """Map each group participating in the job to its required type."""
    required = {self.uncoupled_group: storage.Group.TYPE_UNCOUPLED}
    return required
def _build_lrc_tree(self):
    """Build the dc/host cluster tree with all lrc groups accounted.

    Returns the (tree, nodes) pair produced by the infrastructure's
    filtered cluster tree, with both uncoupled and coupled lrc-8-2-2-v1
    groups attached to their nodes.
    """
    node_types = (self.DC_NODE_TYPE, 'host')
    tree, nodes = infrastructure.infrastructure.filtered_cluster_tree(node_types)
    # NOTE:
    # lrc groups that are currently being processed by running jobs
    # are not accounted here because there is no easy and
    # straightforward way to do this. This is not crucial
    # at the moment.
    lrc_types = (
        storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1,
        storage.Group.TYPE_LRC_8_2_2_V1,
    )
    lrc_groups = (
        g
        for g in storage.groups.keys()
        if g.type in lrc_types
    )
    # TODO: rename, nothing about "ns" here
    infrastructure.infrastructure.account_ns_groups(nodes, lrc_groups)
    infrastructure.infrastructure.update_groups_list(tree)
    return tree, nodes
logger.error(
'Error on updating metakey from group {group}: {error}'.format(
group=group,
error=response.error.message,
)
)
else:
raise RuntimeError(response.error.mssage)
return
meta = response.data
group.parse_meta(meta)
if group.type == storage.Group.TYPE_UNCOUPLED_LRC_8_2_2_V1:
return
ns_id = group.meta.get('namespace')
if ns_id is None:
logger.error(
'Inconsistent meta read from group {group}, missing namespace: {meta}'.format(
group=group,
meta=group.meta,
)
)
return
if group.type == storage.Group.TYPE_DATA:
groups = _get_data_groups(group)
elif group.type == storage.Group.TYPE_LRC_8_2_2_V1:
groups = _get_lrc_groups(group)
# NOTE(review): fragment -- the generator below continues an expression
# opened before this view; `res`, `weight_manager`, `per_entity_stat` and
# `namespaces_settings` come from the missing surrounding method.
(str(k), v) for k, v in weight_manager.weights[ns_id].iteritems()
)
logger.info('Namespace {}: weights are updated by weight manager'.format(
ns_id
))
# statistics
for ns, stats in self.statistics.per_ns_statistics(per_entity_stat).iteritems():
res[ns]['statistics'] = stats
# settings
for ns_settings in namespaces_settings:
res[ns_settings.namespace]['settings'] = ns_settings.dump()
# removing internal namespaces that clients should not know about
res.pop(storage.Group.CACHE_NAMESPACE, None)
# Log a per-namespace summary of what is being returned.
for ns_id, ns_state in res.iteritems():
logger.debug(
'Namespace state: namespace: {ns}, couples: {couples_count}, weighted couples: '
'{weighted_couples_count}'.format(
ns=ns_id,
couples_count=len(ns_state['couples']),
weighted_couples_count=sum(
(len(size_weights) for size_weights in ns_state['weights'].itervalues()),
0
),
)
)
return dict(res)