# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def status_of_elasticsearch_indices(connection, **kwargs):
    """Check the health and open/closed status of every ES index.

    Builds a full per-index info dict from the ``cat.indices`` table and a
    brief dict containing only indices that are not both 'green' and 'open'.
    NOTE(review): as visible here the function never stores or returns the
    check result -- the tail appears truncated; confirm against the module.
    """
    check = CheckResult(connection, 'status_of_elasticsearch_indices')
    ### the check
    client = es_utils.create_es_client(connection.ff_es, True)
    # cat.indices(v=True) yields a whitespace-separated table; row 0 is the header
    rows = [line.split() for line in client.cat.indices(v=True).split('\n')]
    headers = rows.pop(0)
    index_info = {}       # full output: one entry per index
    warn_index_info = {}  # brief output: only unhealthy / non-open indices
    for row in rows:
        if not row:
            # blank line in the cat output
            continue
        # column 2 of the cat table is the index name
        info = {header: row[col] for col, header in enumerate(headers)}
        index_info[row[2]] = info
        if info['health'] != 'green' or info['status'] != 'open':
            warn_index_info[row[2]] = info
    # set fields, store result
    if not index_info:
        check.status = 'FAIL'
        check.summary = 'Error reading status of ES indices'
        check.description = 'Error reading status of ES indices'
def elastic_search_space(connection, **kwargs):
    """Checks that our ES nodes all have a certain amount of space remaining.

    Reads ``cat.nodes(h='id,diskAvail')`` -- disk space comes back as a
    human-readable size string such as '12.3gb', '800mb' or '1.1tb'.
    FAIL when a node reports less than a megabyte, WARN when a node is in
    the megabyte range, PASS otherwise.
    NOTE(review): the visible tail does not attach full_output to the check
    or return it -- likely truncated; confirm against the module.
    """
    check = CheckResult(connection, 'elastic_search_space')
    full_output = {}
    client = es_utils.create_es_client(connection.ff_es, True)
    # use cat.nodes to get id,diskAvail for all nodes, filter out empties
    node_space_entries = filter(None, [data.split() for data in client.cat.nodes(h='id,diskAvail').split('\n')])
    check.summary = check.description = None
    full_output['nodes'] = {}
    for _id, remaining_space in node_space_entries:
        # Fix: previously a terabyte-scale node ('tb') matched neither 'gb'
        # nor 'mb' and was wrongly flagged as having no space remaining.
        if 'gb' not in remaining_space and 'tb' not in remaining_space:
            if 'mb' not in remaining_space:
                check.status = 'FAIL'
                check.summary = check.description = 'At least one of the nodes in this env has no space remaining'
            elif check.status != 'FAIL':
                # Fix: do not downgrade an earlier node's FAIL to WARN
                check.status = 'WARN'
                check.summary = check.description = 'At least one of the nodes in this env is low on space'
        full_output['nodes'][_id.strip()] = {'remaining_space': remaining_space}
    if check.summary is None:
        check.status = 'PASS'
        check.summary = check.description = 'All nodes have >1gb remaining disk space'
def indexing_records(connection, **kwargs):
    """Inspect recent ES 'indexing' records for this environment.

    NOTE(review): this block is truncated -- the loop body after the
    'latest_indexing' test is missing from this chunk; confirm the
    remainder against the full source before editing.
    """
    check = CheckResult(connection, 'indexing_records')
    client = es_utils.create_es_client(connection.ff_es, True)
    # the indexing log lives in an env-namespaced index
    namespaced_index = connection.ff_env + 'indexing'
    # make sure we have the index and items within it
    if (not client.indices.exists(namespaced_index) or
            client.count(index=namespaced_index).get('count', 0) < 1):
        check.summary = check.description = 'No indexing records found'
        check.status = 'PASS'
        return check
    # fetch up to 1000 records that carry an indexing_status, sorted by uuid
    # descending -- presumably newest-first; confirm uuid ordering semantics
    res = client.search(index=namespaced_index, doc_type='indexing', sort='uuid:desc', size=1000,
                        body={'query': {'query_string': {'query': '_exists_:indexing_status'}}})
    delta_days = datetime.timedelta(days=3)  # recency window -- TODO confirm use in truncated tail
    all_records = res.get('hits', {}).get('hits', [])
    recent_records = []  # records within the window (populated below, truncated)
    warn_records = []    # records worth flagging (populated below, truncated)
    for rec in all_records:
        if rec['_id'] == 'latest_indexing':
def __init__(self, index=None, doc_type='result'):
    """Create an ES-backed store, creating *index* on demand.

    :param index: name of the ES index to use; if given and missing, it is
        created via ``self.create_index``
    :param doc_type: ES document type for stored records
    """
    # HOST is a module-level constant defined outside this chunk
    self.es = es_utils.create_es_client(HOST, use_aws_url=True)
    self.index = index
    if index and not self.index_exists(index):
        self.create_index(index)
    # NOTE(review): doc_type is assigned AFTER create_index(), so
    # create_index must not read self.doc_type -- confirm before reordering
    self.doc_type = doc_type
def wipe_build_indices(connection, **kwargs):
    """Wipes all indices on the FF-Build ES env.

    Deletes every index ('*') on the hard-coded build cluster and reports
    FAIL if Elasticsearch does not acknowledge the deletion.
    """
    check = CheckResult(connection, 'wipe_build_indices')
    check.status = 'PASS'
    check.summary = check.description = 'Wiped all test indices'
    # hard-coded build-environment endpoint: this check must only ever
    # touch the dedicated builds cluster, never a live environment's ES
    BUILD_ES = 'search-fourfront-builds-uhevxdzfcv7mkm5pj5svcri3aq.us-east-1.es.amazonaws.com:80'
    client = es_utils.create_es_client(BUILD_ES, True)
    full_output = client.indices.delete(index='*')
    # Fix: was `full_output['acknowledged'] != True` -- non-idiomatic (E712)
    # and raised KeyError on a malformed response instead of failing the check
    if full_output.get('acknowledged') is not True:
        check.status = 'FAIL'
        check.summary = check.description = 'Failed to wipe all test indices, see full output'
    check.full_output = full_output
    return check
# NOTE(review): this fragment starts mid-function -- the enclosing ``def``
# (a Foursight check that purges deleted download TrackingItems) is outside
# this chunk, and the fragment also ends mid-function.
# staging_deploy = CheckResult(staging_conn, 'staging_deployment').get_primary_result()
# if staging_deploy['status'] != 'PASS':
# check.summary = 'Staging deployment is running - skipping'
# return check
# only run on the production Foursight stage
if get_stage_info()['stage'] != 'prod':
    check.summary = check.description = 'This check only runs on Foursight prod'
    return check
time_limit = 270  # 4.5 minutes -- presumably headroom under the runner's timeout; confirm
t0 = time.time()
check.full_output = {}  # purged items by item type
# find up to 300 deleted download-tracking items, uuids only
search = '/search/?type=TrackingItem&tracking_type=download_tracking&status=deleted&field=uuid&limit=300'
search_res = ff_utils.search_metadata(search, key=connection.ff_keys)
search_uuids = [res['uuid'] for res in search_res]
client = es_utils.create_es_client(connection.ff_es, True)
# a bit convoluted, but we want the frame=raw, which does not include uuid
# use get_es_metadata to handle this. Use it as a generator
for to_purge in ff_utils.get_es_metadata(search_uuids, es_client=client, is_generator=True,
                                         key=connection.ff_keys):
    if round(time.time() - t0, 2) > time_limit:
        # out of time; any remaining items are left for a later run
        break
    purge_properties = to_purge['properties']
    purge_properties['uuid'] = to_purge['uuid']  # add uuid to frame=raw
    try:
        purge_res = ff_utils.purge_metadata(to_purge['uuid'], key=connection.ff_keys)
    except Exception as exc:
        # record the failure but keep purging the rest
        purge_status = 'error'
        purge_detail = str(exc)
    else:
        purge_status = purge_res['status']
        # on success record the raw properties; otherwise the purge response
        purge_detail = purge_properties if purge_status == 'success' else purge_res