from charmhelpers.contrib.openstack.utils import is_unit_paused_set
from charmhelpers.core import hookenv, host, unitdata
from charmhelpers.core.hookenv import relation_get
from charms.reactive import set_state, remove_state


def remote_pools_updated():
hookenv.log(
"Pools updated on remote host, restarting pool manager",
level=hookenv.DEBUG)
host.service_restart('designate-pool-manager')
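
# In a reactive charm this handler is typically gated on a relation flag.
# A minimal sketch of the wiring; the flag name below is hypothetical,
# not one defined by the designate charm.
from charms.reactive import when, clear_flag

@when('endpoint.dns-backend.pools-updated')
def handle_remote_pools_updated():
    remote_pools_updated()
    clear_flag('endpoint.dns-backend.pools-updated')
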
def service_restart(service_name):
"""
Wrapper around host.service_restart to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_available(service_name):
if host.service_running(service_name):
host.service_restart(service_name)
else:
host.service_start(service_name)
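
# A quick usage sketch of the wrapper above: it restarts a running
# service, falls back to a plain start when the service is stopped, and
# silently skips services that are not installed at all.
service_restart('designate-pool-manager')  # running: restarted
service_restart('designate-sink')          # stopped: started instead
service_restart('not-installed-svc')       # unavailable: no-op, no log spam
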
def start_resourcemanager(namenode):
hookenv.status_set('maintenance', 'starting resourcemanager')
# NB: service should be started by install, but we want to verify it is
# running before we set the .started state and open ports. We always
# restart here, which may seem heavy-handed. However, restart works
# whether the service is currently started or stopped. It also ensures the
# service is using the most current config.
rm_started = host.service_restart('hadoop-yarn-resourcemanager')
if rm_started:
for port in get_layer_opts().exposed_ports('resourcemanager'):
hookenv.open_port(port)
set_state('apache-bigtop-resourcemanager.started')
hookenv.status_set('maintenance', 'resourcemanager started')
hookenv.application_version_set(get_hadoop_version())
else:
hookenv.log('YARN ResourceManager failed to start')
hookenv.status_set('blocked', 'resourcemanager failed to start')
remove_state('apache-bigtop-resourcemanager.started')
for port in get_layer_opts().exposed_ports('resourcemanager'):
hookenv.close_port(port)
hs_started = host.service_restart('hadoop-mapreduce-historyserver')
if not hs_started:
hookenv.log('YARN HistoryServer failed to start')
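
# The restart-and-gate pattern above (open ports and set the started
# state only when service_restart reports success, otherwise close ports
# and remove the state) recurs often enough to factor out. A hypothetical
# sketch; restart_and_expose is not part of the charm.
from charmhelpers.core import hookenv, host
from charms.reactive import set_state, remove_state

def restart_and_expose(service, ports, state):
    # Restart (or start) the service; gate ports and state on the result.
    if host.service_restart(service):
        for port in ports:
            hookenv.open_port(port)
        set_state(state)
        return True
    for port in ports:
        hookenv.close_port(port)
    remove_state(state)
    return False
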
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
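
# Snap-packaged daemons are driven through the systemd units snapd
# creates, named snap.<snap>.<app>, hence 'snap.kubelet.daemon' rather
# than plain 'kubelet'. A sketch that also verifies the daemons came
# back up after the restart:
for service in ['kube-proxy', 'kubelet']:
    unit = 'snap.%s.daemon' % service
    host.service_restart(unit)
    if not host.service_running(unit):
        hookenv.log('%s failed to restart' % unit, level=hookenv.ERROR)
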
def restart_scheduler():
hookenv.status_set('maintenance', 'Restarting kube-scheduler')
host.service_restart('snap.kube-scheduler.daemon')
def service_restart_handler(relation_id=None, unit=None,
default_service=None):
    '''Handle requests from subordinate charms to restart services.

    A new restart-nonce published on the relation signals a restart; the
    nonce is cached in unitdata so each request is acted on only once.
    '''
restart_nonce = relation_get(attribute='restart-nonce',
unit=unit,
rid=relation_id)
db = unitdata.kv()
nonce_key = 'restart-nonce'
if restart_nonce != db.get(nonce_key):
if not is_unit_paused_set():
service = relation_get(attribute='remote-service',
unit=unit,
rid=relation_id) or default_service
if service:
service_restart(service)
db.set(nonce_key, restart_nonce)
db.flush()
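
# The other half of this contract is the subordinate charm publishing a
# fresh restart-nonce whenever it needs the principal to bounce a
# service. A minimal sketch: any value that changes works, a uuid is
# typical, and the 'remote-service' name here is purely illustrative.
import uuid
from charmhelpers.core import hookenv

hookenv.relation_set(
    relation_settings={
        'restart-nonce': str(uuid.uuid4()),
        'remote-service': 'neutron-openvswitch-agent',
    })
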
def start_master(etcd):
    '''Run the Kubernetes master components.'''
    freeze_service_cidr()
if not etcd.get_connection_string():
    # etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
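
# start_master is the body of a reactive handler that receives the etcd
# relation object. A sketch of how it is typically gated; the exact
# decorator states vary between charm revisions and are assumptions here.
from charms.reactive import when, when_not

@when('etcd.available')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
    ...  # body as above
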
def restart_scheduler():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-scheduler')
host.service_restart('snap.kube-scheduler.daemon')
hookenv.status_set(prev_state, prev_msg)
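
# Unlike the earlier restart_scheduler, this version restores whatever
# workload status was set before the restart. The save-and-restore dance
# generalizes into a small context manager; a hypothetical sketch,
# maintenance_status is not part of charmhelpers.
from contextlib import contextmanager
from charmhelpers.core import hookenv, host

@contextmanager
def maintenance_status(message):
    # Show a maintenance status for the duration of the block, then
    # restore the previous workload status.
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', message)
    try:
        yield
    finally:
        hookenv.status_set(prev_state, prev_msg)

with maintenance_status('Restarting kube-scheduler'):
    host.service_restart('snap.kube-scheduler.daemon')
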
def restart_apiserver():
hookenv.status_set('maintenance', 'Restarting kube-apiserver')
host.service_restart('snap.kube-apiserver.daemon')
def restart_controller_manager():
hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
host.service_restart('snap.kube-controller-manager.daemon')
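
# restart_apiserver and restart_controller_manager differ only in the
# service name (and, unlike restart_scheduler above, do not restore the
# previous status). A hypothetical consolidation:
from charmhelpers.core import hookenv, host

def restart_master_service(name):
    hookenv.status_set('maintenance', 'Restarting %s' % name)
    host.service_restart('snap.%s.daemon' % name)

# restart_master_service('kube-apiserver')
# restart_master_service('kube-controller-manager')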