# Secure your code as it's written. Use Snyk Code to scan source code in
# minutes - no build needed - and fix issues immediately.
import logging

from ceilometer.compute.virt import inspector as virt_inspector
from pypowervm import adapter as pvm_adpt
from pypowervm.helpers import log_helper as log_hlp
from pypowervm.helpers import vios_busy as vio_hlp
from pypowervm.tasks.monitor import util as pvm_mon_util
from pypowervm.utils import uuid as pvm_uuid
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import network as pvm_net

from ceilometer_powervm.compute.virt.powervm.i18n import _
# Module-level logger. NOTE(review): ``logging`` is not imported in this
# chunk's import block -- confirm the import exists at the top of the file.
LOG = logging.getLogger(__name__)
class PowerVMInspector(virt_inspector.Inspector):
    """The implementation of the inspector for the PowerVM hypervisor.

    This code requires that it is run on the PowerVM Compute Host directly.
    Utilizes the pypowervm library to gather the instance metrics.
    """

    def __init__(self, conf):
        """Connect to the local PowerVM REST API and build the adapter.

        :param conf: The ceilometer configuration object.
        """
        super(PowerVMInspector, self).__init__(conf)

        # Build the adapter. May need to attempt the connection multiple times
        # in case the REST server is starting.
        session = pvm_adpt.Session(conn_tries=300)
        # log_helper adds request/response logging; vios_busy_retry_helper
        # retries requests when the VIOS reports itself busy.
        self.adpt = pvm_adpt.Adapter(
            session, helpers=[log_hlp.log_helper,
                              vio_hlp.vios_busy_retry_helper])
def get_samples(self, manager, cache, resources):
    """Yield samples for each instance in *resources*.

    NOTE(review): this method reads ``self.inspector``, ``self._get_samples``
    and ``self._populate_cache``, none of which are defined in this chunk --
    it appears to belong to a pollster class, not the inspector above;
    confirm its intended home.  ``plugin_base`` and ``ceilometer`` are also
    not imported in this chunk.

    :param manager: The pollster manager (not used directly here).
    :param cache: Cache shared across pollsters for one polling cycle.
    :param resources: Iterable of instances to sample.
    """
    self._inspection_duration = self._record_poll_time()
    for instance in resources:
        try:
            c_data = self._populate_cache(
                self.inspector,
                cache,
                instance,
            )
            for s in self._get_samples(instance, c_data):
                yield s
        except virt_inspector.InstanceNotFoundException as err:
            # Instance was deleted while getting samples. Ignore it.
            LOG.debug('Exception while getting samples %s', err)
        except virt_inspector.InstanceShutOffException as e:
            LOG.debug('Instance %(instance_id)s was shut off while '
                      'getting samples of %(pollster)s: %(exc)s',
                      {'instance_id': instance.id,
                       'pollster': self.__class__.__name__, 'exc': e})
        except virt_inspector.NoDataException as e:
            # BUG FIX: the original used ``_LW``, which is never imported
            # in this file; only ``_`` is available.
            LOG.warning(_('Cannot inspect data of %(pollster)s for '
                          '%(instance_id)s, non-fatal reason: %(exc)s'),
                        {'pollster': self.__class__.__name__,
                         'instance_id': instance.id, 'exc': e})
            raise plugin_base.PollsterPermanentError(resources)
        except ceilometer.NotImplementedError:
            # Selected inspector does not implement this pollster.
            # BUG FIX: the original LOG.debug call was truncated mid-string;
            # completed to mirror the analogous handler later in this file.
            LOG.debug('Obtaining perf events is not implemented for %s',
                      self.inspector.__class__.__name__)
            raise plugin_base.PollsterPermanentError(resources)
def inspect_vnics(self, instance, duration):
    """Yield cumulative InterfaceStats for each VIF of *instance* (XenAPI).

    NOTE(review): the original fragment had no ``def`` line and used
    ``instance_name`` without defining it; the header and the name lookup
    are reconstructed to match the sibling ``inspect_vnic_rates`` method
    in this file -- confirm the signature against the original module.
    """
    # BUG FIX: derive instance_name, which the fragment used undefined.
    instance_name = util.instance_name(instance)
    vm_ref = self._lookup_by_name(instance_name)
    dom_id = self.session.VM.get_domid(vm_ref)
    vif_refs = self.session.VM.get_VIFs(vm_ref)
    # Per-domain bandwidth counters come from the XenServer host plugin.
    bw_all = self.session.call_plugin_serialized('bandwidth',
                                                 'fetch_all_bandwidth')
    LOG.debug("inspect_vnics, all bandwidth: %s", bw_all,
              instance=instance)

    for vif_ref in vif_refs:
        vif_rec = self.session.VIF.get_record(vif_ref)
        bw_vif = bw_all[dom_id][vif_rec['device']]

        # TODO(jianghuaw): Currently the plugin can only support
        # rx_bytes and tx_bytes, so temporarily set others as -1.
        yield virt_inspector.InterfaceStats(
            name=vif_rec['uuid'],
            mac=vif_rec['MAC'],
            fref=None,
            parameters=None,
            rx_bytes=bw_vif['bw_in'], rx_packets=-1, rx_drop=-1,
            rx_errors=-1, tx_bytes=bw_vif['bw_out'], tx_packets=-1,
            tx_drop=-1, tx_errors=-1)
def get_api_session(conf):
    """Create a XenAPI session from the ``[xenapi]`` configuration options.

    NOTE(review): the original fragment began mid-function; the header and
    the ``url``/``username`` option reads are reconstructed from the
    options the visible code uses -- confirm against the original module.

    :param conf: Configuration object exposing a ``xenapi`` option group.
    :returns: A connected ``XenAPISession``.
    :raises XenapiException: if required options are missing or the
        connection to XenAPI fails.
    """
    url = conf.xenapi.connection_url
    username = conf.xenapi.connection_username
    password = conf.xenapi.connection_password
    if not url or password is None:
        raise XenapiException(_('Must specify connection_url, and '
                                'connection_password to use'))
    try:
        session = xenapi_session.XenAPISession(url, username, password,
                                               originator="ceilometer")
        LOG.debug("XenAPI session is created successfully, %s", session)
    except XenAPI.Failure as e:
        msg = _("Could not connect to XenAPI: %s") % e.details[0]
        raise XenapiException(msg)
    return session
class XenapiInspector(virt_inspector.Inspector):
    """Inspector that gathers instance metrics through the XenAPI."""

    def __init__(self, conf):
        super(XenapiInspector, self).__init__(conf)
        # A single shared XenAPI session serves every inspect_* call.
        self.session = get_api_session(self.conf)

    def _lookup_by_name(self, instance_name):
        """Return the unique VM reference whose name label matches.

        :raises virt_inspector.InstanceNotFoundException: no VM matches.
        :raises XenapiException: the name label is ambiguous.
        """
        matches = self.session.VM.get_by_name_label(instance_name)
        if not matches:
            raise virt_inspector.InstanceNotFoundException(
                _('VM %s not found in XenServer') % instance_name)
        if len(matches) > 1:
            raise XenapiException(
                _('Multiple VM %s found in XenServer') % instance_name)
        return matches[0]
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys

# ``wmi`` exists only on Windows; guard the import so this module can
# still be imported (e.g. for tests) on other platforms.
if sys.platform == 'win32':
    import wmi

from ceilometer.compute.virt import inspector
from ceilometer.i18n import _
class HyperVException(inspector.InspectorException):
    """Raised when a Hyper-V inspection operation fails."""
class UtilsV2(object):
    """WMI class-name constants for the root/virtualization/v2 namespace."""

    # Only "realized" (instantiated) virtual systems are inspected.
    _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'

    # Resource-allocation / setting-data WMI classes, one per device type.
    _PROC_SETTING = 'Msvm_ProcessorSettingData'
    _MEMORY_SETTING = "Msvm_MemorySettingData"
    _SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
    _ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
    _PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
    _STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
    _VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'

    # Metric association and metric value WMI classes.
    _METRICS_ME = 'Msvm_MetricForME'
    _BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
def inspect_disk_latency(self, instance, duration):
    """Yield a DiskLatencyStats entry per disk of *instance*.

    :param instance: The instance whose disks are inspected.
    :param duration: Unused here; part of the common inspector signature.
    """
    name = util.instance_name(instance)
    for metrics in self._utils.get_disk_latency_metrics(name):
        # Raw counter is scaled down by 1000 (presumably ms; confirm
        # against the Hyper-V metrics documentation).
        latency = metrics['disk_latency'] / 1000
        yield virt_inspector.DiskLatencyStats(
            device=metrics['instance_id'],
            disk_latency=latency)
# NOTE(review): this span is a fragment of a larger function (a libvirt
# inspect_instance-style routine): ``stats``, ``cpu_time``, ``current_cpus``
# and the memory_* locals are defined before this chunk begins -- the
# enclosing ``def`` is not visible here.  Indentation reconstructed.
# ``stats`` is presumably the flat dict produced by
# virConnectGetAllDomainStats()
for vcpu in six.moves.range(stats.get('vcpu.maximum', 0)):
    try:
        # Sum per-vCPU time + wait; a missing key yields None and the
        # addition raises TypeError, which is deliberately tolerated.
        cpu_time += (stats.get('vcpu.%s.time' % vcpu) +
                     stats.get('vcpu.%s.wait' % vcpu))
        current_cpus -= 1
    except TypeError:
        # pass here, if there are too many holes, the cpu count will
        # not match, so don't need special error handling.
        pass

if current_cpus:
    # There wasn't enough data, so fall back
    cpu_time = stats.get('cpu.time')

return virt_inspector.InstanceStats(
    cpu_number=stats.get('vcpu.current'),
    cpu_time=cpu_time,
    memory_usage=memory_used,
    memory_resident=memory_resident,
    memory_swap_in=memory_swap_in,
    memory_swap_out=memory_swap_out,
    # perf.* keys are present only when perf events are enabled for the
    # domain; .get() returns None otherwise.
    cpu_cycles=stats.get("perf.cpu_cycles"),
    instructions=stats.get("perf.instructions"),
    cache_references=stats.get("perf.cache_references"),
    cache_misses=stats.get("perf.cache_misses"),
    memory_bandwidth_total=stats.get("perf.mbmt"),
    memory_bandwidth_local=stats.get("perf.mbml"),
    cpu_l3_cache_usage=stats.get("perf.cmt"),
)
def inspect_disk_rates(self, instance, duration):
    """Yield DiskRateStats (read/write byte rates) per VBD of *instance*.

    The XenAPI data source exposes only byte rates, so the request-rate
    fields are reported as 0.
    """
    vm_ref = self._lookup_by_name(util.instance_name(instance))
    for vbd_ref in self.session.VM.get_VBDs(vm_ref):
        vbd_rec = self.session.VBD.get_record(vbd_ref)
        device = vbd_rec['device']
        read_rate = float(self.session.VM.query_data_source(
            vm_ref, "vbd_%s_read" % device))
        write_rate = float(self.session.VM.query_data_source(
            vm_ref, "vbd_%s_write" % device))
        yield virt_inspector.DiskRateStats(device=device,
                                           read_bytes_rate=read_rate,
                                           read_requests_rate=0,
                                           write_bytes_rate=write_rate,
                                           write_requests_rate=0)
def inspect_vnic_rates(self, instance, duration):
    """Yield InterfaceRateStats (rx/tx byte rates) per VIF of *instance*."""
    vm_ref = self._lookup_by_name(util.instance_name(instance))
    for vif_ref in self.session.VM.get_VIFs(vm_ref):
        vif_rec = self.session.VIF.get_record(vif_ref)
        device = vif_rec['device']
        rx_rate = float(self.session.VM.query_data_source(
            vm_ref, "vif_%s_rx" % device))
        tx_rate = float(self.session.VM.query_data_source(
            vm_ref, "vif_%s_tx" % device))
        yield virt_inspector.InterfaceRateStats(name=vif_rec['uuid'],
                                                mac=vif_rec['MAC'],
                                                fref=None,
                                                parameters=None,
                                                rx_bytes_rate=rx_rate,
                                                tx_bytes_rate=tx_rate)
def get_samples(self, manager, cache, resources):
    """Yield a ``cpu_l3_cache`` gauge sample per instance in *resources*.

    NOTE(review): this fragment had no ``def`` line and its final
    exception handler was cut off mid-statement; both are reconstructed
    to mirror the other handlers in this file -- confirm against the
    original pollster module.  ``util``, ``sample``, ``plugin_base`` and
    ``ceilometer`` are not imported in this chunk.
    """
    for instance in resources:
        LOG.debug(_('checking cache usage for instance %s'), instance.id)
        try:
            cpu_cache = self.inspector.inspect_cpu_l3_cache(instance)
            LOG.debug(_("CPU cache size: %(id)s %(cache_size)d"),
                      ({'id': instance.id,
                        'cache_size': cpu_cache.l3_cache_usage}))
            yield util.make_sample_from_instance(
                self.conf,
                instance,
                name='cpu_l3_cache',
                type=sample.TYPE_GAUGE,
                unit='B',
                volume=cpu_cache.l3_cache_usage,
            )
        except virt_inspector.InstanceNotFoundException as err:
            # Instance was deleted while getting samples. Ignore it.
            LOG.debug('Exception while getting samples %s', err)
        except virt_inspector.NoDataException as e:
            LOG.warning(('Cannot inspect data of %(pollster)s for '
                         '%(instance_id)s, non-fatal reason: %(exc)s'),
                        {'pollster': self.__class__.__name__,
                         'instance_id': instance.id, 'exc': e})
            raise plugin_base.PollsterPermanentError(resources)
        except ceilometer.NotImplementedError:
            # Selected inspector does not implement this pollster.
            LOG.debug('Obtaining cache usage is not implemented for %s',
                      self.inspector.__class__.__name__)
            raise plugin_base.PollsterPermanentError(resources)
        except Exception as err:
            # BUG FIX: the original was truncated after the message literal
            # and used the undefined ``_LE`` marker; completed with ``_``.
            LOG.exception(_('Could not get cache usage for %(id)s: %(e)s'),
                          {'id': instance.id, 'e': err})