def exit(self):
    """Just log an event when Glances exits."""
    logger.debug("Stop the {} plugin".format(self.plugin_name))
def exit(self):
    """Close the CSV file."""
    logger.debug("Finalise export interface %s" % self.export_name)
    self.csv_file.close()
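# A minimal sketch of the opening side that the exit() above pairs with; the
# 'csv_filename' attribute and the method name are assumptions for
# illustration, not taken from the source:
def open_csv_file(self):
    """Open the CSV output file whose handle exit() closes."""
    self.csv_file = open(self.csv_filename, 'w')
    logger.debug("Stats exported to CSV file: {}".format(self.csv_filename))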
else:
    logger.debug("Extended stats for top process are enabled")
    glances_processes.enable_extended()

# Manage optional process filter
if args.process_filter is not None:
    glances_processes.process_filter = args.process_filter

if (not WINDOWS) and args.no_kernel_threads:
    # Ignore kernel threads in the process list
    glances_processes.disable_kernel_threads()

# Initial system information update
start_duration.reset()
self.stats.update()
logger.debug("First stats update duration: {} seconds".format(start_duration.get()))
if self.quiet:
    logger.info("Quiet mode is ON, nothing will be displayed")
    # In quiet mode, nothing is displayed
    glances_processes.max_processes = 0
elif args.stdout:
    logger.info("Stdout mode is ON, following stats will be displayed: {}".format(args.stdout))
    # Init screen
    self.screen = GlancesStdout(config=config, args=args)
elif args.stdout_csv:
    logger.info("Stdout CSV mode is ON, following stats will be displayed: {}".format(args.stdout_csv))
    # Init screen
    self.screen = GlancesStdoutCsv(config=config, args=args)
else:
    # The default number of processes to display is 50
    glances_processes.max_processes = 50
if self.args.snmp_force:
    # Force SNMP instead of Glances server
    self.client_mode = 'snmp'
else:
    # First of all, try to connect to a Glances server
    if not self._login_glances():
        return False

# Try SNMP mode
if self.client_mode == 'snmp':
    if not self._login_snmp():
        return False

# Load limits from the configuration file
# Each client can choose its own limits
logger.debug("Load limits from the client configuration file")
self.stats.load_limits(self.config)

# Init screen
if self.quiet:
    # In quiet mode, nothing is displayed
    logger.info("Quiet mode is ON: Nothing will be displayed")
else:
    self.screen = GlancesCursesClient(config=self.config, args=self.args)

# Return True: OK
return True
for net in netiocounters:
    # Do not take hidden interfaces into account
    if self.is_hide(net):
        continue

    # Grab the stats using the Wifi Python lib
    try:
        wifi_cells = Cell.all(net)
    except InterfaceError as e:
        # Not a Wifi interface
        logger.debug("WIFI plugin: Scan InterfaceError ({})".format(e))
    except Exception as e:
        # Other error
        logger.debug("WIFI plugin: Can not grab cell stats ({})".format(e))
    else:
        for wifi_cell in wifi_cells:
            hotspot = {
                'key': self.get_key(),
                'ssid': wifi_cell.ssid,
                'signal': wifi_cell.signal,
                'quality': wifi_cell.quality,
                'encrypted': wifi_cell.encrypted,
                'encryption_type': wifi_cell.encryption_type if wifi_cell.encrypted else None,
            }
            # Add the hotspot to the list
            stats.append(hotspot)
elif self.input_method == 'snmp':
    # Update stats using SNMP
    pass

def get_docker_cpu(self, container_id, all_stats):
    """Return the container CPU usage.

    Input: id is the full container id
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'total': ...}
    """
    cpu_new = {}
    ret = {'total': 0.0}

    # Read the stats
    # For each container, you will find a pseudo-file cpuacct.stat,
    # containing the CPU usage accumulated by the processes of the container.
    # Those times are expressed in ticks of 1/USER_HZ of a second.
    # On x86 systems, USER_HZ is 100.
    try:
        cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
        cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
        cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
    except KeyError as e:
        # all_stats does not have CPU information
        logger.debug("docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)
    else:
        # Previous CPU stats are stored in the cpu_old variable
        if not hasattr(self, 'cpu_old'):
            # First call: init the cpu_old variable
            self.cpu_old = {}
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.cpu_old:
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass
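# A minimal sketch of the delta step that turns the two tick samples above
# into a percentage; the formula follows the cpuacct semantics described in
# the comments and is an illustration, not the project's exact code:
def compute_cpu_percent(cpu_new, cpu_old):
    """Return the container CPU usage in percent from two tick samples."""
    cpu_delta = cpu_new['total'] - cpu_old['total']        # container ticks
    system_delta = cpu_new['system'] - cpu_old['system']   # whole-system ticks
    if cpu_delta > 0 and system_delta > 0:
        # Share of system ticks consumed, scaled by the core count
        return (cpu_delta / float(system_delta)) * cpu_new['nb_core'] * 100.0
    return 0.0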
def _load_cache(self):
    """Load the cache file and return the cached data."""
    # If the cache file exists, read it
    max_refresh_date = timedelta(days=7)
    cached_data = {}
    try:
        with open(self.cache_file, 'rb') as f:
            cached_data = pickle.load(f)
    except Exception as e:
        logger.debug("Cannot read version from cache file: {} ({})".format(self.cache_file, e))
    else:
        logger.debug("Read version from cache file")
        if (cached_data['installed_version'] != self.installed_version() or
                datetime.now() - cached_data['refresh_date'] > max_refresh_date):
            # Reset the cache if:
            # - the installed version is different
            # - the refresh date is older than max_refresh_date
            cached_data = {}
    return cached_data
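# _load_cache() implies a writer counterpart that stores the fetched version
# together with a refresh date. A minimal sketch, assuming the same pickle
# format and a 'data' dict holding the keys _load_cache() checks:
def _save_cache(self):
    """Save the current version data to the cache file."""
    try:
        with open(self.cache_file, 'wb') as f:
            pickle.dump(self.data, f)
    except Exception as e:
        logger.debug("Cannot write version to cache file: {} ({})".format(self.cache_file, e))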
def get_docker_memory(self, container_id, all_stats):
    """Return the container memory usage.

    Input: id is the full container id
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
    """
    ret = {}

    # Read the stats
    try:
        # These do not exist anymore with Docker 1.11 (issue #848)
        # ret['rss'] = all_stats['memory_stats']['stats']['rss']
        # ret['cache'] = all_stats['memory_stats']['stats']['cache']
        ret['usage'] = all_stats['memory_stats']['usage']
        ret['limit'] = all_stats['memory_stats']['limit']
        ret['max_usage'] = all_stats['memory_stats']['max_usage']
    except (KeyError, TypeError) as e:
        # all_stats does not have memory information
        logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)

    # Return the stats
    return ret
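# Usage illustration for get_docker_memory() above, with a hand-built
# all_stats payload shaped like the Docker stats API output (the container
# id and all values are made up for the example):
sample_stats = {
    'memory_stats': {
        'usage': 1015808,
        'limit': 536870912,
        'max_usage': 2031616,
    }
}
# plugin.get_docker_memory('abcd1234', sample_stats)
# -> {'usage': 1015808, 'limit': 536870912, 'max_usage': 2031616}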
def export(self, name, columns, points):
    """Export the stats to the RESTful endpoint."""
    if name == self.plugins_to_export()[0] and self.buffer != {}:
        # One complete loop has been done
        logger.debug("Export stats ({}) to RESTful endpoint ({})".format(listkeys(self.buffer),
                                                                         self.client))
        # Export stats
        post(self.client, json=self.buffer, allow_redirects=True)
        # Reset buffer
        self.buffer = {}

    # Add the current stat to the buffer
    self.buffer[name] = dict(zip(columns, points))
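# The buffering above batches one dict per plugin and flushes the whole batch
# when the first plugin of the export loop comes around again. How one buffer
# entry is built (column names and values are made up):
columns = ['total', 'user', 'system']
points = [12.3, 8.1, 4.2]
# dict(zip(columns, points)) -> {'total': 12.3, 'user': 8.1, 'system': 4.2}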
def export(self, name, columns, points):
    """Write the points to RabbitMQ."""
    data = ('hostname=' + self.hostname + ', name=' + name +
            ', dateinfo=' + datetime.datetime.utcnow().isoformat())
    for column, point in zip(columns, points):
        # Only export numeric values
        if isinstance(point, Number):
            data += ", " + column + "=" + str(point)
    logger.debug(data)
    try:
        self.client.basic_publish(exchange='', routing_key=self.queue, body=data)
    except Exception as e:
        logger.error("Cannot export stats to RabbitMQ (%s)" % e)
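# A small, self-contained demonstration of the message body format produced
# by the export() above (hostname, plugin name, and values are made up):
import datetime
from numbers import Number

def build_rabbitmq_body(hostname, name, columns, points):
    data = ('hostname=' + hostname + ', name=' + name +
            ', dateinfo=' + datetime.datetime.utcnow().isoformat())
    for column, point in zip(columns, points):
        # Non-numeric points are skipped, exactly as in export()
        if isinstance(point, Number):
            data += ", " + column + "=" + str(point)
    return data

# build_rabbitmq_body('server1', 'cpu', ['total', 'user'], [12.3, 'n/a'])
# -> "hostname=server1, name=cpu, dateinfo=..., total=12.3"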