"""This file contains common functions needed to monitor metrics"""
import threading
import logging
from perfmon.common.utils.json_wrappers import dump_json
_log = logging.getLogger(__name__)
# pylint: disable=E0401,W0201,C0301
def get_child_procs(user, procs):
    """Get list of children processes in user namespace

    Args:
        user (str): User name
        procs (list): psutil Process objects owned by the user

    Returns:
        list: Input processes plus their recursive children owned by ``user``
    """
    # Work on a copy: the original aliased the caller's list and appended to
    # it while iterating it, mutating the caller's data and the loop target.
    valid_procs = list(procs)
    # Track pids in a set for O(1) membership tests, and update it on every
    # append so a child reachable from two parents is not added twice.
    seen_pids = {p.pid for p in procs}
    for proc in procs:
        # children(recursive=True) already yields grandchildren, so iterating
        # only the original parents is sufficient.
        for child_proc in proc.children(recursive=True):
            if child_proc.username() == user and child_proc.pid not in seen_pids:
                valid_procs.append(child_proc)
                seen_pids.add(child_proc.pid)
    return valid_procs
def dump_metrics_async(data, outfile):
    """Dump metrics asynchronously

    Args:
        data (dict): Data to be dumped to disk
        outfile (str): Path of the outfile
    """
    # Hand the JSON write to a background thread so the caller never blocks
    # on disk I/O; dump_json appends to the file or creates it if absent.
    worker = threading.Thread(target=dump_json, args=(data, outfile))
    worker.start()
def get_cumulative_metric_value(metric_type, procs, data):
    """Sum one metric over all processes (parents and children) and append it.

    Args:
        metric_type (str): Name of the psutil Process method to call
            (e.g. ``cpu_times``, ``memory_info``, ``num_threads``).
        procs (list): Processes to aggregate over.
        data (dict): Metric store. ``data[metric_type]`` is either a dict
            mapping field names to sample lists, or a flat sample list.

    Returns:
        dict: The same ``data`` object with one new sample appended per series.
    """
    if isinstance(data[metric_type], dict):
        # One time series per field of the metric object (e.g. rss, vms).
        for key, series in data[metric_type].items():
            # Reset the accumulator for every field: the original initialised
            # it once outside this loop, so each field's sample wrongly
            # included the totals of all previously processed fields.
            field_total = 0
            for proc in procs:
                metric_obj = getattr(proc, metric_type)()
                field_total += getattr(metric_obj, key)
            series.append(field_total)
    elif isinstance(data[metric_type], list):
        # Scalar metric: the method returns a number directly.
        total = sum(getattr(proc, metric_type)() for proc in procs)
        data[metric_type].append(total)
    return data
def check_metric_data(data_struct):
    """Pad every metric series to match the number of recorded timestamps.

    Short series are extended in place by repeating their last sample; this
    happens at end of execution, so duplicating the final value is harmless.
    Empty series are left untouched.

    Args:
        data_struct (dict): Metric store containing a ``time_stamps`` list
            plus flat sample lists and/or dicts of sample lists.

    Returns:
        dict: The same ``data_struct`` with all series length-aligned.
    """
    expected = len(data_struct['time_stamps'])
    for series in data_struct.values():
        if isinstance(series, dict):
            for sub in series.values():
                shortfall = expected - len(sub)
                if sub and shortfall > 0:
                    # Repeat the final sample to fill the gap, in place
                    sub.extend(sub[-1:] * shortfall)
        elif isinstance(series, list):
            shortfall = expected - len(series)
            if series and shortfall > 0:
                series.extend(series[-1:] * shortfall)
    return data_struct