#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2024 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
import os
import psutil
from glances.filter import GlancesFilter, GlancesFilterList
from glances.globals import BSD, LINUX, MACOS, WINDOWS, iterkeys, list_of_namedtuple_to_list_of_dict, namedtuple_to_dict
from glances.logger import logger
from glances.programs import processes_to_programs
from glances.timer import Timer, getTimeSinceLastUpdate
psutil_version_info = tuple([int(num) for num in psutil.__version__.split('.')])
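# Example (hypothetical version string): '5.9.8' -> (5, 9, 8), used below to switch
# between the PsUtil < 6 and >= 6 network connection APIs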
# This constant defines the list of available processes sort keys
sort_processes_key_list = ['cpu_percent', 'memory_percent', 'username', 'cpu_times', 'io_counters', 'name']
# Human-readable labels for the sort keys
sort_for_human = {
'io_counters': 'disk IO',
'cpu_percent': 'CPU consumption',
'memory_percent': 'memory consumption',
'cpu_times': 'process time',
'username': 'user name',
    'name': 'process name',
None: 'None',
}
class GlancesProcesses:
"""Get processed stats using the psutil library."""
def __init__(self, cache_timeout=60):
"""Init the class to collect stats about processes."""
# Init the args, coming from the GlancesStandalone class
# Should be set by the set_args method
self.args = None
        # The internal caches will be cleaned every 'cache_timeout' seconds
self.cache_timeout = cache_timeout
# First iteration, no cache
self.cache_timer = Timer(0)
# Init the io_old dict used to compute the IO bitrate
# key = pid
# value = [ read_bytes_old, write_bytes_old ]
self.io_old = {}
# Init stats
self.auto_sort = None
self._sort_key = None
# Default processes sort key is 'auto'
        # It can be overwritten from the configuration file (issue #1536) => see glances_processlist.py init
self.set_sort_key('auto', auto=True)
self.processlist = []
self.reset_processcount()
# Cache is a dict with key=pid and value = dict of cached value
self.processlist_cache = {}
# List of processes stats to export
        # Only processes matching one of the filters will be exported
self._filter_export = GlancesFilterList()
self.processlist_export = []
# Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
# Default is to enable the processes stats
self.disable_tag = False
        # Extended stats for the top process are enabled by default
self.disable_extended_tag = False
self.extended_process = None
# Test if the system can grab io_counters
try:
p = psutil.Process()
p.io_counters()
except Exception as e:
logger.warning(f'PsUtil can not grab processes io_counters ({e})')
self.disable_io_counters = True
else:
logger.debug('PsUtil can grab processes io_counters')
self.disable_io_counters = False
# Test if the system can grab gids
try:
p = psutil.Process()
p.gids()
except Exception as e:
logger.warning(f'PsUtil can not grab processes gids ({e})')
self.disable_gids = True
else:
logger.debug('PsUtil can grab processes gids')
self.disable_gids = False
        # Maximum number of processes shown in the UI (None if no limit)
self._max_processes = None
# Process filter
self._filter = GlancesFilter()
# Whether or not to hide kernel threads
self.no_kernel_threads = False
        # Store maximum values in a dict
# Used in the UI to highlight the maximum value
self._max_values_list = ('cpu_percent', 'memory_percent')
# { 'cpu_percent': 0.0, 'memory_percent': 0.0 }
self._max_values = {}
self.reset_max_values()
def set_args(self, args):
"""Set args."""
self.args = args
def reset_internal_cache(self):
"""Reset the internal cache."""
self.cache_timer = Timer(0)
self.processlist_cache = {}
if hasattr(psutil.process_iter, 'cache_clear'):
# Cache clear only available in PsUtil 6 or higher
psutil.process_iter.cache_clear()
def reset_processcount(self):
"""Reset the global process count"""
self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0, 'pid_max': None}
def update_processcount(self, plist):
"""Update the global process count from the current processes list"""
        # For each key in the processcount dict
        # count the number of processes with the same status
        for k in iterkeys(self.processcount):
            self.processcount[k] = len(list(filter(lambda v: v['status'] == k, plist)))
        # Update the maximum process ID (pid) number
        # (set after the loop above so the 'pid_max' value is not overwritten by a status count)
        self.processcount['pid_max'] = self.pid_max
# Compute thread
self.processcount['thread'] = sum(i['num_threads'] for i in plist if i['num_threads'] is not None)
# Compute total
self.processcount['total'] = len(plist)
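        # Example of the resulting dict (hypothetical values):
        # {'total': 243, 'running': 2, 'sleeping': 241, 'thread': 1204, 'pid_max': 4194304}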
def enable(self):
"""Enable process stats."""
self.disable_tag = False
self.update()
def disable(self):
"""Disable process stats."""
self.disable_tag = True
def enable_extended(self):
"""Enable extended process stats."""
self.disable_extended_tag = False
self.update()
def disable_extended(self):
"""Disable extended process stats."""
self.disable_extended_tag = True
@property
def pid_max(self):
"""
Get the maximum PID value.
On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.
From `man 5 proc`:
The default value for this file, 32768, results in the same range of
PIDs as on earlier kernels. On 32-bit platforms, 32768 is the maximum
value for pid_max. On 64-bit systems, pid_max can be set to any value
up to 2^22 (PID_MAX_LIMIT, approximately 4 million).
If the file is unreadable or not available for whatever reason,
returns None.
Some other OSes:
- On FreeBSD and macOS the maximum is 99999.
- On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
- On NetBSD the maximum is 30000.
:returns: int or None
"""
if LINUX:
# XXX: waiting for https://github.com/giampaolo/psutil/issues/720
try:
with open('/proc/sys/kernel/pid_max', 'rb') as f:
return int(f.read())
except OSError:
return None
else:
return None
@property
def processes_count(self):
"""Get the current number of processes showed in the UI."""
return min(self._max_processes - 2, glances_processes.processcount['total'] - 1)
@property
def max_processes(self):
"""Get the maximum number of processes showed in the UI."""
return self._max_processes
@max_processes.setter
def max_processes(self, value):
"""Set the maximum number of processes showed in the UI."""
self._max_processes = value
# Process filter
@property
def process_filter_input(self):
"""Get the process filter (given by the user)."""
return self._filter.filter_input
@property
def process_filter(self):
"""Get the process filter (current apply filter)."""
return self._filter.filter
@process_filter.setter
def process_filter(self, value):
"""Set the process filter."""
self._filter.filter = value
@property
def process_filter_key(self):
"""Get the process filter key."""
return self._filter.filter_key
@property
def process_filter_re(self):
"""Get the process regular expression compiled."""
return self._filter.filter_re
# Export filter
@property
def export_process_filter(self):
"""Get the export process filter (current export process filter list)."""
return self._filter_export.filter
@export_process_filter.setter
def export_process_filter(self, value):
"""Set the export process filter list."""
self._filter_export.filter = value
# Kernel threads
def disable_kernel_threads(self):
"""Ignore kernel threads in process list."""
self.no_kernel_threads = True
@property
def sort_reverse(self):
"""Return True to sort processes in reverse 'key' order, False instead."""
if self.sort_key == 'name' or self.sort_key == 'username':
return False
return True
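        # Examples: sort_key == 'cpu_percent'  -> True  (largest values first)
        #           sort_key == 'name'         -> False (alphabetical order)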
def max_values(self):
"""Return the max values dict."""
return self._max_values
def get_max_values(self, key):
"""Get the maximum values of the given stat (key)."""
return self._max_values[key]
def set_max_values(self, key, value):
"""Set the maximum value for a specific stat (key)."""
self._max_values[key] = value
def reset_max_values(self):
"""Reset the maximum values dict."""
self._max_values = {}
for k in self._max_values_list:
self._max_values[k] = 0.0
def get_extended_stats(self, proc):
"""Get the extended stats for the given PID."""
# - cpu_affinity (Linux, Windows, FreeBSD)
# - ionice (Linux and Windows > Vista)
# - num_ctx_switches (not available on Illumos/Solaris)
# - num_fds (Unix-like)
# - num_handles (Windows)
# - memory_maps (only swap, Linux)
# https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
# - connections (TCP and UDP)
# - CPU min/max/mean
# Set the extended stats list (OS dependent)
extended_stats = ['cpu_affinity', 'ionice', 'num_ctx_switches']
if LINUX:
# num_fds only available on Unix system (see issue #1351)
extended_stats += ['num_fds']
if WINDOWS:
extended_stats += ['num_handles']
ret = {}
try:
logger.debug('Grab extended stats for process {}'.format(proc['pid']))
# Get PID of the selected process
selected_process = psutil.Process(proc['pid'])
# Get the extended stats for the selected process
ret = selected_process.as_dict(attrs=extended_stats, ad_value=None)
# Get memory swap for the selected process (Linux Only)
ret['memory_swap'] = self.__get_extended_memory_swap(selected_process)
# Get number of TCP and UDP network connections for the selected process
ret['tcp'], ret['udp'] = self.__get_extended_connections(selected_process)
except (psutil.NoSuchProcess, ValueError, AttributeError) as e:
logger.error(f'Can not grab extended stats ({e})')
self.extended_process = None
ret['extended_stats'] = False
else:
# Compute CPU and MEM min/max/mean
            # Merge the returned dict with the current one
ret.update(self.__get_min_max_mean(proc))
self.extended_process = ret
ret['extended_stats'] = True
return namedtuple_to_dict(ret)
def __get_min_max_mean(self, proc, prefix=['cpu', 'memory']):
"""Return the min/max/mean for the given process"""
ret = {}
for stat_prefix in prefix:
min_key = stat_prefix + '_min'
max_key = stat_prefix + '_max'
mean_sum_key = stat_prefix + '_mean_sum'
mean_counter_key = stat_prefix + '_mean_counter'
if min_key not in self.extended_process:
ret[min_key] = proc[stat_prefix + '_percent']
else:
ret[min_key] = min(proc[stat_prefix + '_percent'], self.extended_process[min_key])
if max_key not in self.extended_process:
ret[max_key] = proc[stat_prefix + '_percent']
else:
ret[max_key] = max(proc[stat_prefix + '_percent'], self.extended_process[max_key])
if mean_sum_key not in self.extended_process:
ret[mean_sum_key] = proc[stat_prefix + '_percent']
else:
ret[mean_sum_key] = self.extended_process[mean_sum_key] + proc[stat_prefix + '_percent']
if mean_counter_key not in self.extended_process:
ret[mean_counter_key] = 1
else:
ret[mean_counter_key] = self.extended_process[mean_counter_key] + 1
ret[stat_prefix + '_mean'] = ret[mean_sum_key] / ret[mean_counter_key]
return ret
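        # Example with three successive cpu_percent samples 10.0, 30.0, 20.0 (hypothetical):
        # cpu_min=10.0, cpu_max=30.0, cpu_mean_sum=60.0, cpu_mean_counter=3, cpu_mean=20.0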
def __get_extended_memory_swap(self, process):
"""Return the memory swap for the given process"""
if not LINUX:
return None
try:
memory_swap = sum([v.swap for v in process.memory_maps()])
        except (psutil.NoSuchProcess, KeyError):
            # (KeyError catch for issue #1551)
            # Avoid an UnboundLocalError on the return statement below
            memory_swap = None
except (psutil.AccessDenied, NotImplementedError):
# NotImplementedError: /proc/${PID}/smaps file doesn't exist
# on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
# is not enabled (see psutil #533/glances #413).
memory_swap = None
return memory_swap
def __get_extended_connections(self, process):
"""Return a tuple with (tcp, udp) connections count
        The code is compatible with both PsUtil < 6 and PsUtil >= 6
"""
try:
# Hack for issue #2754 (PsUtil 6+)
if psutil_version_info[0] >= 6:
tcp = len(process.net_connections(kind="tcp"))
udp = len(process.net_connections(kind="udp"))
else:
tcp = len(process.connections(kind="tcp"))
udp = len(process.connections(kind="udp"))
except (psutil.AccessDenied, psutil.NoSuchProcess):
# Manage issue1283 (psutil.AccessDenied)
tcp = None
udp = None
return tcp, udp
def is_selected_extended_process(self, position):
"""Return True if the process is the selected one for extended stats."""
return (
hasattr(self.args, 'programs')
and not self.args.programs
and hasattr(self.args, 'enable_process_extended')
and self.args.enable_process_extended
and not self.disable_extended_tag
and hasattr(self.args, 'cursor_position')
and position == self.args.cursor_position
and not self.args.disable_cursor
)
def update(self):
"""Update the processes stats."""
# Init new processes stats
processlist = []
# Do not process if disable tag is set
if self.disable_tag:
return processlist
# Time since last update (for disk_io rate computation)
time_since_update = getTimeSinceLastUpdate('process_disk')
# Grab standard stats
#####################
sorted_attrs = ['cpu_percent', 'cpu_times', 'memory_percent', 'name', 'status', 'num_threads']
displayed_attr = ['memory_info', 'nice', 'pid']
        # The following attributes are cached and only retrieved every self.cache_timeout seconds
# Warning: 'name' can not be cached because it is used for filtering
cached_attrs = ['cmdline', 'username']
# Some stats are optional
if not self.disable_io_counters:
sorted_attrs.append('io_counters')
if not self.disable_gids:
displayed_attr.append('gids')
        # Some stats are not sort keys
        # An optimisation could be done by only grabbing displayed_attr
        # for displayed processes (but only in standalone mode...)
sorted_attrs.extend(displayed_attr)
# Some stats are cached (not necessary to be refreshed every time)
if self.cache_timer.finished():
sorted_attrs += cached_attrs
self.cache_timer.set(self.cache_timeout)
self.cache_timer.reset()
is_cached = False
else:
is_cached = True
        # Build the processes stats list (this is why we need psutil >= 5.3.0)
        # This is one of the main bottlenecks of Glances (see flame graph)
# It may be optimized with PsUtil 6+ (see issue #2755)
processlist = list(
filter(
lambda p: not (BSD and p.info['name'] == 'idle')
and not (WINDOWS and p.info['name'] == 'System Idle Process')
and not (MACOS and p.info['name'] == 'kernel_task')
and not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0),
psutil.process_iter(attrs=sorted_attrs, ad_value=None),
)
)
# Only get the info key
        # PsUtil 6+ no longer checks for PID reuse (#2755), so use is_running in the loop
        # Note: not sure it is really needed, but CPU consumption looks the same with or without it
processlist = [p.info for p in processlist if p.is_running()]
# Sort the processes list by the current sort_key
processlist = sort_stats(processlist, sorted_by=self.sort_key, reverse=True)
# Update the processcount
self.update_processcount(processlist)
        # Loop over processes and:
# - add extended stats for selected process
# - add metadata
for position, proc in enumerate(processlist):
# Extended stats
################
# Get the selected process when the 'e' key is pressed
if self.is_selected_extended_process(position):
self.extended_process = proc
# Grab extended stats only for the selected process (see issue #2225)
if self.extended_process is not None and proc['pid'] == self.extended_process['pid']:
proc.update(self.get_extended_stats(self.extended_process))
self.extended_process = namedtuple_to_dict(proc)
# Meta data
###########
# PID is the key
proc['key'] = 'pid'
# Time since last update (for disk_io rate computation)
proc['time_since_update'] = time_since_update
# Process status (only keep the first char)
proc['status'] = str(proc['status'])[:1].upper()
# Process IO
            # proc['io_counters'] is a list:
# [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
# If io_tag = 0 > Access denied or first time (display "?")
# If io_tag = 1 > No access denied (display the IO rate)
if 'io_counters' in proc and proc['io_counters'] is not None:
io_new = [proc['io_counters'][2], proc['io_counters'][3]]
# For IO rate computation
# Append saved IO r/w bytes
try:
proc['io_counters'] = io_new + self.io_old[proc['pid']]
io_tag = 1
except KeyError:
proc['io_counters'] = io_new + [0, 0]
io_tag = 0
# then save the IO r/w bytes
self.io_old[proc['pid']] = io_new
else:
proc['io_counters'] = [0, 0] + [0, 0]
io_tag = 0
# Append the IO tag (for display)
proc['io_counters'] += [io_tag]
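            # Resulting layout (hypothetical byte counts):
            # [read_new, write_new, read_old, write_old, io_tag]
            # e.g. [120000, 4096, 100000, 2048, 1]; the display layer derives the IO
            # rate from the (new - old) deltas and 'time_since_update'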
# Manage cached information
if is_cached:
# Grab cached values (in case of a new incoming process)
if proc['pid'] not in self.processlist_cache:
try:
self.processlist_cache[proc['pid']] = psutil.Process(pid=proc['pid']).as_dict(
attrs=cached_attrs, ad_value=None
)
except psutil.NoSuchProcess:
pass
# Add cached value to current stat
try:
proc.update(self.processlist_cache[proc['pid']])
except KeyError:
pass
else:
# Save values to cache
try:
self.processlist_cache[proc['pid']] = {cached: proc[cached] for cached in cached_attrs}
except KeyError:
pass
# Filter and transform process export list
self.processlist_export = self.update_export_list(processlist)
# Filter and transform process list
processlist = self.update_list(processlist)
# Compute the maximum value for keys in self._max_values_list: CPU, MEM
# Useful to highlight the processes with maximum values
for k in self._max_values_list:
values_list = [i[k] for i in processlist if i[k] is not None]
if values_list:
self.set_max_values(k, max(values_list))
# Update the stats
self.processlist = processlist
return self.processlist
def update_list(self, processlist):
"""Return the process list after filtering and transformation (namedtuple to dict)."""
if self._filter.filter is None:
return list_of_namedtuple_to_list_of_dict(processlist)
ret = list(filter(lambda p: self._filter.is_filtered(p), processlist))
return list_of_namedtuple_to_list_of_dict(ret)
def update_export_list(self, processlist):
"""Return the process export list after filtering and transformation (namedtuple to dict)."""
if self._filter_export.filter == []:
return []
ret = list(filter(lambda p: self._filter_export.is_filtered(p), processlist))
return list_of_namedtuple_to_list_of_dict(ret)
def get_count(self):
"""Get the number of processes."""
return self.processcount
def get_list(self, sorted_by=None, as_programs=False):
"""Get the processlist.
        By default, return the list of processes.
If as_programs is True, return the list of programs."""
if as_programs:
return processes_to_programs(self.processlist)
return self.processlist
def get_export(self):
"""Return the processlist for export."""
return self.processlist_export
@property
def sort_key(self):
"""Get the current sort key."""
return self._sort_key
def set_sort_key(self, key, auto=True):
"""Set the current sort key."""
if key == 'auto':
self.auto_sort = True
self._sort_key = 'cpu_percent'
else:
self.auto_sort = auto
self._sort_key = key
def nice_decrease(self, pid):
"""Decrease nice level
On UNIX this is a number which usually goes from -20 to 20.
The higher the nice value, the lower the priority of the process."""
p = psutil.Process(pid)
try:
p.nice(p.nice() - 1)
            logger.info(f'Set nice level of process {pid} to {p.nice()} (higher priority)')
        except psutil.AccessDenied:
            logger.warning(f'Can not decrease the nice level of process {pid} (access denied)')
def nice_increase(self, pid):
"""Increase nice level
On UNIX this is a number which usually goes from -20 to 20.
The higher the nice value, the lower the priority of the process."""
p = psutil.Process(pid)
try:
p.nice(p.nice() + 1)
            logger.info(f'Set nice level of process {pid} to {p.nice()} (lower priority)')
        except psutil.AccessDenied:
            logger.warning(f'Can not increase the nice level of process {pid} (access denied)')
def kill(self, pid, timeout=3):
"""Kill process with pid"""
        assert pid != os.getpid(), "Glances can not kill itself..."
p = psutil.Process(pid)
logger.debug(f'Send kill signal to process: {p}')
p.kill()
return p.wait(timeout)
def weighted(value):
"""Manage None value in dict value."""
return -float('inf') if value is None else value
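    # Example: weighted(None) -> -inf and weighted(3.2) -> 3.2, so processes with a
    # missing value sink to the bottom of a reverse (descending) sort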
def _sort_io_counters(process, sorted_by='io_counters', sorted_by_secondary='memory_percent'):
"""Specific case for io_counters
:return: Sum of io_r + io_w
"""
return process[sorted_by][0] - process[sorted_by][2] + process[sorted_by][1] - process[sorted_by][3]
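    # Example (hypothetical io_counters list [read_new, write_new, read_old, write_old, tag]):
    # [120000, 4096, 100000, 2048, 1] -> (120000 - 100000) + (4096 - 2048) = 22048 bytes
    # read + written since the previous refresh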
def _sort_cpu_times(process, sorted_by='cpu_times', sorted_by_secondary='memory_percent'):
"""Specific case for cpu_times
Patch for "Sorting by process time works not as expected #1321"
By default PsUtil only takes user time into account
see (https://github.com/giampaolo/psutil/issues/1339)
The following implementation takes user and system time into account
"""
return process[sorted_by][0] + process[sorted_by][1]
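    # Example: cpu_times with user=12.5 and system=3.1 (hypothetical) -> 12.5 + 3.1 = 15.6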
def _sort_lambda(sorted_by='cpu_percent', sorted_by_secondary='memory_percent'):
"""Return a sort lambda function for the sorted_by key"""
ret = None
if sorted_by == 'io_counters':
ret = _sort_io_counters
elif sorted_by == 'cpu_times':
ret = _sort_cpu_times
return ret
def sort_stats(stats, sorted_by='cpu_percent', sorted_by_secondary='memory_percent', reverse=True):
"""Return the stats (dict) sorted by (sorted_by).
A secondary sort key should be specified.
Reverse the sort if reverse is True.
"""
if sorted_by is None and sorted_by_secondary is None:
# No need to sort...
return stats
# Check if a specific sort should be done
sort_lambda = _sort_lambda(sorted_by=sorted_by, sorted_by_secondary=sorted_by_secondary)
if sort_lambda is not None:
# Specific sort
try:
stats.sort(key=sort_lambda, reverse=reverse)
except Exception:
# If an error is detected, fallback to cpu_percent
stats.sort(
key=lambda process: (weighted(process['cpu_percent']), weighted(process[sorted_by_secondary])),
reverse=reverse,
)
else:
# Standard sort
try:
stats.sort(
key=lambda process: (weighted(process[sorted_by]), weighted(process[sorted_by_secondary])),
reverse=reverse,
)
except (KeyError, TypeError):
# Fallback to name
stats.sort(key=lambda process: process['name'] if process['name'] is not None else '~', reverse=False)
return stats
glances_processes = GlancesProcesses()
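# Minimal usage sketch (not part of the upstream module): exercise the module-level
# singleton directly. The 'args' object is normally injected by GlancesStandalone via
# set_args(); without it, extended stats and the cursor handling simply stay disabled.
if __name__ == '__main__':
    import time

    glances_processes.update()  # the first call primes the per-process CPU counters
    time.sleep(1)
    plist = glances_processes.update()  # the second call returns meaningful cpu_percent
    for p in sort_stats(plist, sorted_by='memory_percent')[:5]:
        print(p['pid'], p['name'], round(p['memory_percent'] or 0.0, 1))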