Diffstat (limited to 'lib/psutil')
-rw-r--r--  lib/psutil/__init__.py                 2421
-rw-r--r--  lib/psutil/_common.py                   899
-rw-r--r--  lib/psutil/_compat.py                   450
-rw-r--r--  lib/psutil/_psaix.py                    555
-rw-r--r--  lib/psutil/_psbsd.py                    927
-rw-r--r--  lib/psutil/_pslinux.py                 2257
-rw-r--r--  lib/psutil/_psosx.py                    543
-rw-r--r--  lib/psutil/_psposix.py                  232
-rw-r--r--  lib/psutil/_pssunos.py                  727
-rwxr-xr-x  lib/psutil/_psutil_linux.abi3.so        bin 0 -> 107400 bytes
-rwxr-xr-x  lib/psutil/_psutil_posix.abi3.so        bin 0 -> 71008 bytes
-rw-r--r--  lib/psutil/_pswindows.py               1120
-rw-r--r--  lib/psutil/tests/__init__.py           1820
-rw-r--r--  lib/psutil/tests/__main__.py             15
-rw-r--r--  lib/psutil/tests/runner.py              350
-rw-r--r--  lib/psutil/tests/test_aix.py            122
-rw-r--r--  lib/psutil/tests/test_bsd.py            568
-rw-r--r--  lib/psutil/tests/test_connections.py    554
-rw-r--r--  lib/psutil/tests/test_contracts.py      751
-rw-r--r--  lib/psutil/tests/test_linux.py         2286
-rw-r--r--  lib/psutil/tests/test_memleaks.py       492
-rw-r--r--  lib/psutil/tests/test_misc.py           852
-rw-r--r--  lib/psutil/tests/test_osx.py            241
-rw-r--r--  lib/psutil/tests/test_posix.py          432
-rw-r--r--  lib/psutil/tests/test_process.py       1591
-rw-r--r--  lib/psutil/tests/test_sunos.py           46
-rw-r--r--  lib/psutil/tests/test_system.py         892
-rw-r--r--  lib/psutil/tests/test_testutils.py      441
-rw-r--r--  lib/psutil/tests/test_unicode.py        355
-rw-r--r--  lib/psutil/tests/test_windows.py        898
30 files changed, 22837 insertions(+), 0 deletions(-)
diff --git a/lib/psutil/__init__.py b/lib/psutil/__init__.py
new file mode 100644
index 0000000..5674279
--- /dev/null
+++ b/lib/psutil/__init__.py
@@ -0,0 +1,2421 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""psutil is a cross-platform library for retrieving information on
+running processes and system utilization (CPU, memory, disks, network,
+sensors) in Python. Supported platforms:
+
+ - Linux
+ - Windows
+ - macOS
+ - FreeBSD
+ - OpenBSD
+ - NetBSD
+ - Sun Solaris
+ - AIX
+
+Works with Python versions 2.7 and 3.4+.
+"""
+
+from __future__ import division
+
+import collections
+import contextlib
+import datetime
+import functools
+import os
+import signal
+import subprocess
+import sys
+import threading
+import time
+
+
+try:
+ import pwd
+except ImportError:
+ pwd = None
+
+from . import _common
+from ._common import AIX
+from ._common import BSD
+from ._common import CONN_CLOSE
+from ._common import CONN_CLOSE_WAIT
+from ._common import CONN_CLOSING
+from ._common import CONN_ESTABLISHED
+from ._common import CONN_FIN_WAIT1
+from ._common import CONN_FIN_WAIT2
+from ._common import CONN_LAST_ACK
+from ._common import CONN_LISTEN
+from ._common import CONN_NONE
+from ._common import CONN_SYN_RECV
+from ._common import CONN_SYN_SENT
+from ._common import CONN_TIME_WAIT
+from ._common import FREEBSD # NOQA
+from ._common import LINUX
+from ._common import MACOS
+from ._common import NETBSD # NOQA
+from ._common import NIC_DUPLEX_FULL
+from ._common import NIC_DUPLEX_HALF
+from ._common import NIC_DUPLEX_UNKNOWN
+from ._common import OPENBSD # NOQA
+from ._common import OSX # deprecated alias
+from ._common import POSIX # NOQA
+from ._common import POWER_TIME_UNKNOWN
+from ._common import POWER_TIME_UNLIMITED
+from ._common import STATUS_DEAD
+from ._common import STATUS_DISK_SLEEP
+from ._common import STATUS_IDLE
+from ._common import STATUS_LOCKED
+from ._common import STATUS_PARKED
+from ._common import STATUS_RUNNING
+from ._common import STATUS_SLEEPING
+from ._common import STATUS_STOPPED
+from ._common import STATUS_TRACING_STOP
+from ._common import STATUS_WAITING
+from ._common import STATUS_WAKING
+from ._common import STATUS_ZOMBIE
+from ._common import SUNOS
+from ._common import WINDOWS
+from ._common import AccessDenied
+from ._common import Error
+from ._common import NoSuchProcess
+from ._common import TimeoutExpired
+from ._common import ZombieProcess
+from ._common import memoize_when_activated
+from ._common import wrap_numbers as _wrap_numbers
+from ._compat import PY3 as _PY3
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import SubprocessTimeoutExpired as _SubprocessTimeoutExpired
+from ._compat import long
+
+
+if LINUX:
+ # This is public API and it will be retrieved from _pslinux.py
+ # via sys.modules.
+ PROCFS_PATH = "/proc"
+
+ from . import _pslinux as _psplatform
+ from ._pslinux import IOPRIO_CLASS_BE # NOQA
+ from ._pslinux import IOPRIO_CLASS_IDLE # NOQA
+ from ._pslinux import IOPRIO_CLASS_NONE # NOQA
+ from ._pslinux import IOPRIO_CLASS_RT # NOQA
+
+elif WINDOWS:
+ from . import _pswindows as _psplatform
+ from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA
+ from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA
+ from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA
+ from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA
+ from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA
+ from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA
+ from ._pswindows import CONN_DELETE_TCB # NOQA
+ from ._pswindows import IOPRIO_HIGH # NOQA
+ from ._pswindows import IOPRIO_LOW # NOQA
+ from ._pswindows import IOPRIO_NORMAL # NOQA
+ from ._pswindows import IOPRIO_VERYLOW # NOQA
+
+elif MACOS:
+ from . import _psosx as _psplatform
+
+elif BSD:
+ from . import _psbsd as _psplatform
+
+elif SUNOS:
+ from . import _pssunos as _psplatform
+ from ._pssunos import CONN_BOUND # NOQA
+ from ._pssunos import CONN_IDLE # NOQA
+
+ # This is public writable API which is read from _pslinux.py and
+ # _pssunos.py via sys.modules.
+ PROCFS_PATH = "/proc"
+
+elif AIX:
+ from . import _psaix as _psplatform
+
+    # This is public API and it will be retrieved from _psaix.py
+    # via sys.modules.
+ PROCFS_PATH = "/proc"
+
+else: # pragma: no cover
+ raise NotImplementedError('platform %s is not supported' % sys.platform)
+
+
+__all__ = [
+ # exceptions
+ "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
+ "TimeoutExpired",
+
+ # constants
+ "version_info", "__version__",
+
+ "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
+ "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
+ "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
+ "STATUS_PARKED",
+
+ "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+ "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+ "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
+ # "CONN_IDLE", "CONN_BOUND",
+
+ "AF_LINK",
+
+ "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
+
+ "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
+
+ "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
+ "SUNOS", "WINDOWS", "AIX",
+
+ # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
+ # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
+ # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
+ # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
+
+ # classes
+ "Process", "Popen",
+
+ # functions
+ "pid_exists", "pids", "process_iter", "wait_procs", # proc
+ "virtual_memory", "swap_memory", # memory
+ "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
+ "cpu_stats", # "cpu_freq", "getloadavg"
+ "net_io_counters", "net_connections", "net_if_addrs", # network
+ "net_if_stats",
+ "disk_io_counters", "disk_partitions", "disk_usage", # disk
+ # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
+ "users", "boot_time", # others
+]
+
+
+__all__.extend(_psplatform.__extra__all__)
+
+# Linux, FreeBSD
+if hasattr(_psplatform.Process, "rlimit"):
+ # Populate global namespace with RLIM* constants.
+ from . import _psutil_posix
+
+ _globals = globals()
+ _name = None
+ for _name in dir(_psutil_posix):
+ if _name.startswith('RLIM') and _name.isupper():
+ _globals[_name] = getattr(_psutil_posix, _name)
+ __all__.append(_name)
+ del _globals, _name
+
+AF_LINK = _psplatform.AF_LINK
+
+__author__ = "Giampaolo Rodola'"
+__version__ = "5.9.4"
+version_info = tuple([int(num) for num in __version__.split('.')])
+
+_timer = getattr(time, 'monotonic', time.time)
+_TOTAL_PHYMEM = None
+_LOWEST_PID = None
+_SENTINEL = object()
+
+# Sanity check in case the user messed up with psutil installation
+# or did something weird with sys.path. In this case we might end
+# up importing a python module using a C extension module which
+# was compiled for a different version of psutil.
+# We want to prevent that by failing sooner rather than later.
+# See: https://github.com/giampaolo/psutil/issues/564
+if (int(__version__.replace('.', '')) !=
+ getattr(_psplatform.cext, 'version', None)):
+ msg = "version conflict: %r C extension module was built for another " \
+ "version of psutil" % _psplatform.cext.__file__
+ if hasattr(_psplatform.cext, 'version'):
+ msg += " (%s instead of %s)" % (
+ '.'.join([x for x in str(_psplatform.cext.version)]), __version__)
+ else:
+ msg += " (different than %s)" % __version__
+ msg += "; you may try to 'pip uninstall psutil', manually remove %s" % (
+ getattr(_psplatform.cext, "__file__",
+ "the existing psutil install directory"))
+ msg += " or clean the virtual env somehow, then reinstall"
+ raise ImportError(msg)
+
+
+# =====================================================================
+# --- Utils
+# =====================================================================
+
+
+if hasattr(_psplatform, 'ppid_map'):
+ # Faster version (Windows and Linux).
+ _ppid_map = _psplatform.ppid_map
+else: # pragma: no cover
+ def _ppid_map():
+ """Return a {pid: ppid, ...} dict for all running processes in
+ one shot. Used to speed up Process.children().
+ """
+ ret = {}
+ for pid in pids():
+ try:
+ ret[pid] = _psplatform.Process(pid).ppid()
+ except (NoSuchProcess, ZombieProcess):
+ pass
+ return ret
+
+
+def _assert_pid_not_reused(fun):
+ """Decorator which raises NoSuchProcess in case a process is no
+ longer running or its PID has been reused.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ if not self.is_running():
+ if self._pid_reused:
+ msg = "process no longer exists and its PID has been reused"
+ else:
+ msg = None
+ raise NoSuchProcess(self.pid, self._name, msg=msg)
+ return fun(self, *args, **kwargs)
+ return wrapper
+
+
+def _pprint_secs(secs):
+ """Format seconds in a human readable form."""
+ now = time.time()
+ secs_ago = int(now - secs)
+ if secs_ago < 60 * 60 * 24:
+ fmt = "%H:%M:%S"
+ else:
+ fmt = "%Y-%m-%d %H:%M:%S"
+ return datetime.datetime.fromtimestamp(secs).strftime(fmt)
+
+
+# =====================================================================
+# --- Process class
+# =====================================================================
+
+
+class Process(object):
+ """Represents an OS process with the given PID.
+ If PID is omitted current process PID (os.getpid()) is used.
+ Raise NoSuchProcess if PID does not exist.
+
+    Note that most of the methods of this class do not make sure
+    that the PID of the process being queried has not been reused.
+    That means you might end up retrieving information referring
+    to another process in case the original process this instance
+    refers to is gone in the meantime.
+
+ The only exceptions for which process identity is pre-emptively
+ checked and guaranteed are:
+
+ - parent()
+ - children()
+ - nice() (set)
+ - ionice() (set)
+ - rlimit() (set)
+ - cpu_affinity (set)
+ - suspend()
+ - resume()
+ - send_signal()
+ - terminate()
+ - kill()
+
+ To prevent this problem for all other methods you can:
+ - use is_running() before querying the process
+ - if you're continuously iterating over a set of Process
+ instances use process_iter() which pre-emptively checks
+ process identity for every yielded instance
+ """
+
+ def __init__(self, pid=None):
+ self._init(pid)
+
+ def _init(self, pid, _ignore_nsp=False):
+ if pid is None:
+ pid = os.getpid()
+ else:
+ if not _PY3 and not isinstance(pid, (int, long)):
+ raise TypeError('pid must be an integer (got %r)' % pid)
+ if pid < 0:
+ raise ValueError('pid must be a positive integer (got %s)'
+ % pid)
+ self._pid = pid
+ self._name = None
+ self._exe = None
+ self._create_time = None
+ self._gone = False
+ self._pid_reused = False
+ self._hash = None
+ self._lock = threading.RLock()
+ # used for caching on Windows only (on POSIX ppid may change)
+ self._ppid = None
+ # platform-specific modules define an _psplatform.Process
+ # implementation class
+ self._proc = _psplatform.Process(pid)
+ self._last_sys_cpu_times = None
+ self._last_proc_cpu_times = None
+ self._exitcode = _SENTINEL
+ # cache creation time for later use in is_running() method
+ try:
+ self.create_time()
+ except AccessDenied:
+ # We should never get here as AFAIK we're able to get
+ # process creation time on all platforms even as a
+ # limited user.
+ pass
+ except ZombieProcess:
+ # Zombies can still be queried by this class (although
+ # not always) and pids() return them so just go on.
+ pass
+ except NoSuchProcess:
+ if not _ignore_nsp:
+ raise NoSuchProcess(pid, msg='process PID not found')
+ else:
+ self._gone = True
+ # This pair is supposed to identify a Process instance
+ # univocally over time (the PID alone is not enough as
+ # it might refer to a process whose PID has been reused).
+ # This will be used later in __eq__() and is_running().
+ self._ident = (self.pid, self._create_time)
+
+ def __str__(self):
+ info = collections.OrderedDict()
+ info["pid"] = self.pid
+ if self._name:
+ info['name'] = self._name
+ with self.oneshot():
+ try:
+ info["name"] = self.name()
+ info["status"] = self.status()
+ except ZombieProcess:
+ info["status"] = "zombie"
+ except NoSuchProcess:
+ info["status"] = "terminated"
+ except AccessDenied:
+ pass
+ if self._exitcode not in (_SENTINEL, None):
+ info["exitcode"] = self._exitcode
+ if self._create_time:
+ info['started'] = _pprint_secs(self._create_time)
+ return "%s.%s(%s)" % (
+ self.__class__.__module__,
+ self.__class__.__name__,
+ ", ".join(["%s=%r" % (k, v) for k, v in info.items()]))
+
+ __repr__ = __str__
+
+ def __eq__(self, other):
+ # Test for equality with another Process object based
+ # on PID and creation time.
+ if not isinstance(other, Process):
+ return NotImplemented
+ return self._ident == other._ident
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ if self._hash is None:
+ self._hash = hash(self._ident)
+ return self._hash
+
+ @property
+ def pid(self):
+ """The process PID."""
+ return self._pid
+
+ # --- utility methods
+
+ @contextlib.contextmanager
+ def oneshot(self):
+ """Utility context manager which considerably speeds up the
+ retrieval of multiple process information at the same time.
+
+        Internally different process info (e.g. name, ppid, uids,
+        gids, ...) may be fetched by using the same routine, but
+        only one piece of information is returned and the others are
+        discarded. When using this context manager the internal
+        routine is executed once (in the example below on name())
+        and the other info is cached.
+
+        The cache is cleared when exiting the context manager block.
+        The advice is to use this every time you retrieve more than
+        one piece of information about the process. If you're lucky,
+        you'll get a hell of a speedup.
+
+ >>> import psutil
+ >>> p = psutil.Process()
+ >>> with p.oneshot():
+ ... p.name() # collect multiple info
+ ... p.cpu_times() # return cached value
+ ... p.cpu_percent() # return cached value
+ ... p.create_time() # return cached value
+ ...
+ >>>
+ """
+ with self._lock:
+ if hasattr(self, "_cache"):
+ # NOOP: this covers the use case where the user enters the
+ # context twice:
+ #
+ # >>> with p.oneshot():
+ # ... with p.oneshot():
+ # ...
+ #
+ # Also, since as_dict() internally uses oneshot()
+ # I expect that the code below will be a pretty common
+ # "mistake" that the user will make, so let's guard
+ # against that:
+ #
+ # >>> with p.oneshot():
+ # ... p.as_dict()
+ # ...
+ yield
+ else:
+ try:
+ # cached in case cpu_percent() is used
+ self.cpu_times.cache_activate(self)
+ # cached in case memory_percent() is used
+ self.memory_info.cache_activate(self)
+ # cached in case parent() is used
+ self.ppid.cache_activate(self)
+ # cached in case username() is used
+ if POSIX:
+ self.uids.cache_activate(self)
+ # specific implementation cache
+ self._proc.oneshot_enter()
+ yield
+ finally:
+ self.cpu_times.cache_deactivate(self)
+ self.memory_info.cache_deactivate(self)
+ self.ppid.cache_deactivate(self)
+ if POSIX:
+ self.uids.cache_deactivate(self)
+ self._proc.oneshot_exit()
+
+ def as_dict(self, attrs=None, ad_value=None):
+ """Utility method returning process information as a
+ hashable dictionary.
+ If *attrs* is specified it must be a list of strings
+ reflecting available Process class' attribute names
+ (e.g. ['cpu_times', 'name']) else all public (read
+ only) attributes are assumed.
+ *ad_value* is the value which gets assigned in case
+ AccessDenied or ZombieProcess exception is raised when
+ retrieving that particular process information.
+ """
+ valid_names = _as_dict_attrnames
+ if attrs is not None:
+ if not isinstance(attrs, (list, tuple, set, frozenset)):
+ raise TypeError("invalid attrs type %s" % type(attrs))
+ attrs = set(attrs)
+ invalid_names = attrs - valid_names
+ if invalid_names:
+ raise ValueError("invalid attr name%s %s" % (
+ "s" if len(invalid_names) > 1 else "",
+ ", ".join(map(repr, invalid_names))))
+
+ retdict = dict()
+ ls = attrs or valid_names
+ with self.oneshot():
+ for name in ls:
+ try:
+ if name == 'pid':
+ ret = self.pid
+ else:
+ meth = getattr(self, name)
+ ret = meth()
+ except (AccessDenied, ZombieProcess):
+ ret = ad_value
+ except NotImplementedError:
+ # in case of not implemented functionality (may happen
+ # on old or exotic systems) we want to crash only if
+ # the user explicitly asked for that particular attr
+ if attrs:
+ raise
+ continue
+ retdict[name] = ret
+ return retdict
+
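A minimal usage sketch of as_dict() based on the docstring above; the PID,
process name and dict ordering shown are illustrative only.

    >>> import psutil
    >>> p = psutil.Process()
    >>> # restrict the query to two fields; inaccessible ones become ad_value
    >>> p.as_dict(attrs=['pid', 'name'], ad_value=None)
    {'pid': 4321, 'name': 'python'}
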
+ def parent(self):
+ """Return the parent process as a Process object pre-emptively
+ checking whether PID has been reused.
+ If no parent is known return None.
+ """
+ lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
+ if self.pid == lowest_pid:
+ return None
+ ppid = self.ppid()
+ if ppid is not None:
+ ctime = self.create_time()
+ try:
+ parent = Process(ppid)
+ if parent.create_time() <= ctime:
+ return parent
+ # ...else ppid has been reused by another process
+ except NoSuchProcess:
+ pass
+
+ def parents(self):
+ """Return the parents of this process as a list of Process
+ instances. If no parents are known return an empty list.
+ """
+ parents = []
+ proc = self.parent()
+ while proc is not None:
+ parents.append(proc)
+ proc = proc.parent()
+ return parents
+
+ def is_running(self):
+ """Return whether this process is running.
+ It also checks if PID has been reused by another process in
+ which case return False.
+ """
+ if self._gone or self._pid_reused:
+ return False
+ try:
+ # Checking if PID is alive is not enough as the PID might
+ # have been reused by another process: we also want to
+ # verify process identity.
+ # Process identity / uniqueness over time is guaranteed by
+ # (PID + creation time) and that is verified in __eq__.
+ self._pid_reused = self != Process(self.pid)
+ return not self._pid_reused
+ except ZombieProcess:
+ # We should never get here as it's already handled in
+ # Process.__init__; here just for extra safety.
+ return True
+ except NoSuchProcess:
+ self._gone = True
+ return False
+
+ # --- actual API
+
+ @memoize_when_activated
+ def ppid(self):
+ """The process parent PID.
+ On Windows the return value is cached after first call.
+ """
+ # On POSIX we don't want to cache the ppid as it may unexpectedly
+ # change to 1 (init) in case this process turns into a zombie:
+ # https://github.com/giampaolo/psutil/issues/321
+ # http://stackoverflow.com/questions/356722/
+
+ # XXX should we check creation time here rather than in
+ # Process.parent()?
+ if POSIX:
+ return self._proc.ppid()
+ else: # pragma: no cover
+ self._ppid = self._ppid or self._proc.ppid()
+ return self._ppid
+
+ def name(self):
+ """The process name. The return value is cached after first call."""
+ # Process name is only cached on Windows as on POSIX it may
+ # change, see:
+ # https://github.com/giampaolo/psutil/issues/692
+ if WINDOWS and self._name is not None:
+ return self._name
+ name = self._proc.name()
+ if POSIX and len(name) >= 15:
+ # On UNIX the name gets truncated to the first 15 characters.
+ # If it matches the first part of the cmdline we return that
+ # one instead because it's usually more explicative.
+ # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
+ try:
+ cmdline = self.cmdline()
+ except AccessDenied:
+ pass
+ else:
+ if cmdline:
+ extended_name = os.path.basename(cmdline[0])
+ if extended_name.startswith(name):
+ name = extended_name
+ self._name = name
+ self._proc._name = name
+ return name
+
+ def exe(self):
+ """The process executable as an absolute path.
+ May also be an empty string.
+ The return value is cached after first call.
+ """
+ def guess_it(fallback):
+ # try to guess exe from cmdline[0] in absence of a native
+ # exe representation
+ cmdline = self.cmdline()
+ if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
+ exe = cmdline[0] # the possible exe
+ # Attempt to guess only in case of an absolute path.
+ # It is not safe otherwise as the process might have
+ # changed cwd.
+ if (os.path.isabs(exe) and
+ os.path.isfile(exe) and
+ os.access(exe, os.X_OK)):
+ return exe
+ if isinstance(fallback, AccessDenied):
+ raise fallback
+ return fallback
+
+ if self._exe is None:
+ try:
+ exe = self._proc.exe()
+ except AccessDenied as err:
+ return guess_it(fallback=err)
+ else:
+ if not exe:
+ # underlying implementation can legitimately return an
+ # empty string; if that's the case we don't want to
+ # raise AD while guessing from the cmdline
+ try:
+ exe = guess_it(fallback=exe)
+ except AccessDenied:
+ pass
+ self._exe = exe
+ return self._exe
+
+ def cmdline(self):
+ """The command line this process has been called with."""
+ return self._proc.cmdline()
+
+ def status(self):
+ """The process current status as a STATUS_* constant."""
+ try:
+ return self._proc.status()
+ except ZombieProcess:
+ return STATUS_ZOMBIE
+
+ def username(self):
+ """The name of the user that owns the process.
+ On UNIX this is calculated by using *real* process uid.
+ """
+ if POSIX:
+ if pwd is None:
+ # might happen if python was installed from sources
+ raise ImportError(
+ "requires pwd module shipped with standard python")
+ real_uid = self.uids().real
+ try:
+ return pwd.getpwuid(real_uid).pw_name
+ except KeyError:
+ # the uid can't be resolved by the system
+ return str(real_uid)
+ else:
+ return self._proc.username()
+
+ def create_time(self):
+ """The process creation time as a floating point number
+ expressed in seconds since the epoch.
+ The return value is cached after first call.
+ """
+ if self._create_time is None:
+ self._create_time = self._proc.create_time()
+ return self._create_time
+
+ def cwd(self):
+ """Process current working directory as an absolute path."""
+ return self._proc.cwd()
+
+ def nice(self, value=None):
+ """Get or set process niceness (priority)."""
+ if value is None:
+ return self._proc.nice_get()
+ else:
+ if not self.is_running():
+ raise NoSuchProcess(self.pid, self._name)
+ self._proc.nice_set(value)
+
+ if POSIX:
+
+ @memoize_when_activated
+ def uids(self):
+ """Return process UIDs as a (real, effective, saved)
+ namedtuple.
+ """
+ return self._proc.uids()
+
+ def gids(self):
+ """Return process GIDs as a (real, effective, saved)
+ namedtuple.
+ """
+ return self._proc.gids()
+
+ def terminal(self):
+ """The terminal associated with this process, if any,
+ else None.
+ """
+ return self._proc.terminal()
+
+ def num_fds(self):
+ """Return the number of file descriptors opened by this
+ process (POSIX only).
+ """
+ return self._proc.num_fds()
+
+ # Linux, BSD, AIX and Windows only
+ if hasattr(_psplatform.Process, "io_counters"):
+
+ def io_counters(self):
+ """Return process I/O statistics as a
+ (read_count, write_count, read_bytes, write_bytes)
+ namedtuple.
+ Those are the number of read/write calls performed and the
+ amount of bytes read and written by the process.
+ """
+ return self._proc.io_counters()
+
+ # Linux and Windows
+ if hasattr(_psplatform.Process, "ionice_get"):
+
+ def ionice(self, ioclass=None, value=None):
+ """Get or set process I/O niceness (priority).
+
+ On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
+ *value* is a number which goes from 0 to 7. The higher the
+ value, the lower the I/O priority of the process.
+
+ On Windows only *ioclass* is used and it can be set to 2
+ (normal), 1 (low) or 0 (very low).
+
+ Available on Linux and Windows > Vista only.
+ """
+ if ioclass is None:
+ if value is not None:
+ raise ValueError("'ioclass' argument must be specified")
+ return self._proc.ionice_get()
+ else:
+ return self._proc.ionice_set(ioclass, value)
+
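A small sketch of the getter/setter form described above, assuming Linux,
where the IOPRIO_CLASS_* constants are re-exported by this module.

    >>> import psutil
    >>> p = psutil.Process()
    >>> p.ionice()                          # get current (ioclass, value)
    >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set: serve I/O only when disks are idle
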
+ # Linux / FreeBSD only
+ if hasattr(_psplatform.Process, "rlimit"):
+
+ def rlimit(self, resource, limits=None):
+ """Get or set process resource limits as a (soft, hard)
+ tuple.
+
+ *resource* is one of the RLIMIT_* constants.
+ *limits* is supposed to be a (soft, hard) tuple.
+
+ See "man prlimit" for further info.
+ Available on Linux and FreeBSD only.
+ """
+ return self._proc.rlimit(resource, limits)
+
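A usage sketch for rlimit(), assuming Linux, where the RLIMIT_* constants
are injected into the psutil namespace by the loop further above; the
numeric limits are illustrative.

    >>> import psutil
    >>> p = psutil.Process()
    >>> soft, hard = p.rlimit(psutil.RLIMIT_NOFILE)   # get (soft, hard)
    >>> p.rlimit(psutil.RLIMIT_NOFILE, (1024, hard))  # lower the soft limit only
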
+ # Windows, Linux and FreeBSD only
+ if hasattr(_psplatform.Process, "cpu_affinity_get"):
+
+ def cpu_affinity(self, cpus=None):
+ """Get or set process CPU affinity.
+ If specified, *cpus* must be a list of CPUs for which you
+ want to set the affinity (e.g. [0, 1]).
+        If an empty list is passed, all eligible CPUs are assumed
+ (and set).
+ (Windows, Linux and BSD only).
+ """
+ if cpus is None:
+ return sorted(set(self._proc.cpu_affinity_get()))
+ else:
+ if not cpus:
+ if hasattr(self._proc, "_get_eligible_cpus"):
+ cpus = self._proc._get_eligible_cpus()
+ else:
+ cpus = tuple(range(len(cpu_times(percpu=True))))
+ self._proc.cpu_affinity_set(list(set(cpus)))
+
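A short sketch of the get/set behavior documented above; the CPU indexes
are illustrative.

    >>> import psutil
    >>> p = psutil.Process()
    >>> p.cpu_affinity()        # e.g. [0, 1, 2, 3]
    >>> p.cpu_affinity([0, 1])  # pin the process to the first two CPUs
    >>> p.cpu_affinity([])      # reset to all eligible CPUs
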
+ # Linux, FreeBSD, SunOS
+ if hasattr(_psplatform.Process, "cpu_num"):
+
+ def cpu_num(self):
+ """Return what CPU this process is currently running on.
+ The returned number should be <= psutil.cpu_count()
+ and <= len(psutil.cpu_percent(percpu=True)).
+ It may be used in conjunction with
+ psutil.cpu_percent(percpu=True) to observe the system
+ workload distributed across CPUs.
+ """
+ return self._proc.cpu_num()
+
+    # All platforms have it, but maybe not in the future.
+ if hasattr(_psplatform.Process, "environ"):
+
+ def environ(self):
+ """The environment variables of the process as a dict. Note: this
+ might not reflect changes made after the process started. """
+ return self._proc.environ()
+
+ if WINDOWS:
+
+ def num_handles(self):
+ """Return the number of handles opened by this process
+ (Windows only).
+ """
+ return self._proc.num_handles()
+
+ def num_ctx_switches(self):
+ """Return the number of voluntary and involuntary context
+ switches performed by this process.
+ """
+ return self._proc.num_ctx_switches()
+
+ def num_threads(self):
+ """Return the number of threads used by this process."""
+ return self._proc.num_threads()
+
+ if hasattr(_psplatform.Process, "threads"):
+
+ def threads(self):
+ """Return threads opened by process as a list of
+ (id, user_time, system_time) namedtuples representing
+ thread id and thread CPU times (user/system).
+ On OpenBSD this method requires root access.
+ """
+ return self._proc.threads()
+
+ @_assert_pid_not_reused
+ def children(self, recursive=False):
+ """Return the children of this process as a list of Process
+ instances, pre-emptively checking whether PID has been reused.
+        If *recursive* is True return all the descendants as well.
+
+ Example (A == this process):
+
+ A ─┐
+ │
+ ├─ B (child) ─┐
+ │ └─ X (grandchild) ─┐
+ │ └─ Y (great grandchild)
+ ├─ C (child)
+ └─ D (child)
+
+ >>> import psutil
+ >>> p = psutil.Process()
+ >>> p.children()
+ B, C, D
+ >>> p.children(recursive=True)
+ B, X, Y, C, D
+
+ Note that in the example above if process X disappears
+ process Y won't be listed as the reference to process A
+ is lost.
+ """
+ ppid_map = _ppid_map()
+ ret = []
+ if not recursive:
+ for pid, ppid in ppid_map.items():
+ if ppid == self.pid:
+ try:
+ child = Process(pid)
+ # if child happens to be older than its parent
+ # (self) it means child's PID has been reused
+ if self.create_time() <= child.create_time():
+ ret.append(child)
+ except (NoSuchProcess, ZombieProcess):
+ pass
+ else:
+ # Construct a {pid: [child pids]} dict
+ reverse_ppid_map = collections.defaultdict(list)
+ for pid, ppid in ppid_map.items():
+ reverse_ppid_map[ppid].append(pid)
+ # Recursively traverse that dict, starting from self.pid,
+ # such that we only call Process() on actual children
+ seen = set()
+ stack = [self.pid]
+ while stack:
+ pid = stack.pop()
+ if pid in seen:
+ # Since pids can be reused while the ppid_map is
+ # constructed, there may be rare instances where
+ # there's a cycle in the recorded process "tree".
+ continue
+ seen.add(pid)
+ for child_pid in reverse_ppid_map[pid]:
+ try:
+ child = Process(child_pid)
+ # if child happens to be older than its parent
+ # (self) it means child's PID has been reused
+ intime = self.create_time() <= child.create_time()
+ if intime:
+ ret.append(child)
+ stack.append(child_pid)
+ except (NoSuchProcess, ZombieProcess):
+ pass
+ return ret
+
+ def cpu_percent(self, interval=None):
+ """Return a float representing the current process CPU
+ utilization as a percentage.
+
+ When *interval* is 0.0 or None (default) compares process times
+ to system CPU times elapsed since last call, returning
+        immediately (non-blocking). That means that the first time
+        this is called it will return a meaningless 0.0 value which
+        you should ignore.
+
+ When *interval* is > 0.0 compares process times to system CPU
+ times elapsed before and after the interval (blocking).
+
+        In this case it is recommended for accuracy that this
+        function be called with at least 0.1 seconds between calls.
+
+ A value > 100.0 can be returned in case of processes running
+ multiple threads on different CPU cores.
+
+ The returned value is explicitly NOT split evenly between
+ all available logical CPUs. This means that a busy loop process
+ running on a system with 2 logical CPUs will be reported as
+ having 100% CPU utilization instead of 50%.
+
+ Examples:
+
+ >>> import psutil
+ >>> p = psutil.Process(os.getpid())
+ >>> # blocking
+ >>> p.cpu_percent(interval=1)
+ 2.0
+ >>> # non-blocking (percentage since last call)
+ >>> p.cpu_percent(interval=None)
+ 2.9
+ >>>
+ """
+ blocking = interval is not None and interval > 0.0
+ if interval is not None and interval < 0:
+ raise ValueError("interval is not positive (got %r)" % interval)
+ num_cpus = cpu_count() or 1
+
+ def timer():
+ return _timer() * num_cpus
+
+ if blocking:
+ st1 = timer()
+ pt1 = self._proc.cpu_times()
+ time.sleep(interval)
+ st2 = timer()
+ pt2 = self._proc.cpu_times()
+ else:
+ st1 = self._last_sys_cpu_times
+ pt1 = self._last_proc_cpu_times
+ st2 = timer()
+ pt2 = self._proc.cpu_times()
+ if st1 is None or pt1 is None:
+ self._last_sys_cpu_times = st2
+ self._last_proc_cpu_times = pt2
+ return 0.0
+
+ delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
+ delta_time = st2 - st1
+ # reset values for next call in case of interval == None
+ self._last_sys_cpu_times = st2
+ self._last_proc_cpu_times = pt2
+
+ try:
+ # This is the utilization split evenly between all CPUs.
+ # E.g. a busy loop process on a 2-CPU-cores system at this
+ # point is reported as 50% instead of 100%.
+ overall_cpus_percent = ((delta_proc / delta_time) * 100)
+ except ZeroDivisionError:
+ # interval was too low
+ return 0.0
+ else:
+ # Note 1:
+ # in order to emulate "top" we multiply the value for the num
+ # of CPU cores. This way the busy process will be reported as
+ # having 100% (or more) usage.
+ #
+ # Note 2:
+ # taskmgr.exe on Windows differs in that it will show 50%
+ # instead.
+ #
+ # Note 3:
+ # a percentage > 100 is legitimate as it can result from a
+ # process with multiple threads running on different CPU
+ # cores (top does the same), see:
+ # http://stackoverflow.com/questions/1032357
+ # https://github.com/giampaolo/psutil/issues/474
+ single_cpu_percent = overall_cpus_percent * num_cpus
+ return round(single_cpu_percent, 1)
+
+ @memoize_when_activated
+ def cpu_times(self):
+ """Return a (user, system, children_user, children_system)
+ namedtuple representing the accumulated process time, in
+ seconds.
+ This is similar to os.times() but per-process.
+ On macOS and Windows children_user and children_system are
+ always set to 0.
+ """
+ return self._proc.cpu_times()
+
+ @memoize_when_activated
+ def memory_info(self):
+ """Return a namedtuple with variable fields depending on the
+ platform, representing memory information about the process.
+
+ The "portable" fields available on all platforms are `rss` and `vms`.
+
+ All numbers are expressed in bytes.
+ """
+ return self._proc.memory_info()
+
+ @_common.deprecated_method(replacement="memory_info")
+ def memory_info_ex(self):
+ return self.memory_info()
+
+ def memory_full_info(self):
+ """This method returns the same information as memory_info(),
+        plus, on some platforms (Linux, macOS, Windows), also provides
+ additional metrics (USS, PSS and swap).
+ The additional metrics provide a better representation of actual
+ process memory usage.
+
+ Namely USS is the memory which is unique to a process and which
+ would be freed if the process was terminated right now.
+
+        It does so by passing through the whole process address space.
+ As such it usually requires higher user privileges than
+ memory_info() and is considerably slower.
+ """
+ return self._proc.memory_full_info()
+
+ def memory_percent(self, memtype="rss"):
+ """Compare process memory to total physical system memory and
+ calculate process memory utilization as a percentage.
+ *memtype* argument is a string that dictates what type of
+ process memory you want to compare against (defaults to "rss").
+ The list of available strings can be obtained like this:
+
+ >>> psutil.Process().memory_info()._fields
+ ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
+ """
+ valid_types = list(_psplatform.pfullmem._fields)
+ if memtype not in valid_types:
+ raise ValueError("invalid memtype %r; valid types are %r" % (
+ memtype, tuple(valid_types)))
+ fun = self.memory_info if memtype in _psplatform.pmem._fields else \
+ self.memory_full_info
+ metrics = fun()
+ value = getattr(metrics, memtype)
+
+ # use cached value if available
+ total_phymem = _TOTAL_PHYMEM or virtual_memory().total
+ if not total_phymem > 0:
+ # we should never get here
+ raise ValueError(
+ "can't calculate process memory percent because "
+ "total physical system memory is not positive (%r)"
+ % total_phymem)
+ return (value / float(total_phymem)) * 100
+
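A brief sketch of memory_percent() with a non-default *memtype*, assuming
a platform whose memory_full_info() exposes 'uss' (e.g. Linux); the
percentages are illustrative.

    >>> import psutil
    >>> p = psutil.Process()
    >>> p.memory_percent()               # rss vs. total physical memory
    0.6
    >>> p.memory_percent(memtype='uss')  # unique set size, uses memory_full_info()
    0.4
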
+ if hasattr(_psplatform.Process, "memory_maps"):
+ def memory_maps(self, grouped=True):
+ """Return process' mapped memory regions as a list of namedtuples
+ whose fields are variable depending on the platform.
+
+ If *grouped* is True the mapped regions with the same 'path'
+ are grouped together and the different memory fields are summed.
+
+ If *grouped* is False every mapped region is shown as a single
+ entity and the namedtuple will also include the mapped region's
+ address space ('addr') and permission set ('perms').
+ """
+ it = self._proc.memory_maps()
+ if grouped:
+ d = {}
+ for tupl in it:
+ path = tupl[2]
+ nums = tupl[3:]
+ try:
+ d[path] = map(lambda x, y: x + y, d[path], nums)
+ except KeyError:
+ d[path] = nums
+ nt = _psplatform.pmmap_grouped
+ return [nt(path, *d[path]) for path in d] # NOQA
+ else:
+ nt = _psplatform.pmmap_ext
+ return [nt(*x) for x in it]
+
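A sketch contrasting the grouped and ungrouped forms described above
(Linux field names assumed; the path and address are illustrative).

    >>> import psutil
    >>> p = psutil.Process()
    >>> p.memory_maps()[0].path               # grouped: one entry per mapped path
    '/usr/lib/locale/locale-archive'
    >>> p.memory_maps(grouped=False)[0].addr  # ungrouped: includes 'addr' and 'perms'
    '7f3a4c000000-7f3a4c021000'
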
+ def open_files(self):
+ """Return files opened by process as a list of
+ (path, fd) namedtuples including the absolute file name
+ and file descriptor number.
+ """
+ return self._proc.open_files()
+
+ def connections(self, kind='inet'):
+ """Return socket connections opened by process as a list of
+ (fd, family, type, laddr, raddr, status) namedtuples.
+ The *kind* parameter filters for connections that match the
+ following criteria:
+
+ +------------+----------------------------------------------------+
+ | Kind Value | Connections using |
+ +------------+----------------------------------------------------+
+ | inet | IPv4 and IPv6 |
+ | inet4 | IPv4 |
+ | inet6 | IPv6 |
+ | tcp | TCP |
+ | tcp4 | TCP over IPv4 |
+ | tcp6 | TCP over IPv6 |
+ | udp | UDP |
+ | udp4 | UDP over IPv4 |
+ | udp6 | UDP over IPv6 |
+ | unix | UNIX socket (both UDP and TCP protocols) |
+ | all | the sum of all the possible families and protocols |
+ +------------+----------------------------------------------------+
+ """
+ return self._proc.connections(kind)
+
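A minimal sketch of filtering by the *kind* values in the table above,
combined with the CONN_* constants exported by this module.

    >>> import psutil
    >>> p = psutil.Process()
    >>> # local addresses of TCP sockets currently in the LISTEN state
    >>> [c.laddr for c in p.connections(kind='tcp') if c.status == psutil.CONN_LISTEN]
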
+ # --- signals
+
+ if POSIX:
+ def _send_signal(self, sig):
+ assert not self.pid < 0, self.pid
+ if self.pid == 0:
+ # see "man 2 kill"
+ raise ValueError(
+ "preventing sending signal to process with PID 0 as it "
+ "would affect every process in the process group of the "
+ "calling process (os.getpid()) instead of PID 0")
+ try:
+ os.kill(self.pid, sig)
+ except ProcessLookupError:
+ if OPENBSD and pid_exists(self.pid):
+ # We do this because os.kill() lies in case of
+ # zombie processes.
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ else:
+ self._gone = True
+ raise NoSuchProcess(self.pid, self._name)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+
+ @_assert_pid_not_reused
+ def send_signal(self, sig):
+ """Send a signal *sig* to process pre-emptively checking
+        whether PID has been reused (see signal module constants).
+ On Windows only SIGTERM is valid and is treated as an alias
+ for kill().
+ """
+ if POSIX:
+ self._send_signal(sig)
+ else: # pragma: no cover
+ self._proc.send_signal(sig)
+
+ @_assert_pid_not_reused
+ def suspend(self):
+ """Suspend process execution with SIGSTOP pre-emptively checking
+ whether PID has been reused.
+ On Windows this has the effect of suspending all process threads.
+ """
+ if POSIX:
+ self._send_signal(signal.SIGSTOP)
+ else: # pragma: no cover
+ self._proc.suspend()
+
+ @_assert_pid_not_reused
+ def resume(self):
+ """Resume process execution with SIGCONT pre-emptively checking
+ whether PID has been reused.
+ On Windows this has the effect of resuming all process threads.
+ """
+ if POSIX:
+ self._send_signal(signal.SIGCONT)
+ else: # pragma: no cover
+ self._proc.resume()
+
+ @_assert_pid_not_reused
+ def terminate(self):
+ """Terminate the process with SIGTERM pre-emptively checking
+ whether PID has been reused.
+ On Windows this is an alias for kill().
+ """
+ if POSIX:
+ self._send_signal(signal.SIGTERM)
+ else: # pragma: no cover
+ self._proc.kill()
+
+ @_assert_pid_not_reused
+ def kill(self):
+ """Kill the current process with SIGKILL pre-emptively checking
+ whether PID has been reused.
+ """
+ if POSIX:
+ self._send_signal(signal.SIGKILL)
+ else: # pragma: no cover
+ self._proc.kill()
+
+ def wait(self, timeout=None):
+ """Wait for process to terminate and, if process is a children
+ of os.getpid(), also return its exit code, else None.
+ On Windows there's no such limitation (exit code is always
+ returned).
+
+ If the process is already terminated immediately return None
+ instead of raising NoSuchProcess.
+
+ If *timeout* (in seconds) is specified and process is still
+ alive raise TimeoutExpired.
+
+ To wait for multiple Process(es) use psutil.wait_procs().
+ """
+ if timeout is not None and not timeout >= 0:
+ raise ValueError("timeout must be a positive integer")
+ if self._exitcode is not _SENTINEL:
+ return self._exitcode
+ self._exitcode = self._proc.wait(timeout)
+ return self._exitcode
+
+
+# The valid attr names which can be processed by Process.as_dict().
+_as_dict_attrnames = set(
+ [x for x in dir(Process) if not x.startswith('_') and x not in
+ ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
+ 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
+ 'memory_info_ex', 'oneshot']])
+
+
+# =====================================================================
+# --- Popen class
+# =====================================================================
+
+
+class Popen(Process):
+ """Same as subprocess.Popen, but in addition it provides all
+ psutil.Process methods in a single class.
+ For the following methods which are common to both classes, psutil
+ implementation takes precedence:
+
+ * send_signal()
+ * terminate()
+ * kill()
+
+ This is done in order to avoid killing another process in case its
+ PID has been reused, fixing BPO-6973.
+
+ >>> import psutil
+ >>> from subprocess import PIPE
+ >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
+ >>> p.name()
+ 'python'
+ >>> p.uids()
+ user(real=1000, effective=1000, saved=1000)
+ >>> p.username()
+ 'giampaolo'
+ >>> p.communicate()
+ ('hi\n', None)
+ >>> p.terminate()
+ >>> p.wait(timeout=2)
+ 0
+ >>>
+ """
+
+ def __init__(self, *args, **kwargs):
+ # Explicitly avoid to raise NoSuchProcess in case the process
+ # spawned by subprocess.Popen terminates too quickly, see:
+ # https://github.com/giampaolo/psutil/issues/193
+ self.__subproc = subprocess.Popen(*args, **kwargs)
+ self._init(self.__subproc.pid, _ignore_nsp=True)
+
+ def __dir__(self):
+ return sorted(set(dir(Popen) + dir(subprocess.Popen)))
+
+ def __enter__(self):
+ if hasattr(self.__subproc, '__enter__'):
+ self.__subproc.__enter__()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ if hasattr(self.__subproc, '__exit__'):
+ return self.__subproc.__exit__(*args, **kwargs)
+ else:
+ if self.stdout:
+ self.stdout.close()
+ if self.stderr:
+ self.stderr.close()
+ try:
+ # Flushing a BufferedWriter may raise an error.
+ if self.stdin:
+ self.stdin.close()
+ finally:
+ # Wait for the process to terminate, to avoid zombies.
+ self.wait()
+
+ def __getattribute__(self, name):
+ try:
+ return object.__getattribute__(self, name)
+ except AttributeError:
+ try:
+ return object.__getattribute__(self.__subproc, name)
+ except AttributeError:
+ raise AttributeError("%s instance has no attribute '%s'"
+ % (self.__class__.__name__, name))
+
+ def wait(self, timeout=None):
+ if self.__subproc.returncode is not None:
+ return self.__subproc.returncode
+ ret = super(Popen, self).wait(timeout)
+ self.__subproc.returncode = ret
+ return ret
+
+
+# =====================================================================
+# --- system processes related functions
+# =====================================================================
+
+
+def pids():
+ """Return a list of current running PIDs."""
+ global _LOWEST_PID
+ ret = sorted(_psplatform.pids())
+ _LOWEST_PID = ret[0]
+ return ret
+
+
+def pid_exists(pid):
+ """Return True if given PID exists in the current process list.
+ This is faster than doing "pid in psutil.pids()" and
+ should be preferred.
+ """
+ if pid < 0:
+ return False
+ elif pid == 0 and POSIX:
+ # On POSIX we use os.kill() to determine PID existence.
+ # According to "man 2 kill" PID 0 has a special meaning
+ # though: it refers to <<every process in the process
+        # group of the calling process>> and that is not what we want
+ # to do here.
+ return pid in pids()
+ else:
+ return _psplatform.pid_exists(pid)
+
+
+_pmap = {}
+
+
+def process_iter(attrs=None, ad_value=None):
+ """Return a generator yielding a Process instance for all
+ running processes.
+
+ Every new Process instance is only created once and then cached
+ into an internal table which is updated every time this is used.
+
+ Cached Process instances are checked for identity so that you're
+ safe in case a PID has been reused by another process, in which
+ case the cached instance is updated.
+
+ The sorting order in which processes are yielded is based on
+ their PIDs.
+
+ *attrs* and *ad_value* have the same meaning as in
+ Process.as_dict(). If *attrs* is specified as_dict() is called
+ and the resulting dict is stored as a 'info' attribute attached
+ to returned Process instance.
+ If *attrs* is an empty list it will retrieve all process info
+ (slow).
+ """
+ global _pmap
+
+ def add(pid):
+ proc = Process(pid)
+ if attrs is not None:
+ proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
+ pmap[proc.pid] = proc
+ return proc
+
+ def remove(pid):
+ pmap.pop(pid, None)
+
+ pmap = _pmap.copy()
+ a = set(pids())
+ b = set(pmap.keys())
+ new_pids = a - b
+ gone_pids = b - a
+ for pid in gone_pids:
+ remove(pid)
+ try:
+ ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
+ for pid, proc in ls:
+ try:
+ if proc is None: # new process
+ yield add(pid)
+ else:
+ # use is_running() to check whether PID has been
+ # reused by another process in which case yield a
+ # new Process instance
+ if proc.is_running():
+ if attrs is not None:
+ proc.info = proc.as_dict(
+ attrs=attrs, ad_value=ad_value)
+ yield proc
+ else:
+ yield add(pid)
+ except NoSuchProcess:
+ remove(pid)
+ except AccessDenied:
+ # Process creation time can't be determined hence there's
+ # no way to tell whether the pid of the cached process
+ # has been reused. Just return the cached version.
+ if proc is None and pid in pmap:
+ try:
+ yield pmap[pid]
+ except KeyError:
+ # If we get here it is likely that 2 threads were
+ # using process_iter().
+ pass
+ else:
+ raise
+ finally:
+ _pmap = pmap
+
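A small sketch of the *attrs* / 'info' behavior documented above; the PID
and process name printed are illustrative.

    >>> import psutil
    >>> for proc in psutil.process_iter(attrs=['pid', 'name']):
    ...     if proc.info['name'] == 'python':
    ...         print(proc.info)  # dict pre-filled via as_dict()
    ...
    {'pid': 4321, 'name': 'python'}
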
+
+def wait_procs(procs, timeout=None, callback=None):
+ """Convenience function which waits for a list of processes to
+ terminate.
+
+ Return a (gone, alive) tuple indicating which processes
+ are gone and which ones are still alive.
+
+ The gone ones will have a new *returncode* attribute indicating
+ process exit status (may be None).
+
+ *callback* is a function which gets called every time a process
+ terminates (a Process instance is passed as callback argument).
+
+ Function will return as soon as all processes terminate or when
+ *timeout* occurs.
+    Unlike Process.wait() it will not raise TimeoutExpired if
+ *timeout* occurs.
+
+ Typical use case is:
+
+ - send SIGTERM to a list of processes
+ - give them some time to terminate
+ - send SIGKILL to those ones which are still alive
+
+ Example:
+
+ >>> def on_terminate(proc):
+ ... print("process {} terminated".format(proc))
+ ...
+ >>> for p in procs:
+ ... p.terminate()
+ ...
+ >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
+ >>> for p in alive:
+ ... p.kill()
+ """
+ def check_gone(proc, timeout):
+ try:
+ returncode = proc.wait(timeout=timeout)
+ except TimeoutExpired:
+ pass
+ except _SubprocessTimeoutExpired:
+ pass
+ else:
+ if returncode is not None or not proc.is_running():
+ # Set new Process instance attribute.
+ proc.returncode = returncode
+ gone.add(proc)
+ if callback is not None:
+ callback(proc)
+
+ if timeout is not None and not timeout >= 0:
+ msg = "timeout must be a positive integer, got %s" % timeout
+ raise ValueError(msg)
+ gone = set()
+ alive = set(procs)
+ if callback is not None and not callable(callback):
+ raise TypeError("callback %r is not a callable" % callable)
+ if timeout is not None:
+ deadline = _timer() + timeout
+
+ while alive:
+ if timeout is not None and timeout <= 0:
+ break
+ for proc in alive:
+ # Make sure that every complete iteration (all processes)
+ # will last max 1 sec.
+ # We do this because we don't want to wait too long on a
+ # single process: in case it terminates too late other
+ # processes may disappear in the meantime and their PID
+ # reused.
+ max_timeout = 1.0 / len(alive)
+ if timeout is not None:
+ timeout = min((deadline - _timer()), max_timeout)
+ if timeout <= 0:
+ break
+ check_gone(proc, timeout)
+ else:
+ check_gone(proc, max_timeout)
+ alive = alive - gone
+
+ if alive:
+ # Last attempt over processes survived so far.
+ # timeout == 0 won't make this function wait any further.
+ for proc in alive:
+ check_gone(proc, 0)
+ alive = alive - gone
+
+ return (list(gone), list(alive))
+
+
+# =====================================================================
+# --- CPU related functions
+# =====================================================================
+
+
+def cpu_count(logical=True):
+ """Return the number of logical CPUs in the system (same as
+ os.cpu_count() in Python 3.4).
+
+ If *logical* is False return the number of physical cores only
+ (e.g. hyper thread CPUs are excluded).
+
+ Return None if undetermined.
+
+ The return value is cached after first call.
+ If desired cache can be cleared like this:
+
+ >>> psutil.cpu_count.cache_clear()
+ """
+ if logical:
+ ret = _psplatform.cpu_count_logical()
+ else:
+ ret = _psplatform.cpu_count_cores()
+ if ret is not None and ret < 1:
+ ret = None
+ return ret
+
+
+def cpu_times(percpu=False):
+ """Return system-wide CPU times as a namedtuple.
+ Every CPU time represents the seconds the CPU has spent in the
+ given mode. The namedtuple's fields availability varies depending on the
+ platform:
+
+ - user
+ - system
+ - idle
+ - nice (UNIX)
+ - iowait (Linux)
+ - irq (Linux, FreeBSD)
+ - softirq (Linux)
+ - steal (Linux >= 2.6.11)
+ - guest (Linux >= 2.6.24)
+ - guest_nice (Linux >= 3.2.0)
+
+ When *percpu* is True return a list of namedtuples for each CPU.
+ First element of the list refers to first CPU, second element
+ to second CPU and so on.
+ The order of the list is consistent across calls.
+ """
+ if not percpu:
+ return _psplatform.cpu_times()
+ else:
+ return _psplatform.per_cpu_times()
+
+
+try:
+ _last_cpu_times = cpu_times()
+except Exception:
+ # Don't want to crash at import time.
+ _last_cpu_times = None
+
+try:
+ _last_per_cpu_times = cpu_times(percpu=True)
+except Exception:
+ # Don't want to crash at import time.
+ _last_per_cpu_times = None
+
+
+def _cpu_tot_time(times):
+ """Given a cpu_time() ntuple calculates the total CPU time
+ (including idle time).
+ """
+ tot = sum(times)
+ if LINUX:
+ # On Linux guest times are already accounted in "user" or
+ # "nice" times, so we subtract them from total.
+ # Htop does the same. References:
+ # https://github.com/giampaolo/psutil/pull/940
+ # http://unix.stackexchange.com/questions/178045
+ # https://github.com/torvalds/linux/blob/
+ # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
+ # cputime.c#L158
+ tot -= getattr(times, "guest", 0) # Linux 2.6.24+
+ tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
+ return tot
+
+
+def _cpu_busy_time(times):
+ """Given a cpu_time() ntuple calculates the busy CPU time.
+ We do so by subtracting all idle CPU times.
+ """
+ busy = _cpu_tot_time(times)
+ busy -= times.idle
+ # Linux: "iowait" is time during which the CPU does not do anything
+ # (waits for IO to complete). On Linux IO wait is *not* accounted
+ # in "idle" time so we subtract it. Htop does the same.
+ # References:
+ # https://github.com/torvalds/linux/blob/
+ # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
+ busy -= getattr(times, "iowait", 0)
+ return busy
+
+
+def _cpu_times_deltas(t1, t2):
+ assert t1._fields == t2._fields, (t1, t2)
+ field_deltas = []
+ for field in _psplatform.scputimes._fields:
+ field_delta = getattr(t2, field) - getattr(t1, field)
+ # CPU times are always supposed to increase over time
+ # or at least remain the same and that's because time
+ # cannot go backwards.
+ # Surprisingly sometimes this might not be the case (at
+ # least on Windows and Linux), see:
+ # https://github.com/giampaolo/psutil/issues/392
+ # https://github.com/giampaolo/psutil/issues/645
+ # https://github.com/giampaolo/psutil/issues/1210
+ # Trim negative deltas to zero to ignore decreasing fields.
+ # top does the same. Reference:
+ # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
+ field_delta = max(0, field_delta)
+ field_deltas.append(field_delta)
+ return _psplatform.scputimes(*field_deltas)
+
+
+def cpu_percent(interval=None, percpu=False):
+ """Return a float representing the current system-wide CPU
+ utilization as a percentage.
+
+ When *interval* is > 0.0 compares system CPU times elapsed before
+ and after the interval (blocking).
+
+ When *interval* is 0.0 or None compares system CPU times elapsed
+ since last call or module import, returning immediately (non
+ blocking). That means the first time this is called it will
+ return a meaningless 0.0 value which you should ignore.
+ In this case is recommended for accuracy that this function be
+ called with at least 0.1 seconds between calls.
+
+ When *percpu* is True returns a list of floats representing the
+ utilization as a percentage for each CPU.
+ First element of the list refers to first CPU, second element
+ to second CPU and so on.
+ The order of the list is consistent across calls.
+
+ Examples:
+
+ >>> # blocking, system-wide
+ >>> psutil.cpu_percent(interval=1)
+ 2.0
+ >>>
+ >>> # blocking, per-cpu
+ >>> psutil.cpu_percent(interval=1, percpu=True)
+ [2.0, 1.0]
+ >>>
+ >>> # non-blocking (percentage since last call)
+ >>> psutil.cpu_percent(interval=None)
+ 2.9
+ >>>
+ """
+ global _last_cpu_times
+ global _last_per_cpu_times
+ blocking = interval is not None and interval > 0.0
+ if interval is not None and interval < 0:
+ raise ValueError("interval is not positive (got %r)" % interval)
+
+ def calculate(t1, t2):
+ times_delta = _cpu_times_deltas(t1, t2)
+ all_delta = _cpu_tot_time(times_delta)
+ busy_delta = _cpu_busy_time(times_delta)
+
+ try:
+ busy_perc = (busy_delta / all_delta) * 100
+ except ZeroDivisionError:
+ return 0.0
+ else:
+ return round(busy_perc, 1)
+
+ # system-wide usage
+ if not percpu:
+ if blocking:
+ t1 = cpu_times()
+ time.sleep(interval)
+ else:
+ t1 = _last_cpu_times
+ if t1 is None:
+ # Something bad happened at import time. We'll
+ # get a meaningful result on the next call. See:
+ # https://github.com/giampaolo/psutil/pull/715
+ t1 = cpu_times()
+ _last_cpu_times = cpu_times()
+ return calculate(t1, _last_cpu_times)
+ # per-cpu usage
+ else:
+ ret = []
+ if blocking:
+ tot1 = cpu_times(percpu=True)
+ time.sleep(interval)
+ else:
+ tot1 = _last_per_cpu_times
+ if tot1 is None:
+ # Something bad happened at import time. We'll
+ # get a meaningful result on the next call. See:
+ # https://github.com/giampaolo/psutil/pull/715
+ tot1 = cpu_times(percpu=True)
+ _last_per_cpu_times = cpu_times(percpu=True)
+ for t1, t2 in zip(tot1, _last_per_cpu_times):
+ ret.append(calculate(t1, t2))
+ return ret
+
+
+# Use separate global vars for cpu_times_percent() so that it's
+# independent from cpu_percent() and they can both be used within
+# the same program.
+_last_cpu_times_2 = _last_cpu_times
+_last_per_cpu_times_2 = _last_per_cpu_times
+
+
+def cpu_times_percent(interval=None, percpu=False):
+ """Same as cpu_percent() but provides utilization percentages
+ for each specific CPU time as is returned by cpu_times().
+ For instance, on Linux we'll get:
+
+ >>> cpu_times_percent()
+ cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
+ irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
+ >>>
+
+ *interval* and *percpu* arguments have the same meaning as in
+ cpu_percent().
+ """
+ global _last_cpu_times_2
+ global _last_per_cpu_times_2
+ blocking = interval is not None and interval > 0.0
+ if interval is not None and interval < 0:
+ raise ValueError("interval is not positive (got %r)" % interval)
+
+ def calculate(t1, t2):
+ nums = []
+ times_delta = _cpu_times_deltas(t1, t2)
+ all_delta = _cpu_tot_time(times_delta)
+ # "scale" is the value to multiply each delta with to get percentages.
+ # We use "max" to avoid division by zero (if all_delta is 0, then all
+ # fields are 0 so percentages will be 0 too. all_delta cannot be a
+ # fraction because cpu times are integers)
+ scale = 100.0 / max(1, all_delta)
+ for field_delta in times_delta:
+ field_perc = field_delta * scale
+ field_perc = round(field_perc, 1)
+ # make sure we don't return negative values or values over 100%
+ field_perc = min(max(0.0, field_perc), 100.0)
+ nums.append(field_perc)
+ return _psplatform.scputimes(*nums)
+
+ # system-wide usage
+ if not percpu:
+ if blocking:
+ t1 = cpu_times()
+ time.sleep(interval)
+ else:
+ t1 = _last_cpu_times_2
+ if t1 is None:
+ # Something bad happened at import time. We'll
+ # get a meaningful result on the next call. See:
+ # https://github.com/giampaolo/psutil/pull/715
+ t1 = cpu_times()
+ _last_cpu_times_2 = cpu_times()
+ return calculate(t1, _last_cpu_times_2)
+ # per-cpu usage
+ else:
+ ret = []
+ if blocking:
+ tot1 = cpu_times(percpu=True)
+ time.sleep(interval)
+ else:
+ tot1 = _last_per_cpu_times_2
+ if tot1 is None:
+ # Something bad happened at import time. We'll
+ # get a meaningful result on the next call. See:
+ # https://github.com/giampaolo/psutil/pull/715
+ tot1 = cpu_times(percpu=True)
+ _last_per_cpu_times_2 = cpu_times(percpu=True)
+ for t1, t2 in zip(tot1, _last_per_cpu_times_2):
+ ret.append(calculate(t1, t2))
+ return ret
+
+
+def cpu_stats():
+ """Return CPU statistics."""
+ return _psplatform.cpu_stats()
+
+
+if hasattr(_psplatform, "cpu_freq"):
+
+ def cpu_freq(percpu=False):
+ """Return CPU frequency as a namedtuple including current,
+        min and max frequency expressed in MHz.
+
+ If *percpu* is True and the system supports per-cpu frequency
+ retrieval (Linux only) a list of frequencies is returned for
+        each CPU. If not, a list with one element is returned.
+ """
+ ret = _psplatform.cpu_freq()
+ if percpu:
+ return ret
+ else:
+ num_cpus = float(len(ret))
+ if num_cpus == 0:
+ return None
+ elif num_cpus == 1:
+ return ret[0]
+ else:
+ currs, mins, maxs = 0.0, 0.0, 0.0
+ set_none = False
+ for cpu in ret:
+ currs += cpu.current
+ # On Linux if /proc/cpuinfo is used min/max are set
+ # to None.
+ if LINUX and cpu.min is None:
+ set_none = True
+ continue
+ mins += cpu.min
+ maxs += cpu.max
+
+ current = currs / num_cpus
+
+ if set_none:
+ min_ = max_ = None
+ else:
+ min_ = mins / num_cpus
+ max_ = maxs / num_cpus
+
+ return _common.scpufreq(current, min_, max_)
+
+ __all__.append("cpu_freq")
+
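+# Example usage (illustrative; availability and values depend on the
+# platform, and percpu=True is only meaningful on Linux):
+#
+#     >>> import psutil
+#     >>> psutil.cpu_freq()
+#     scpufreq(current=1866.0, min=400.0, max=3400.0)
+#     >>> psutil.cpu_freq(percpu=True)[0]
+#     scpufreq(current=2094.9, min=400.0, max=3400.0)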
+
+if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
+ # Perform this hasattr check once on import time to either use the
+ # platform based code or proxy straight from the os module.
+ if hasattr(os, "getloadavg"):
+ getloadavg = os.getloadavg
+ else:
+ getloadavg = _psplatform.getloadavg
+
+ __all__.append("getloadavg")
+
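+# Example usage (illustrative): getloadavg() returns the 1, 5 and 15 minute
+# load averages; dividing by the number of logical CPUs gives a rough
+# system-wide utilization percentage (values below assume 8 CPUs):
+#
+#     >>> import psutil
+#     >>> psutil.getloadavg()
+#     (1.52, 0.99, 0.83)
+#     >>> [round(x / psutil.cpu_count() * 100, 1) for x in psutil.getloadavg()]
+#     [19.0, 12.4, 10.4]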
+
+# =====================================================================
+# --- system memory related functions
+# =====================================================================
+
+
+def virtual_memory():
+ """Return statistics about system memory usage as a namedtuple
+ including the following fields, expressed in bytes:
+
+ - total:
+ total physical memory available.
+
+ - available:
+ the memory that can be given instantly to processes without the
+ system going into swap.
+ This is calculated by summing different memory values depending
+ on the platform and it is supposed to be used to monitor actual
+ memory usage in a cross platform fashion.
+
+ - percent:
+ the percentage usage calculated as (total - available) / total * 100
+
+ - used:
+ memory used, calculated differently depending on the platform and
+ designed for informational purposes only:
+ macOS: active + wired
+ BSD: active + wired + cached
+ Linux: total - free
+
+ - free:
+ memory not being used at all (zeroed) that is readily available;
+ note that this doesn't reflect the actual memory available
+ (use 'available' instead)
+
+ Platform-specific fields:
+
+ - active (UNIX):
+ memory currently in use or very recently used, and so it is in RAM.
+
+ - inactive (UNIX):
+ memory that is marked as not used.
+
+ - buffers (BSD, Linux):
+ cache for things like file system metadata.
+
+ - cached (BSD, macOS):
+ cache for various things.
+
+ - wired (macOS, BSD):
+ memory that is marked to always stay in RAM. It is never moved to disk.
+
+ - shared (BSD):
+ memory that may be simultaneously accessed by multiple processes.
+
+ The sum of 'used' and 'available' does not necessarily equal total.
+ On Windows 'available' and 'free' are the same.
+ """
+ global _TOTAL_PHYMEM
+ ret = _psplatform.virtual_memory()
+ # cached for later use in Process.memory_percent()
+ _TOTAL_PHYMEM = ret.total
+ return ret
+
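+# Example usage (illustrative): 'available' and 'percent' are the portable
+# fields to monitor; a simple low-memory check might look like:
+#
+#     >>> import psutil
+#     >>> mem = psutil.virtual_memory()
+#     >>> mem.percent
+#     42.1
+#     >>> if mem.available < 500 * 1024 * 1024:  # less than ~500 MB available
+#     ...     print("low memory")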
+
+def swap_memory():
+ """Return system swap memory statistics as a namedtuple including
+ the following fields:
+
+ - total: total swap memory in bytes
+ - used: used swap memory in bytes
+ - free: free swap memory in bytes
+ - percent: the percentage usage
+ - sin: no. of bytes the system has swapped in from disk (cumulative)
+ - sout: no. of bytes the system has swapped out from disk (cumulative)
+
+ 'sin' and 'sout' on Windows are meaningless and always set to 0.
+ """
+ return _psplatform.swap_memory()
+
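+# Example usage (illustrative): 'percent' expresses used/total as a
+# percentage, while 'sin'/'sout' are cumulative byte counters since boot:
+#
+#     >>> import psutil
+#     >>> sw = psutil.swap_memory()
+#     >>> (sw.total, sw.used, sw.percent)
+#     (2147479552, 296128512, 13.8)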
+
+# =====================================================================
+# --- disks/partitions related functions
+# =====================================================================
+
+
+def disk_usage(path):
+ """Return disk usage statistics about the given *path* as a
+ namedtuple including total, used and free space expressed in bytes
+ plus the percentage usage.
+ """
+ return _psplatform.disk_usage(path)
+
+
+def disk_partitions(all=False):
+ """Return mounted partitions as a list of
+ (device, mountpoint, fstype, opts) namedtuple.
+    'opts' field is a raw, comma-separated string indicating mount
+    options, which may vary depending on the platform.
+
+ If *all* parameter is False return physical devices only and ignore
+ all others.
+ """
+ def pathconf(path, name):
+ try:
+ return os.pathconf(path, name)
+ except (OSError, AttributeError):
+ pass
+
+ ret = _psplatform.disk_partitions(all)
+ if POSIX:
+ new = []
+ for item in ret:
+ nt = item._replace(
+ maxfile=pathconf(item.mountpoint, 'PC_NAME_MAX'),
+ maxpath=pathconf(item.mountpoint, 'PC_PATH_MAX'))
+ new.append(nt)
+ return new
+ else:
+ return ret
+
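+# Example usage (illustrative): combining disk_partitions() with
+# disk_usage() to report usage for every mounted filesystem:
+#
+#     >>> import psutil
+#     >>> for part in psutil.disk_partitions(all=False):
+#     ...     usage = psutil.disk_usage(part.mountpoint)
+#     ...     print(part.device, part.mountpoint, part.fstype, usage.percent)
+#     ...
+#     /dev/sda1 / ext4 71.2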
+
+def disk_io_counters(perdisk=False, nowrap=True):
+ """Return system disk I/O statistics as a namedtuple including
+ the following fields:
+
+ - read_count: number of reads
+ - write_count: number of writes
+ - read_bytes: number of bytes read
+ - write_bytes: number of bytes written
+ - read_time: time spent reading from disk (in ms)
+ - write_time: time spent writing to disk (in ms)
+
+ Platform specific:
+
+ - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
+ - read_merged_count (Linux): number of merged reads
+ - write_merged_count (Linux): number of merged writes
+
+ If *perdisk* is True return the same information for every
+ physical disk installed on the system as a dictionary
+ with partition names as the keys and the namedtuple
+ described above as the values.
+
+    If *nowrap* is True it detects and adjusts the numbers which overflow
+    and wrap (restart from 0), adding "old value" to "new value" so that
+    the returned numbers will always be increasing or remain the same,
+    but never decrease.
+ "disk_io_counters.cache_clear()" can be used to invalidate the
+ cache.
+
+ On recent Windows versions 'diskperf -y' command may need to be
+ executed first otherwise this function won't find any disk.
+ """
+ kwargs = dict(perdisk=perdisk) if LINUX else {}
+ rawdict = _psplatform.disk_io_counters(**kwargs)
+ if not rawdict:
+ return {} if perdisk else None
+ if nowrap:
+ rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
+ nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
+ if perdisk:
+ for disk, fields in rawdict.items():
+ rawdict[disk] = nt(*fields)
+ return rawdict
+ else:
+ return nt(*(sum(x) for x in zip(*rawdict.values())))
+
+
+disk_io_counters.cache_clear = functools.partial(
+ _wrap_numbers.cache_clear, 'psutil.disk_io_counters')
+disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
+
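+# Example usage (illustrative): per-disk counters are keyed by disk name
+# and the nowrap cache can be reset via cache_clear():
+#
+#     >>> import psutil
+#     >>> io = psutil.disk_io_counters(perdisk=True)
+#     >>> io['sda'].read_bytes, io['sda'].write_bytes
+#     (514871296, 1462732800)
+#     >>> psutil.disk_io_counters.cache_clear()  # forget previous samples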
+
+# =====================================================================
+# --- network related functions
+# =====================================================================
+
+
+def net_io_counters(pernic=False, nowrap=True):
+ """Return network I/O statistics as a namedtuple including
+ the following fields:
+
+ - bytes_sent: number of bytes sent
+ - bytes_recv: number of bytes received
+ - packets_sent: number of packets sent
+ - packets_recv: number of packets received
+ - errin: total number of errors while receiving
+ - errout: total number of errors while sending
+ - dropin: total number of incoming packets which were dropped
+ - dropout: total number of outgoing packets which were dropped
+ (always 0 on macOS and BSD)
+
+ If *pernic* is True return the same information for every
+ network interface installed on the system as a dictionary
+ with network interface names as the keys and the namedtuple
+ described above as the values.
+
+    If *nowrap* is True it detects and adjusts the numbers which overflow
+    and wrap (restart from 0), adding "old value" to "new value" so that
+    the returned numbers will always be increasing or remain the same,
+    but never decrease.
+    "net_io_counters.cache_clear()" can be used to invalidate the
+ cache.
+ """
+ rawdict = _psplatform.net_io_counters()
+ if not rawdict:
+ return {} if pernic else None
+ if nowrap:
+ rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
+ if pernic:
+ for nic, fields in rawdict.items():
+ rawdict[nic] = _common.snetio(*fields)
+ return rawdict
+ else:
+ return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
+
+
+net_io_counters.cache_clear = functools.partial(
+ _wrap_numbers.cache_clear, 'psutil.net_io_counters')
+net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
+
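+# Example usage (illustrative): sampling the cumulative counters twice and
+# subtracting gives throughput over the elapsed interval:
+#
+#     >>> import time, psutil
+#     >>> before = psutil.net_io_counters()
+#     >>> time.sleep(1)
+#     >>> after = psutil.net_io_counters()
+#     >>> after.bytes_recv - before.bytes_recv  # bytes received in ~1 second
+#     48734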
+
+def net_connections(kind='inet'):
+ """Return system-wide socket connections as a list of
+ (fd, family, type, laddr, raddr, status, pid) namedtuples.
+ In case of limited privileges 'fd' and 'pid' may be set to -1
+ and None respectively.
+ The *kind* parameter filters for connections that fit the
+ following criteria:
+
+ +------------+----------------------------------------------------+
+ | Kind Value | Connections using |
+ +------------+----------------------------------------------------+
+ | inet | IPv4 and IPv6 |
+ | inet4 | IPv4 |
+ | inet6 | IPv6 |
+ | tcp | TCP |
+ | tcp4 | TCP over IPv4 |
+ | tcp6 | TCP over IPv6 |
+ | udp | UDP |
+ | udp4 | UDP over IPv4 |
+ | udp6 | UDP over IPv6 |
+ | unix | UNIX socket (both UDP and TCP protocols) |
+ | all | the sum of all the possible families and protocols |
+ +------------+----------------------------------------------------+
+
+ On macOS this function requires root privileges.
+ """
+ return _psplatform.net_connections(kind)
+
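+# Example usage (illustrative): listing the TCP ports the system is
+# listening on (may require elevated privileges, e.g. root on macOS):
+#
+#     >>> import psutil
+#     >>> sorted(c.laddr.port for c in psutil.net_connections(kind='tcp')
+#     ...        if c.status == psutil.CONN_LISTEN)
+#     [22, 80, 443]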
+
+def net_if_addrs():
+ """Return the addresses associated to each NIC (network interface
+ card) installed on the system as a dictionary whose keys are the
+ NIC names and value is a list of namedtuples for each address
+ assigned to the NIC. Each namedtuple includes 5 fields:
+
+ - family: can be either socket.AF_INET, socket.AF_INET6 or
+ psutil.AF_LINK, which refers to a MAC address.
+    - address: the primary address; it is always set.
+    - netmask: the netmask address; 'netmask', 'broadcast' and 'ptp'
+      may be None.
+    - ptp: stands for "point to point" and references the
+      destination address on a point to point interface
+      (typically a VPN).
+    - broadcast: the broadcast address; 'broadcast' and 'ptp' are
+      mutually exclusive.
+
+ Note: you can have more than one address of the same family
+ associated with each interface.
+ """
+ has_enums = sys.version_info >= (3, 4)
+ if has_enums:
+ import socket
+ rawlist = _psplatform.net_if_addrs()
+ rawlist.sort(key=lambda x: x[1]) # sort by family
+ ret = collections.defaultdict(list)
+ for name, fam, addr, mask, broadcast, ptp in rawlist:
+ if has_enums:
+ try:
+ fam = socket.AddressFamily(fam)
+ except ValueError:
+ if WINDOWS and fam == -1:
+ fam = _psplatform.AF_LINK
+ elif (hasattr(_psplatform, "AF_LINK") and
+ _psplatform.AF_LINK == fam):
+ # Linux defines AF_LINK as an alias for AF_PACKET.
+ # We re-set the family here so that repr(family)
+ # will show AF_LINK rather than AF_PACKET
+ fam = _psplatform.AF_LINK
+ if fam == _psplatform.AF_LINK:
+ # The underlying C function may return an incomplete MAC
+ # address in which case we fill it with null bytes, see:
+ # https://github.com/giampaolo/psutil/issues/786
+ separator = ":" if POSIX else "-"
+ while addr.count(separator) < 5:
+ addr += "%s00" % separator
+ ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
+ return dict(ret)
+
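+# Example usage (illustrative): extracting the MAC address of each
+# interface by filtering on the psutil.AF_LINK family:
+#
+#     >>> import psutil
+#     >>> {name: [a.address for a in addrs if a.family == psutil.AF_LINK]
+#     ...  for name, addrs in psutil.net_if_addrs().items()}
+#     {'lo': ['00:00:00:00:00:00'], 'eth0': ['9c:eb:e8:12:34:56']}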
+
+def net_if_stats():
+ """Return information about each NIC (network interface card)
+ installed on the system as a dictionary whose keys are the
+    NIC names and whose values are namedtuples with the following fields:
+
+ - isup: whether the interface is up (bool)
+ - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
+ NIC_DUPLEX_UNKNOWN
+    - speed: the NIC speed expressed in megabits per second (Mbps); if it
+      can't be determined (e.g. 'localhost') it will be set to 0.
+ - mtu: the maximum transmission unit expressed in bytes.
+ """
+ return _psplatform.net_if_stats()
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+# Linux, macOS
+if hasattr(_psplatform, "sensors_temperatures"):
+
+ def sensors_temperatures(fahrenheit=False):
+ """Return hardware temperatures. Each entry is a namedtuple
+        representing a certain hardware sensor (it may be a CPU, a
+        hard disk or something else, depending on the OS and its
+        configuration).
+        All temperatures are expressed in Celsius unless *fahrenheit*
+ is set to True.
+ """
+ def convert(n):
+ if n is not None:
+ return (float(n) * 9 / 5) + 32 if fahrenheit else n
+
+ ret = collections.defaultdict(list)
+ rawdict = _psplatform.sensors_temperatures()
+
+ for name, values in rawdict.items():
+ while values:
+ label, current, high, critical = values.pop(0)
+ current = convert(current)
+ high = convert(high)
+ critical = convert(critical)
+
+ if high and not critical:
+ critical = high
+ elif critical and not high:
+ high = critical
+
+ ret[name].append(
+ _common.shwtemp(label, current, high, critical))
+
+ return dict(ret)
+
+ __all__.append("sensors_temperatures")
+
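+# Example usage (illustrative; Linux output shown, sensor and label names
+# vary by machine):
+#
+#     >>> import psutil
+#     >>> temps = psutil.sensors_temperatures()
+#     >>> temps['coretemp'][0]
+#     shwtemp(label='Package id 0', current=47.0, high=100.0, critical=100.0)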
+
+# Linux
+if hasattr(_psplatform, "sensors_fans"):
+
+ def sensors_fans():
+ """Return fans speed. Each entry is a namedtuple
+ representing a certain hardware sensor.
+ All speed are expressed in RPM (rounds per minute).
+ """
+ return _psplatform.sensors_fans()
+
+ __all__.append("sensors_fans")
+
+
+# Linux, Windows, FreeBSD, macOS
+if hasattr(_psplatform, "sensors_battery"):
+
+ def sensors_battery():
+ """Return battery information. If no battery is installed
+ returns None.
+
+ - percent: battery power left as a percentage.
+        - secsleft: a rough approximation of how many seconds are left
+                    before the battery runs out of power. May be
+                    POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
+ - power_plugged: True if the AC power cable is connected.
+ """
+ return _psplatform.sensors_battery()
+
+ __all__.append("sensors_battery")
+
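+# Example usage (illustrative): converting 'secsleft' to minutes while
+# guarding against the POWER_TIME_* sentinel values:
+#
+#     >>> import psutil
+#     >>> batt = psutil.sensors_battery()
+#     >>> batt.percent, batt.power_plugged
+#     (93.4, False)
+#     >>> if batt.secsleft not in (psutil.POWER_TIME_UNKNOWN,
+#     ...                          psutil.POWER_TIME_UNLIMITED):
+#     ...     print("%d minutes left" % (batt.secsleft // 60))
+#     189 minutes left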
+
+# =====================================================================
+# --- other system related functions
+# =====================================================================
+
+
+def boot_time():
+ """Return the system boot time expressed in seconds since the epoch."""
+ # Note: we are not caching this because it is subject to
+ # system clock updates.
+ return _psplatform.boot_time()
+
+
+def users():
+ """Return users currently connected on the system as a list of
+ namedtuples including the following fields.
+
+ - user: the name of the user
+ - terminal: the tty or pseudo-tty associated with the user, if any.
+ - host: the host name associated with the entry, if any.
+ - started: the creation time as a floating point number expressed in
+ seconds since the epoch.
+ """
+ return _psplatform.users()
+
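+# Example usage (illustrative): boot_time() and the 'started' field of
+# users() are epoch timestamps and can be turned into datetimes:
+#
+#     >>> import datetime, psutil
+#     >>> datetime.datetime.fromtimestamp(psutil.boot_time())
+#     datetime.datetime(2022, 3, 14, 8, 4, 31)
+#     >>> [(u.name, u.terminal) for u in psutil.users()]
+#     [('alice', 'pts/0')]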
+
+# =====================================================================
+# --- Windows services
+# =====================================================================
+
+
+if WINDOWS:
+
+ def win_service_iter():
+ """Return a generator yielding a WindowsService instance for all
+ Windows services installed.
+ """
+ return _psplatform.win_service_iter()
+
+ def win_service_get(name):
+ """Get a Windows service by *name*.
+        Raise NoSuchProcess if no service with that name exists.
+ """
+ return _psplatform.win_service_get(name)
+
+
+# =====================================================================
+
+
+def _set_debug(value):
+ """Enable or disable PSUTIL_DEBUG option, which prints debugging
+ messages to stderr.
+ """
+ import psutil._common
+ psutil._common.PSUTIL_DEBUG = bool(value)
+ _psplatform.cext.set_debug(bool(value))
+
+
+def test(): # pragma: no cover
+ from ._common import bytes2human
+ from ._compat import get_terminal_size
+
+ today_day = datetime.date.today()
+ templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s"
+ attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times',
+ 'create_time', 'memory_info', 'status', 'nice', 'username']
+ print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", # NOQA
+ "STATUS", "START", "TIME", "CMDLINE"))
+ for p in process_iter(attrs, ad_value=None):
+ if p.info['create_time']:
+ ctime = datetime.datetime.fromtimestamp(p.info['create_time'])
+ if ctime.date() == today_day:
+ ctime = ctime.strftime("%H:%M")
+ else:
+ ctime = ctime.strftime("%b%d")
+ else:
+ ctime = ''
+ if p.info['cpu_times']:
+ cputime = time.strftime("%M:%S",
+ time.localtime(sum(p.info['cpu_times'])))
+ else:
+ cputime = ''
+
+ user = p.info['username'] or ''
+ if not user and POSIX:
+ try:
+ user = p.uids()[0]
+ except Error:
+ pass
+ if user and WINDOWS and '\\' in user:
+ user = user.split('\\')[1]
+ user = user[:9]
+ vms = bytes2human(p.info['memory_info'].vms) if \
+ p.info['memory_info'] is not None else ''
+ rss = bytes2human(p.info['memory_info'].rss) if \
+ p.info['memory_info'] is not None else ''
+ memp = round(p.info['memory_percent'], 1) if \
+ p.info['memory_percent'] is not None else ''
+ nice = int(p.info['nice']) if p.info['nice'] else ''
+ if p.info['cmdline']:
+ cmdline = ' '.join(p.info['cmdline'])
+ else:
+ cmdline = p.info['name']
+ status = p.info['status'][:5] if p.info['status'] else ''
+
+ line = templ % (
+ user[:10],
+ p.info['pid'],
+ memp,
+ vms,
+ rss,
+ nice,
+ status,
+ ctime,
+ cputime,
+ cmdline)
+ print(line[:get_terminal_size()[0]]) # NOQA
+
+
+del memoize_when_activated, division
+if sys.version_info[0] < 3:
+ del num, x
+
+if __name__ == "__main__":
+ test()
diff --git a/lib/psutil/_common.py b/lib/psutil/_common.py
new file mode 100644
index 0000000..3414e8c
--- /dev/null
+++ b/lib/psutil/_common.py
@@ -0,0 +1,899 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common objects shared by __init__.py and _ps*.py modules."""
+
+# Note: this module is imported by setup.py so it should not import
+# psutil or third-party modules.
+
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import contextlib
+import errno
+import functools
+import os
+import socket
+import stat
+import sys
+import threading
+import warnings
+from collections import namedtuple
+from socket import AF_INET
+from socket import SOCK_DGRAM
+from socket import SOCK_STREAM
+
+
+try:
+ from socket import AF_INET6
+except ImportError:
+ AF_INET6 = None
+try:
+ from socket import AF_UNIX
+except ImportError:
+ AF_UNIX = None
+
+if sys.version_info >= (3, 4):
+ import enum
+else:
+ enum = None
+
+
+# can't take it from _common.py as this script is imported by setup.py
+PY3 = sys.version_info[0] == 3
+PSUTIL_DEBUG = bool(os.getenv('PSUTIL_DEBUG', 0))
+_DEFAULT = object()
+
+__all__ = [
+ # OS constants
+ 'FREEBSD', 'BSD', 'LINUX', 'NETBSD', 'OPENBSD', 'MACOS', 'OSX', 'POSIX',
+ 'SUNOS', 'WINDOWS',
+ # connection constants
+ 'CONN_CLOSE', 'CONN_CLOSE_WAIT', 'CONN_CLOSING', 'CONN_ESTABLISHED',
+ 'CONN_FIN_WAIT1', 'CONN_FIN_WAIT2', 'CONN_LAST_ACK', 'CONN_LISTEN',
+ 'CONN_NONE', 'CONN_SYN_RECV', 'CONN_SYN_SENT', 'CONN_TIME_WAIT',
+ # net constants
+ 'NIC_DUPLEX_FULL', 'NIC_DUPLEX_HALF', 'NIC_DUPLEX_UNKNOWN',
+ # process status constants
+ 'STATUS_DEAD', 'STATUS_DISK_SLEEP', 'STATUS_IDLE', 'STATUS_LOCKED',
+ 'STATUS_RUNNING', 'STATUS_SLEEPING', 'STATUS_STOPPED', 'STATUS_SUSPENDED',
+ 'STATUS_TRACING_STOP', 'STATUS_WAITING', 'STATUS_WAKE_KILL',
+ 'STATUS_WAKING', 'STATUS_ZOMBIE', 'STATUS_PARKED',
+ # other constants
+ 'ENCODING', 'ENCODING_ERRS', 'AF_INET6',
+ # named tuples
+ 'pconn', 'pcputimes', 'pctxsw', 'pgids', 'pio', 'pionice', 'popenfile',
+ 'pthread', 'puids', 'sconn', 'scpustats', 'sdiskio', 'sdiskpart',
+ 'sdiskusage', 'snetio', 'snicaddr', 'snicstats', 'sswap', 'suser',
+ # utility functions
+ 'conn_tmap', 'deprecated_method', 'isfile_strict', 'memoize',
+ 'parse_environ_block', 'path_exists_strict', 'usage_percent',
+ 'supports_ipv6', 'sockfam_to_enum', 'socktype_to_enum', "wrap_numbers",
+ 'open_text', 'open_binary', 'cat', 'bcat',
+ 'bytes2human', 'conn_to_ntuple', 'debug',
+ # shell utils
+ 'hilite', 'term_supports_colors', 'print_color',
+]
+
+
+# ===================================================================
+# --- OS constants
+# ===================================================================
+
+
+POSIX = os.name == "posix"
+WINDOWS = os.name == "nt"
+LINUX = sys.platform.startswith("linux")
+MACOS = sys.platform.startswith("darwin")
+OSX = MACOS # deprecated alias
+FREEBSD = sys.platform.startswith(("freebsd", "midnightbsd"))
+OPENBSD = sys.platform.startswith("openbsd")
+NETBSD = sys.platform.startswith("netbsd")
+BSD = FREEBSD or OPENBSD or NETBSD
+SUNOS = sys.platform.startswith(("sunos", "solaris"))
+AIX = sys.platform.startswith("aix")
+
+
+# ===================================================================
+# --- API constants
+# ===================================================================
+
+
+# Process.status()
+STATUS_RUNNING = "running"
+STATUS_SLEEPING = "sleeping"
+STATUS_DISK_SLEEP = "disk-sleep"
+STATUS_STOPPED = "stopped"
+STATUS_TRACING_STOP = "tracing-stop"
+STATUS_ZOMBIE = "zombie"
+STATUS_DEAD = "dead"
+STATUS_WAKE_KILL = "wake-kill"
+STATUS_WAKING = "waking"
+STATUS_IDLE = "idle" # Linux, macOS, FreeBSD
+STATUS_LOCKED = "locked" # FreeBSD
+STATUS_WAITING = "waiting" # FreeBSD
+STATUS_SUSPENDED = "suspended" # NetBSD
+STATUS_PARKED = "parked" # Linux
+
+# Process.connections() and psutil.net_connections()
+CONN_ESTABLISHED = "ESTABLISHED"
+CONN_SYN_SENT = "SYN_SENT"
+CONN_SYN_RECV = "SYN_RECV"
+CONN_FIN_WAIT1 = "FIN_WAIT1"
+CONN_FIN_WAIT2 = "FIN_WAIT2"
+CONN_TIME_WAIT = "TIME_WAIT"
+CONN_CLOSE = "CLOSE"
+CONN_CLOSE_WAIT = "CLOSE_WAIT"
+CONN_LAST_ACK = "LAST_ACK"
+CONN_LISTEN = "LISTEN"
+CONN_CLOSING = "CLOSING"
+CONN_NONE = "NONE"
+
+# net_if_stats()
+if enum is None:
+ NIC_DUPLEX_FULL = 2
+ NIC_DUPLEX_HALF = 1
+ NIC_DUPLEX_UNKNOWN = 0
+else:
+ class NicDuplex(enum.IntEnum):
+ NIC_DUPLEX_FULL = 2
+ NIC_DUPLEX_HALF = 1
+ NIC_DUPLEX_UNKNOWN = 0
+
+ globals().update(NicDuplex.__members__)
+
+# sensors_battery()
+if enum is None:
+ POWER_TIME_UNKNOWN = -1
+ POWER_TIME_UNLIMITED = -2
+else:
+ class BatteryTime(enum.IntEnum):
+ POWER_TIME_UNKNOWN = -1
+ POWER_TIME_UNLIMITED = -2
+
+ globals().update(BatteryTime.__members__)
+
+# --- others
+
+ENCODING = sys.getfilesystemencoding()
+if not PY3:
+ ENCODING_ERRS = "replace"
+else:
+ try:
+ ENCODING_ERRS = sys.getfilesystemencodeerrors() # py 3.6
+ except AttributeError:
+ ENCODING_ERRS = "surrogateescape" if POSIX else "replace"
+
+
+# ===================================================================
+# --- namedtuples
+# ===================================================================
+
+# --- for system functions
+
+# psutil.swap_memory()
+sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
+ 'sout'])
+# psutil.disk_usage()
+sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
+# psutil.disk_io_counters()
+sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time'])
+# psutil.disk_partitions()
+sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts',
+ 'maxfile', 'maxpath'])
+# psutil.net_io_counters()
+snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
+ 'packets_sent', 'packets_recv',
+ 'errin', 'errout',
+ 'dropin', 'dropout'])
+# psutil.users()
+suser = namedtuple('suser', ['name', 'terminal', 'host', 'started', 'pid'])
+# psutil.net_connections()
+sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
+ 'status', 'pid'])
+# psutil.net_if_addrs()
+snicaddr = namedtuple('snicaddr',
+ ['family', 'address', 'netmask', 'broadcast', 'ptp'])
+# psutil.net_if_stats()
+snicstats = namedtuple('snicstats',
+ ['isup', 'duplex', 'speed', 'mtu', 'flags'])
+# psutil.cpu_stats()
+scpustats = namedtuple(
+ 'scpustats', ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls'])
+# psutil.cpu_freq()
+scpufreq = namedtuple('scpufreq', ['current', 'min', 'max'])
+# psutil.sensors_temperatures()
+shwtemp = namedtuple(
+ 'shwtemp', ['label', 'current', 'high', 'critical'])
+# psutil.sensors_battery()
+sbattery = namedtuple('sbattery', ['percent', 'secsleft', 'power_plugged'])
+# psutil.sensors_fans()
+sfan = namedtuple('sfan', ['label', 'current'])
+
+# --- for Process methods
+
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.Process.open_files()
+popenfile = namedtuple('popenfile', ['path', 'fd'])
+# psutil.Process.threads()
+pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
+# psutil.Process.uids()
+puids = namedtuple('puids', ['real', 'effective', 'saved'])
+# psutil.Process.gids()
+pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
+# psutil.Process.io_counters()
+pio = namedtuple('pio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes'])
+# psutil.Process.ionice()
+pionice = namedtuple('pionice', ['ioclass', 'value'])
+# psutil.Process.ctx_switches()
+pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
+# psutil.Process.connections()
+pconn = namedtuple('pconn', ['fd', 'family', 'type', 'laddr', 'raddr',
+ 'status'])
+
+# psutil.connections() and psutil.Process.connections()
+addr = namedtuple('addr', ['ip', 'port'])
+
+
+# ===================================================================
+# --- Process.connections() 'kind' parameter mapping
+# ===================================================================
+
+
+conn_tmap = {
+ "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+ "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
+ "tcp4": ([AF_INET], [SOCK_STREAM]),
+ "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
+ "udp4": ([AF_INET], [SOCK_DGRAM]),
+ "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+ "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
+ "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
+}
+
+if AF_INET6 is not None:
+ conn_tmap.update({
+ "tcp6": ([AF_INET6], [SOCK_STREAM]),
+ "udp6": ([AF_INET6], [SOCK_DGRAM]),
+ })
+
+if AF_UNIX is not None:
+ conn_tmap.update({
+ "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
+ })
+
+
+# =====================================================================
+# --- Exceptions
+# =====================================================================
+
+
+class Error(Exception):
+ """Base exception class. All other psutil exceptions inherit
+ from this one.
+ """
+ __module__ = 'psutil'
+
+ def _infodict(self, attrs):
+ info = collections.OrderedDict()
+ for name in attrs:
+ value = getattr(self, name, None)
+ if value:
+ info[name] = value
+ elif name == "pid" and value == 0:
+ info[name] = value
+ return info
+
+ def __str__(self):
+ # invoked on `raise Error`
+ info = self._infodict(("pid", "ppid", "name"))
+ if info:
+ details = "(%s)" % ", ".join(
+ ["%s=%r" % (k, v) for k, v in info.items()])
+ else:
+ details = None
+ return " ".join([x for x in (getattr(self, "msg", ""), details) if x])
+
+ def __repr__(self):
+ # invoked on `repr(Error)`
+ info = self._infodict(("pid", "ppid", "name", "seconds", "msg"))
+ details = ", ".join(["%s=%r" % (k, v) for k, v in info.items()])
+ return "psutil.%s(%s)" % (self.__class__.__name__, details)
+
+
+class NoSuchProcess(Error):
+ """Exception raised when a process with a certain PID doesn't
+ or no longer exists.
+ """
+ __module__ = 'psutil'
+
+ def __init__(self, pid, name=None, msg=None):
+ Error.__init__(self)
+ self.pid = pid
+ self.name = name
+ self.msg = msg or "process no longer exists"
+
+
+class ZombieProcess(NoSuchProcess):
+ """Exception raised when querying a zombie process. This is
+ raised on macOS, BSD and Solaris only, and not always: depending
+ on the query the OS may be able to succeed anyway.
+    On Linux all zombie processes are queryable (hence this is never
+ raised). Windows doesn't have zombie processes.
+ """
+ __module__ = 'psutil'
+
+ def __init__(self, pid, name=None, ppid=None, msg=None):
+ NoSuchProcess.__init__(self, pid, name, msg)
+ self.ppid = ppid
+ self.msg = msg or "PID still exists but it's a zombie"
+
+
+class AccessDenied(Error):
+ """Exception raised when permission to perform an action is denied."""
+ __module__ = 'psutil'
+
+ def __init__(self, pid=None, name=None, msg=None):
+ Error.__init__(self)
+ self.pid = pid
+ self.name = name
+ self.msg = msg or ""
+
+
+class TimeoutExpired(Error):
+ """Raised on Process.wait(timeout) if timeout expires and process
+ is still alive.
+ """
+ __module__ = 'psutil'
+
+ def __init__(self, seconds, pid=None, name=None):
+ Error.__init__(self)
+ self.seconds = seconds
+ self.pid = pid
+ self.name = name
+ self.msg = "timeout after %s seconds" % seconds
+
+
+# ===================================================================
+# --- utils
+# ===================================================================
+
+
+def usage_percent(used, total, round_=None):
+ """Calculate percentage usage of 'used' against 'total'."""
+ try:
+ ret = (float(used) / total) * 100
+ except ZeroDivisionError:
+ return 0.0
+ else:
+ if round_ is not None:
+ ret = round(ret, round_)
+ return ret
+
+
+def memoize(fun):
+ """A simple memoize decorator for functions supporting (hashable)
+ positional arguments.
+ It also provides a cache_clear() function for clearing the cache:
+
+ >>> @memoize
+    ... def foo():
+    ...     return 1
+ ...
+ >>> foo()
+ 1
+ >>> foo.cache_clear()
+ >>>
+ """
+ @functools.wraps(fun)
+ def wrapper(*args, **kwargs):
+ key = (args, frozenset(sorted(kwargs.items())))
+ try:
+ return cache[key]
+ except KeyError:
+ ret = cache[key] = fun(*args, **kwargs)
+ return ret
+
+ def cache_clear():
+ """Clear cache."""
+ cache.clear()
+
+ cache = {}
+ wrapper.cache_clear = cache_clear
+ return wrapper
+
+
+def memoize_when_activated(fun):
+ """A memoize decorator which is disabled by default. It can be
+ activated and deactivated on request.
+ For efficiency reasons it can be used only against class methods
+ accepting no arguments.
+
+    >>> class Foo:
+    ...     @memoize_when_activated
+    ...     def foo(self):
+    ...         print(1)
+    ...
+    >>> f = Foo()
+    >>> # deactivated (default)
+    >>> f.foo()
+    1
+    >>> f.foo()
+    1
+    >>>
+    >>> # activated
+    >>> f.foo.cache_activate(f)
+    >>> f.foo()
+    1
+    >>> f.foo()
+    >>> f.foo()
+    >>>
+ """
+ @functools.wraps(fun)
+ def wrapper(self):
+ try:
+ # case 1: we previously entered oneshot() ctx
+ ret = self._cache[fun]
+ except AttributeError:
+ # case 2: we never entered oneshot() ctx
+ return fun(self)
+ except KeyError:
+ # case 3: we entered oneshot() ctx but there's no cache
+ # for this entry yet
+ ret = fun(self)
+ try:
+ self._cache[fun] = ret
+ except AttributeError:
+ # multi-threading race condition, see:
+ # https://github.com/giampaolo/psutil/issues/1948
+ pass
+ return ret
+
+ def cache_activate(proc):
+ """Activate cache. Expects a Process instance. Cache will be
+ stored as a "_cache" instance attribute."""
+ proc._cache = {}
+
+ def cache_deactivate(proc):
+ """Deactivate and clear cache."""
+ try:
+ del proc._cache
+ except AttributeError:
+ pass
+
+ wrapper.cache_activate = cache_activate
+ wrapper.cache_deactivate = cache_deactivate
+ return wrapper
+
+
+def isfile_strict(path):
+ """Same as os.path.isfile() but does not swallow EACCES / EPERM
+ exceptions, see:
+ http://mail.python.org/pipermail/python-dev/2012-June/120787.html
+ """
+ try:
+ st = os.stat(path)
+ except OSError as err:
+ if err.errno in (errno.EPERM, errno.EACCES):
+ raise
+ return False
+ else:
+ return stat.S_ISREG(st.st_mode)
+
+
+def path_exists_strict(path):
+ """Same as os.path.exists() but does not swallow EACCES / EPERM
+ exceptions, see:
+ http://mail.python.org/pipermail/python-dev/2012-June/120787.html
+ """
+ try:
+ os.stat(path)
+ except OSError as err:
+ if err.errno in (errno.EPERM, errno.EACCES):
+ raise
+ return False
+ else:
+ return True
+
+
+@memoize
+def supports_ipv6():
+ """Return True if IPv6 is supported on this platform."""
+ if not socket.has_ipv6 or AF_INET6 is None:
+ return False
+ try:
+ sock = socket.socket(AF_INET6, socket.SOCK_STREAM)
+ with contextlib.closing(sock):
+ sock.bind(("::1", 0))
+ return True
+ except socket.error:
+ return False
+
+
+def parse_environ_block(data):
+ """Parse a C environ block of environment variables into a dictionary."""
+ # The block is usually raw data from the target process. It might contain
+ # trailing garbage and lines that do not look like assignments.
+ ret = {}
+ pos = 0
+
+ # localize global variable to speed up access.
+ WINDOWS_ = WINDOWS
+ while True:
+ next_pos = data.find("\0", pos)
+ # nul byte at the beginning or double nul byte means finish
+ if next_pos <= pos:
+ break
+ # there might not be an equals sign
+ equal_pos = data.find("=", pos, next_pos)
+ if equal_pos > pos:
+ key = data[pos:equal_pos]
+ value = data[equal_pos + 1:next_pos]
+ # Windows expects environment variables to be uppercase only
+ if WINDOWS_:
+ key = key.upper()
+ ret[key] = value
+ pos = next_pos + 1
+
+ return ret
+
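+# Example usage (illustrative): the input is the raw NUL-separated environ
+# block read from the target process:
+#
+#     >>> parse_environ_block("HOME=/home/alice\0LANG=en_US.UTF-8\0\0")
+#     {'HOME': '/home/alice', 'LANG': 'en_US.UTF-8'}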
+
+def sockfam_to_enum(num):
+ """Convert a numeric socket family value to an IntEnum member.
+ If it's not a known member, return the numeric value itself.
+ """
+ if enum is None:
+ return num
+ else: # pragma: no cover
+ try:
+ return socket.AddressFamily(num)
+ except ValueError:
+ return num
+
+
+def socktype_to_enum(num):
+ """Convert a numeric socket type value to an IntEnum member.
+ If it's not a known member, return the numeric value itself.
+ """
+ if enum is None:
+ return num
+ else: # pragma: no cover
+ try:
+ return socket.SocketKind(num)
+ except ValueError:
+ return num
+
+
+def conn_to_ntuple(fd, fam, type_, laddr, raddr, status, status_map, pid=None):
+ """Convert a raw connection tuple to a proper ntuple."""
+ if fam in (socket.AF_INET, AF_INET6):
+ if laddr:
+ laddr = addr(*laddr)
+ if raddr:
+ raddr = addr(*raddr)
+ if type_ == socket.SOCK_STREAM and fam in (AF_INET, AF_INET6):
+ status = status_map.get(status, CONN_NONE)
+ else:
+ status = CONN_NONE # ignore whatever C returned to us
+ fam = sockfam_to_enum(fam)
+ type_ = socktype_to_enum(type_)
+ if pid is None:
+ return pconn(fd, fam, type_, laddr, raddr, status)
+ else:
+ return sconn(fd, fam, type_, laddr, raddr, status, pid)
+
+
+def deprecated_method(replacement):
+ """A decorator which can be used to mark a method as deprecated
+ 'replcement' is the method name which will be called instead.
+ """
+ def outer(fun):
+ msg = "%s() is deprecated and will be removed; use %s() instead" % (
+ fun.__name__, replacement)
+ if fun.__doc__ is None:
+ fun.__doc__ = msg
+
+ @functools.wraps(fun)
+ def inner(self, *args, **kwargs):
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+ return getattr(self, replacement)(*args, **kwargs)
+ return inner
+ return outer
+
+
+class _WrapNumbers:
+ """Watches numbers so that they don't overflow and wrap
+ (reset to zero).
+ """
+
+ def __init__(self):
+ self.lock = threading.Lock()
+ self.cache = {}
+ self.reminders = {}
+ self.reminder_keys = {}
+
+ def _add_dict(self, input_dict, name):
+ assert name not in self.cache
+ assert name not in self.reminders
+ assert name not in self.reminder_keys
+ self.cache[name] = input_dict
+ self.reminders[name] = collections.defaultdict(int)
+ self.reminder_keys[name] = collections.defaultdict(set)
+
+ def _remove_dead_reminders(self, input_dict, name):
+ """In case the number of keys changed between calls (e.g. a
+ disk disappears) this removes the entry from self.reminders.
+ """
+ old_dict = self.cache[name]
+ gone_keys = set(old_dict.keys()) - set(input_dict.keys())
+ for gone_key in gone_keys:
+ for remkey in self.reminder_keys[name][gone_key]:
+ del self.reminders[name][remkey]
+ del self.reminder_keys[name][gone_key]
+
+ def run(self, input_dict, name):
+ """Cache dict and sum numbers which overflow and wrap.
+ Return an updated copy of `input_dict`
+ """
+ if name not in self.cache:
+ # This was the first call.
+ self._add_dict(input_dict, name)
+ return input_dict
+
+ self._remove_dead_reminders(input_dict, name)
+
+ old_dict = self.cache[name]
+ new_dict = {}
+ for key in input_dict.keys():
+ input_tuple = input_dict[key]
+ try:
+ old_tuple = old_dict[key]
+ except KeyError:
+ # The input dict has a new key (e.g. a new disk or NIC)
+ # which didn't exist in the previous call.
+ new_dict[key] = input_tuple
+ continue
+
+ bits = []
+ for i in range(len(input_tuple)):
+ input_value = input_tuple[i]
+ old_value = old_tuple[i]
+ remkey = (key, i)
+ if input_value < old_value:
+ # it wrapped!
+ self.reminders[name][remkey] += old_value
+ self.reminder_keys[name][key].add(remkey)
+ bits.append(input_value + self.reminders[name][remkey])
+
+ new_dict[key] = tuple(bits)
+
+ self.cache[name] = input_dict
+ return new_dict
+
+ def cache_clear(self, name=None):
+ """Clear the internal cache, optionally only for function 'name'."""
+ with self.lock:
+ if name is None:
+ self.cache.clear()
+ self.reminders.clear()
+ self.reminder_keys.clear()
+ else:
+ self.cache.pop(name, None)
+ self.reminders.pop(name, None)
+ self.reminder_keys.pop(name, None)
+
+ def cache_info(self):
+ """Return internal cache dicts as a tuple of 3 elements."""
+ with self.lock:
+ return (self.cache, self.reminders, self.reminder_keys)
+
+
+def wrap_numbers(input_dict, name):
+ """Given an `input_dict` and a function `name`, adjust the numbers
+ which "wrap" (restart from zero) across different calls by adding
+ "old value" to "new value" and return an updated dict.
+ """
+ with _wn.lock:
+ return _wn.run(input_dict, name)
+
+
+_wn = _WrapNumbers()
+wrap_numbers.cache_clear = _wn.cache_clear
+wrap_numbers.cache_info = _wn.cache_info
+
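+# Example usage (illustrative): if a counter restarts from zero between two
+# calls, wrap_numbers() keeps the returned value monotonically increasing:
+#
+#     >>> wrap_numbers({'eth0': (100, 200)}, 'net_io_counters')
+#     {'eth0': (100, 200)}
+#     >>> wrap_numbers({'eth0': (10, 200)}, 'net_io_counters')  # 1st wrapped
+#     {'eth0': (110, 200)}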
+
+# The read buffer size for open() builtin. This (also) dictates how
+# much data we read(2) when iterating over file lines as in:
+# >>> with open(file) as f:
+# ... for line in f:
+# ... ...
+# Default per-line buffer size for binary files is 1K; for text files
+# it is 8K. We use a bigger buffer (32K) in order to have more consistent
+# results when reading /proc pseudo files on Linux, see:
+# https://github.com/giampaolo/psutil/issues/2050
+# On Python 2 this also speeds up the reading of big files:
+# (namely /proc/{pid}/smaps and /proc/net/*):
+# https://github.com/giampaolo/psutil/issues/708
+FILE_READ_BUFFER_SIZE = 32 * 1024
+
+
+def open_binary(fname):
+ return open(fname, "rb", buffering=FILE_READ_BUFFER_SIZE)
+
+
+def open_text(fname):
+ """On Python 3 opens a file in text mode by using fs encoding and
+ a proper en/decoding errors handler.
+ On Python 2 this is just an alias for open(name, 'rt').
+ """
+ if not PY3:
+ return open(fname, "rt", buffering=FILE_READ_BUFFER_SIZE)
+
+ # See:
+ # https://github.com/giampaolo/psutil/issues/675
+ # https://github.com/giampaolo/psutil/pull/733
+ fobj = open(fname, "rt", buffering=FILE_READ_BUFFER_SIZE,
+ encoding=ENCODING, errors=ENCODING_ERRS)
+ try:
+        # Dictates per-line read(2) buffer size. Default is 8k. See:
+ # https://github.com/giampaolo/psutil/issues/2050#issuecomment-1013387546
+ fobj._CHUNK_SIZE = FILE_READ_BUFFER_SIZE
+ except AttributeError:
+ pass
+ except Exception:
+ fobj.close()
+ raise
+
+ return fobj
+
+
+def cat(fname, fallback=_DEFAULT, _open=open_text):
+ """Read entire file content and return it as a string. File is
+ opened in text mode. If specified, `fallback` is the value
+ returned in case of error, either if the file does not exist or
+ it can't be read().
+ """
+ if fallback is _DEFAULT:
+ with _open(fname) as f:
+ return f.read()
+ else:
+ try:
+ with _open(fname) as f:
+ return f.read()
+ except (IOError, OSError):
+ return fallback
+
+
+def bcat(fname, fallback=_DEFAULT):
+ """Same as above but opens file in binary mode."""
+ return cat(fname, fallback=fallback, _open=open_binary)
+
+
+def bytes2human(n, format="%(value).1f%(symbol)s"):
+ """Used by various scripts. See:
+ http://goo.gl/zeJZl
+
+ >>> bytes2human(10000)
+ '9.8K'
+ >>> bytes2human(100001221)
+ '95.4M'
+ """
+ symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+ prefix = {}
+ for i, s in enumerate(symbols[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+ for symbol in reversed(symbols[1:]):
+ if n >= prefix[symbol]:
+ value = float(n) / prefix[symbol]
+ return format % locals()
+ return format % dict(symbol=symbols[0], value=n)
+
+
+def get_procfs_path():
+ """Return updated psutil.PROCFS_PATH constant."""
+ return sys.modules['psutil'].PROCFS_PATH
+
+
+if PY3:
+ def decode(s):
+ return s.decode(encoding=ENCODING, errors=ENCODING_ERRS)
+else:
+ def decode(s):
+ return s
+
+
+# =====================================================================
+# --- shell utils
+# =====================================================================
+
+
+@memoize
+def term_supports_colors(file=sys.stdout): # pragma: no cover
+ if os.name == 'nt':
+ return True
+ try:
+ import curses
+ assert file.isatty()
+ curses.setupterm()
+ assert curses.tigetnum("colors") > 0
+ except Exception:
+ return False
+ else:
+ return True
+
+
+def hilite(s, color=None, bold=False): # pragma: no cover
+ """Return an highlighted version of 'string'."""
+ if not term_supports_colors():
+ return s
+ attr = []
+ colors = dict(green='32', red='91', brown='33', yellow='93', blue='34',
+ violet='35', lightblue='36', grey='37', darkgrey='30')
+ colors[None] = '29'
+ try:
+ color = colors[color]
+ except KeyError:
+ raise ValueError("invalid color %r; choose between %s" % (
+ list(colors.keys())))
+ attr.append(color)
+ if bold:
+ attr.append('1')
+ return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s)
+
+
+def print_color(
+ s, color=None, bold=False, file=sys.stdout): # pragma: no cover
+ """Print a colorized version of string."""
+ if not term_supports_colors():
+ print(s, file=file) # NOQA
+ elif POSIX:
+ print(hilite(s, color, bold), file=file) # NOQA
+ else:
+ import ctypes
+
+ DEFAULT_COLOR = 7
+ GetStdHandle = ctypes.windll.Kernel32.GetStdHandle
+ SetConsoleTextAttribute = \
+ ctypes.windll.Kernel32.SetConsoleTextAttribute
+
+ colors = dict(green=2, red=4, brown=6, yellow=6)
+ colors[None] = DEFAULT_COLOR
+ try:
+ color = colors[color]
+ except KeyError:
+ raise ValueError("invalid color %r; choose between %r" % (
+ color, list(colors.keys())))
+ if bold and color <= 7:
+ color += 8
+
+ handle_id = -12 if file is sys.stderr else -11
+ GetStdHandle.restype = ctypes.c_ulong
+ handle = GetStdHandle(handle_id)
+ SetConsoleTextAttribute(handle, color)
+ try:
+ print(s, file=file) # NOQA
+ finally:
+ SetConsoleTextAttribute(handle, DEFAULT_COLOR)
+
+
+def debug(msg):
+ """If PSUTIL_DEBUG env var is set, print a debug message to stderr."""
+ if PSUTIL_DEBUG:
+ import inspect
+ fname, lineno, func_name, lines, index = inspect.getframeinfo(
+ inspect.currentframe().f_back)
+ if isinstance(msg, Exception):
+ if isinstance(msg, (OSError, IOError, EnvironmentError)):
+ # ...because str(exc) may contain info about the file name
+ msg = "ignoring %s" % msg
+ else:
+ msg = "ignoring %r" % msg
+ print("psutil-debug [%s:%s]> %s" % (fname, lineno, msg), # NOQA
+ file=sys.stderr)
diff --git a/lib/psutil/_compat.py b/lib/psutil/_compat.py
new file mode 100644
index 0000000..52e762b
--- /dev/null
+++ b/lib/psutil/_compat.py
@@ -0,0 +1,450 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module which provides compatibility with older Python versions.
+It leans towards forward compatibility rather than the opposite
+(prefer the latest Python 3 way of doing things).
+"""
+
+import collections
+import contextlib
+import errno
+import functools
+import os
+import sys
+import types
+
+
+__all__ = [
+ # constants
+ "PY3",
+ # builtins
+ "long", "range", "super", "unicode", "basestring",
+ # literals
+ "u", "b",
+ # collections module
+ "lru_cache",
+ # shutil module
+ "which", "get_terminal_size",
+ # contextlib module
+ "redirect_stderr",
+ # python 3 exceptions
+ "FileNotFoundError", "PermissionError", "ProcessLookupError",
+ "InterruptedError", "ChildProcessError", "FileExistsError"]
+
+
+PY3 = sys.version_info[0] == 3
+_SENTINEL = object()
+
+if PY3:
+ long = int
+ xrange = range
+ unicode = str
+ basestring = str
+ range = range
+
+ def u(s):
+ return s
+
+ def b(s):
+ return s.encode("latin-1")
+else:
+ long = long
+ range = xrange
+ unicode = unicode
+ basestring = basestring
+
+ def u(s):
+ return unicode(s, "unicode_escape")
+
+ def b(s):
+ return s
+
+
+# --- builtins
+
+
+# Python 3 super().
+# Taken from "future" package.
+# Credit: Ryan Kelly
+if PY3:
+ super = super
+else:
+ _builtin_super = super
+
+ def super(type_=_SENTINEL, type_or_obj=_SENTINEL, framedepth=1):
+ """Like Python 3 builtin super(). If called without any arguments
+ it attempts to infer them at runtime.
+ """
+ if type_ is _SENTINEL:
+ f = sys._getframe(framedepth)
+ try:
+ # Get the function's first positional argument.
+ type_or_obj = f.f_locals[f.f_code.co_varnames[0]]
+ except (IndexError, KeyError):
+ raise RuntimeError('super() used in a function with no args')
+ try:
+ # Get the MRO so we can crawl it.
+ mro = type_or_obj.__mro__
+ except (AttributeError, RuntimeError):
+ try:
+ mro = type_or_obj.__class__.__mro__
+ except AttributeError:
+ raise RuntimeError('super() used in a non-newstyle class')
+ for type_ in mro:
+ # Find the class that owns the currently-executing method.
+ for meth in type_.__dict__.values():
+ # Drill down through any wrappers to the underlying func.
+ # This handles e.g. classmethod() and staticmethod().
+ try:
+ while not isinstance(meth, types.FunctionType):
+ if isinstance(meth, property):
+ # Calling __get__ on the property will invoke
+ # user code which might throw exceptions or
+ # have side effects
+ meth = meth.fget
+ else:
+ try:
+ meth = meth.__func__
+ except AttributeError:
+ meth = meth.__get__(type_or_obj, type_)
+ except (AttributeError, TypeError):
+ continue
+ if meth.func_code is f.f_code:
+ break # found
+ else:
+ # Not found. Move onto the next class in MRO.
+ continue
+ break # found
+ else:
+ raise RuntimeError('super() called outside a method')
+
+ # Dispatch to builtin super().
+ if type_or_obj is not _SENTINEL:
+ return _builtin_super(type_, type_or_obj)
+ return _builtin_super(type_)
+
+
+# --- exceptions
+
+
+if PY3:
+ FileNotFoundError = FileNotFoundError # NOQA
+ PermissionError = PermissionError # NOQA
+ ProcessLookupError = ProcessLookupError # NOQA
+ InterruptedError = InterruptedError # NOQA
+ ChildProcessError = ChildProcessError # NOQA
+ FileExistsError = FileExistsError # NOQA
+else:
+ # https://github.com/PythonCharmers/python-future/blob/exceptions/
+ # src/future/types/exceptions/pep3151.py
+ import platform
+
+ def _instance_checking_exception(base_exception=Exception):
+ def wrapped(instance_checker):
+ class TemporaryClass(base_exception):
+
+ def __init__(self, *args, **kwargs):
+ if len(args) == 1 and isinstance(args[0], TemporaryClass):
+ unwrap_me = args[0]
+ for attr in dir(unwrap_me):
+ if not attr.startswith('__'):
+ setattr(self, attr, getattr(unwrap_me, attr))
+ else:
+ super(TemporaryClass, self).__init__(*args, **kwargs)
+
+ class __metaclass__(type):
+ def __instancecheck__(cls, inst):
+ return instance_checker(inst)
+
+ def __subclasscheck__(cls, classinfo):
+ value = sys.exc_info()[1]
+ return isinstance(value, cls)
+
+ TemporaryClass.__name__ = instance_checker.__name__
+ TemporaryClass.__doc__ = instance_checker.__doc__
+ return TemporaryClass
+
+ return wrapped
+
+ @_instance_checking_exception(EnvironmentError)
+ def FileNotFoundError(inst):
+ return getattr(inst, 'errno', _SENTINEL) == errno.ENOENT
+
+ @_instance_checking_exception(EnvironmentError)
+ def ProcessLookupError(inst):
+ return getattr(inst, 'errno', _SENTINEL) == errno.ESRCH
+
+ @_instance_checking_exception(EnvironmentError)
+ def PermissionError(inst):
+ return getattr(inst, 'errno', _SENTINEL) in (
+ errno.EACCES, errno.EPERM)
+
+ @_instance_checking_exception(EnvironmentError)
+ def InterruptedError(inst):
+ return getattr(inst, 'errno', _SENTINEL) == errno.EINTR
+
+ @_instance_checking_exception(EnvironmentError)
+ def ChildProcessError(inst):
+ return getattr(inst, 'errno', _SENTINEL) == errno.ECHILD
+
+ @_instance_checking_exception(EnvironmentError)
+ def FileExistsError(inst):
+ return getattr(inst, 'errno', _SENTINEL) == errno.EEXIST
+
+ if platform.python_implementation() != "CPython":
+ try:
+ raise OSError(errno.EEXIST, "perm")
+ except FileExistsError:
+ pass
+ except OSError:
+ raise RuntimeError(
+ "broken or incompatible Python implementation, see: "
+ "https://github.com/giampaolo/psutil/issues/1659")
+
+
+# --- stdlib additions
+
+
+# py 3.2 functools.lru_cache
+# Taken from: http://code.activestate.com/recipes/578078
+# Credit: Raymond Hettinger
+try:
+ from functools import lru_cache
+except ImportError:
+ try:
+ from threading import RLock
+ except ImportError:
+ from dummy_threading import RLock
+
+ _CacheInfo = collections.namedtuple(
+ "CacheInfo", ["hits", "misses", "maxsize", "currsize"])
+
+ class _HashedSeq(list):
+ __slots__ = 'hashvalue'
+
+ def __init__(self, tup, hash=hash):
+ self[:] = tup
+ self.hashvalue = hash(tup)
+
+ def __hash__(self):
+ return self.hashvalue
+
+ def _make_key(args, kwds, typed,
+ kwd_mark=(_SENTINEL, ),
+ fasttypes=set((int, str, frozenset, type(None))), # noqa
+ sorted=sorted, tuple=tuple, type=type, len=len):
+ key = args
+ if kwds:
+ sorted_items = sorted(kwds.items())
+ key += kwd_mark
+ for item in sorted_items:
+ key += item
+ if typed:
+ key += tuple(type(v) for v in args)
+ if kwds:
+ key += tuple(type(v) for k, v in sorted_items)
+ elif len(key) == 1 and type(key[0]) in fasttypes:
+ return key[0]
+ return _HashedSeq(key)
+
+ def lru_cache(maxsize=100, typed=False):
+ """Least-recently-used cache decorator, see:
+ http://docs.python.org/3/library/functools.html#functools.lru_cache
+ """
+ def decorating_function(user_function):
+ cache = dict()
+ stats = [0, 0]
+ HITS, MISSES = 0, 1
+ make_key = _make_key
+ cache_get = cache.get
+ _len = len
+ lock = RLock()
+ root = []
+ root[:] = [root, root, None, None]
+ nonlocal_root = [root]
+ PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
+ if maxsize == 0:
+ def wrapper(*args, **kwds):
+ result = user_function(*args, **kwds)
+ stats[MISSES] += 1
+ return result
+ elif maxsize is None:
+ def wrapper(*args, **kwds):
+ key = make_key(args, kwds, typed)
+ result = cache_get(key, root)
+ if result is not root:
+ stats[HITS] += 1
+ return result
+ result = user_function(*args, **kwds)
+ cache[key] = result
+ stats[MISSES] += 1
+ return result
+ else:
+ def wrapper(*args, **kwds):
+ if kwds or typed:
+ key = make_key(args, kwds, typed)
+ else:
+ key = args
+ lock.acquire()
+ try:
+ link = cache_get(key)
+ if link is not None:
+ root, = nonlocal_root
+ link_prev, link_next, key, result = link
+ link_prev[NEXT] = link_next
+ link_next[PREV] = link_prev
+ last = root[PREV]
+ last[NEXT] = root[PREV] = link
+ link[PREV] = last
+ link[NEXT] = root
+ stats[HITS] += 1
+ return result
+ finally:
+ lock.release()
+ result = user_function(*args, **kwds)
+ lock.acquire()
+ try:
+ root, = nonlocal_root
+ if key in cache:
+ pass
+ elif _len(cache) >= maxsize:
+ oldroot = root
+ oldroot[KEY] = key
+ oldroot[RESULT] = result
+ root = nonlocal_root[0] = oldroot[NEXT]
+ oldkey = root[KEY]
+ root[KEY] = root[RESULT] = None
+ del cache[oldkey]
+ cache[key] = oldroot
+ else:
+ last = root[PREV]
+ link = [last, root, key, result]
+ last[NEXT] = root[PREV] = cache[key] = link
+ stats[MISSES] += 1
+ finally:
+ lock.release()
+ return result
+
+ def cache_info():
+ """Report cache statistics"""
+ lock.acquire()
+ try:
+ return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
+ len(cache))
+ finally:
+ lock.release()
+
+ def cache_clear():
+ """Clear the cache and cache statistics"""
+ lock.acquire()
+ try:
+ cache.clear()
+ root = nonlocal_root[0]
+ root[:] = [root, root, None, None]
+ stats[:] = [0, 0]
+ finally:
+ lock.release()
+
+ wrapper.__wrapped__ = user_function
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ return functools.update_wrapper(wrapper, user_function)
+
+ return decorating_function
+
+
+# python 3.3
+try:
+ from shutil import which
+except ImportError:
+ def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+ """Given a command, mode, and a PATH string, return the path which
+ conforms to the given mode on the PATH, or None if there is no such
+ file.
+
+ `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+ of os.environ.get("PATH"), or can be overridden with a custom search
+ path.
+ """
+ def _access_check(fn, mode):
+ return (os.path.exists(fn) and os.access(fn, mode) and
+ not os.path.isdir(fn))
+
+ if os.path.dirname(cmd):
+ if _access_check(cmd, mode):
+ return cmd
+ return None
+
+ if path is None:
+ path = os.environ.get("PATH", os.defpath)
+ if not path:
+ return None
+ path = path.split(os.pathsep)
+
+ if sys.platform == "win32":
+ if os.curdir not in path:
+ path.insert(0, os.curdir)
+
+ pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+ if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+ files = [cmd]
+ else:
+ files = [cmd + ext for ext in pathext]
+ else:
+ files = [cmd]
+
+ seen = set()
+ for dir in path:
+ normdir = os.path.normcase(dir)
+ if normdir not in seen:
+ seen.add(normdir)
+ for thefile in files:
+ name = os.path.join(dir, thefile)
+ if _access_check(name, mode):
+ return name
+ return None
+
+
+# python 3.3
+try:
+ from shutil import get_terminal_size
+except ImportError:
+ def get_terminal_size(fallback=(80, 24)):
+ try:
+ import fcntl
+ import struct
+ import termios
+ except ImportError:
+ return fallback
+ else:
+ try:
+ # This should work on Linux.
+ res = struct.unpack(
+ 'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
+ return (res[1], res[0])
+ except Exception:
+ return fallback
+
+
+# python 3.3
+try:
+ from subprocess import TimeoutExpired as SubprocessTimeoutExpired
+except ImportError:
+ class SubprocessTimeoutExpired:
+ pass
+
+
+# python 3.5
+try:
+ from contextlib import redirect_stderr
+except ImportError:
+ @contextlib.contextmanager
+ def redirect_stderr(new_target):
+ original = sys.stderr
+ try:
+ sys.stderr = new_target
+ yield new_target
+ finally:
+ sys.stderr = original
diff --git a/lib/psutil/_psaix.py b/lib/psutil/_psaix.py
new file mode 100644
index 0000000..2391478
--- /dev/null
+++ b/lib/psutil/_psaix.py
@@ -0,0 +1,555 @@
+# Copyright (c) 2009, Giampaolo Rodola'
+# Copyright (c) 2017, Arnon Yaari
+# All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""AIX platform implementation."""
+
+import functools
+import glob
+import os
+import re
+import subprocess
+import sys
+from collections import namedtuple
+
+from . import _common
+from . import _psposix
+from . import _psutil_aix as cext
+from . import _psutil_posix as cext_posix
+from ._common import NIC_DUPLEX_FULL
+from ._common import NIC_DUPLEX_HALF
+from ._common import NIC_DUPLEX_UNKNOWN
+from ._common import AccessDenied
+from ._common import NoSuchProcess
+from ._common import ZombieProcess
+from ._common import conn_to_ntuple
+from ._common import get_procfs_path
+from ._common import memoize_when_activated
+from ._common import usage_percent
+from ._compat import PY3
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+
+
+__extra__all__ = ["PROCFS_PATH"]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+HAS_THREADS = hasattr(cext, "proc_threads")
+HAS_NET_IO_COUNTERS = hasattr(cext, "net_io_counters")
+HAS_PROC_IO_COUNTERS = hasattr(cext, "proc_io_counters")
+
+PAGE_SIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SACTIVE: _common.STATUS_RUNNING,
+ cext.SSWAP: _common.STATUS_RUNNING, # TODO what status is this?
+ cext.SSTOP: _common.STATUS_STOPPED,
+}
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+proc_info_map = dict(
+ ppid=0,
+ rss=1,
+ vms=2,
+ create_time=3,
+ nice=4,
+ num_threads=5,
+ status=6,
+ ttynr=7)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+# psutil.Process.memory_full_info()
+pfullmem = pmem
+# psutil.Process.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ total, avail, free, pinned, inuse = cext.virtual_mem()
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, inuse, free)
+
+
+def swap_memory():
+ """Swap system memory as a (total, used, free, sin, sout) tuple."""
+ total, free, sin, sout = cext.swap_mem()
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system-wide CPU times as a named tuple"""
+ ret = cext.per_cpu_times()
+ return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples"""
+ ret = cext.per_cpu_times()
+ return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # mimic os.cpu_count() behavior
+ return None
+
+
+def cpu_count_cores():
+ cmd = "lsdev -Cc processor"
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode != 0:
+ raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+ processors = stdout.strip().splitlines()
+ return len(processors) or None
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ ctx_switches, interrupts, soft_interrupts, syscalls = cext.cpu_stats()
+ return _common.scpustats(
+ ctx_switches, interrupts, soft_interrupts, syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def disk_partitions(all=False):
+ """Return system disk partitions."""
+ # TODO - the filtering logic should be better checked so that
+ # it tries to reflect 'df' as much as possible
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+            # Unlike, say, Linux, we don't have a list of common fs
+            # types, so the best we can do, AFAIK, is to filter by
+            # filesystems having a total size > 0.
+ if not disk_usage(mountpoint).total:
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_if_addrs = cext_posix.net_if_addrs
+
+if HAS_NET_IO_COUNTERS:
+ net_io_counters = cext.net_io_counters
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ """
+ cmap = _common.conn_tmap
+ if kind not in cmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in cmap])))
+ families, types = _common.conn_tmap[kind]
+ rawlist = cext.net_connections(_pid)
+ ret = []
+ for item in rawlist:
+ fd, fam, type_, laddr, raddr, status, pid = item
+ if fam not in families:
+ continue
+ if type_ not in types:
+ continue
+ nt = conn_to_ntuple(fd, fam, type_, laddr, raddr, status,
+ TCP_STATUSES, pid=pid if _pid == -1 else None)
+ ret.append(nt)
+ return ret
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ duplex_map = {"Full": NIC_DUPLEX_FULL,
+ "Half": NIC_DUPLEX_HALF}
+ names = set([x[0] for x in net_if_addrs()])
+ ret = {}
+ for name in names:
+ mtu = cext_posix.net_if_mtu(name)
+ flags = cext_posix.net_if_flags(name)
+
+ # try to get speed and duplex
+ # TODO: rewrite this in C (entstat forks, so use truss -f to follow.
+ # looks like it is using an undocumented ioctl?)
+ duplex = ""
+ speed = 0
+ p = subprocess.Popen(["/usr/bin/entstat", "-d", name],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode == 0:
+ re_result = re.search(
+ r"Running: (\d+) Mbps.*?(\w+) Duplex", stdout)
+ if re_result is not None:
+ speed = int(re_result.group(1))
+ duplex = re_result.group(2)
+
+ output_flags = ','.join(flags)
+ isup = 'running' in flags
+ duplex = duplex_map.get(duplex, NIC_DUPLEX_UNKNOWN)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu, output_flags)
+ return ret
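+
+# Note on the entstat parsing above (illustrative sample line, not actual
+# command output): a line like "Media Speed Running: 100 Mbps Full Duplex"
+# would yield speed=100 and duplex="Full", which net_if_stats() then maps
+# to NIC_DUPLEX_FULL.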
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ localhost = (':0.0', ':0')
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in localhost:
+ hostname = 'localhost'
+ nt = _common.suser(user, tty, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(get_procfs_path()) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix pid."""
+ return os.path.exists(os.path.join(get_procfs_path(), str(pid), "psinfo"))
+
+
+def wrap_exceptions(fun):
+ """Call callable into a try/except clause and translate ENOENT,
+ EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
+ def oneshot_enter(self):
+ self._proc_basic_info.cache_activate(self)
+ self._proc_cred.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_basic_info.cache_deactivate(self)
+ self._proc_cred.cache_deactivate(self)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_basic_info(self):
+ return cext.proc_basic_info(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_cred(self):
+ return cext.proc_cred(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def name(self):
+ if self.pid == 0:
+ return "swapper"
+ # note: max 16 characters
+ return cext.proc_name(self.pid, self._procfs_path).rstrip("\x00")
+
+ @wrap_exceptions
+ def exe(self):
+        # there is no way to get the executable path on AIX other than to
+        # guess, and guessing is more complex than what's in the wrapping class
+ cmdline = self.cmdline()
+ if not cmdline:
+ return ''
+ exe = cmdline[0]
+ if os.path.sep in exe:
+ # relative or absolute path
+ if not os.path.isabs(exe):
+ # if cwd has changed, we're out of luck - this may be wrong!
+ exe = os.path.abspath(os.path.join(self.cwd(), exe))
+ if (os.path.isabs(exe) and
+ os.path.isfile(exe) and
+ os.access(exe, os.X_OK)):
+ return exe
+ # not found, move to search in PATH using basename only
+ exe = os.path.basename(exe)
+        # search for exe name in PATH
+ for path in os.environ["PATH"].split(":"):
+ possible_exe = os.path.abspath(os.path.join(path, exe))
+ if (os.path.isfile(possible_exe) and
+ os.access(possible_exe, os.X_OK)):
+ return possible_exe
+ return ''
+
+ @wrap_exceptions
+ def cmdline(self):
+ return cext.proc_args(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._proc_basic_info()[proc_info_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_basic_info()[proc_info_map['num_threads']]
+
+ if HAS_THREADS:
+ @wrap_exceptions
+ def threads(self):
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ # The underlying C implementation retrieves all OS threads
+ # and filters them by PID. At this point we can't tell whether
+            # an empty list means there were no threads for the process or
+            # the process is no longer active, so we force NSP in case the
+            # PID is no longer there.
+ if not retlist:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = net_connections(kind, _pid=self.pid)
+ # The underlying C implementation retrieves all OS connections
+ # and filters them by PID. At this point we can't tell whether
+ # an empty list means there were no connections for process or
+ # process is no longer active so we force NSP in case the PID
+ # is no longer there.
+ if not ret:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+ return ret
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ real, effective, saved, _, _, _ = self._proc_cred()
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def gids(self):
+ _, _, _, real, effective, saved = self._proc_cred()
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def cpu_times(self):
+ cpu_times = cext.proc_cpu_times(self.pid, self._procfs_path)
+ return _common.pcputimes(*cpu_times)
+
+ @wrap_exceptions
+ def terminal(self):
+ ttydev = self._proc_basic_info()[proc_info_map['ttynr']]
+ # convert from 64-bit dev_t to 32-bit dev_t and then map the device
+ ttydev = (((ttydev & 0x0000FFFF00000000) >> 16) | (ttydev & 0xFFFF))
+        # try to match the rdev of /dev/**/* files against ttydev
+ for dev in glob.glob("/dev/**/*"):
+ if os.stat(dev).st_rdev == ttydev:
+ return dev
+ return None
+
+ @wrap_exceptions
+ def cwd(self):
+ procfs_path = self._procfs_path
+ try:
+ result = os.readlink("%s/%s/cwd" % (procfs_path, self.pid))
+ return result.rstrip('/')
+ except FileNotFoundError:
+ os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
+ ret = self._proc_basic_info()
+ rss = ret[proc_info_map['rss']] * 1024
+ vms = ret[proc_info_map['vms']] * 1024
+ return pmem(rss, vms)
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def status(self):
+ code = self._proc_basic_info()[proc_info_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ def open_files(self):
+ # TODO rewrite without using procfiles (stat /proc/pid/fd/* and then
+ # find matching name of the inode)
+ p = subprocess.Popen(["/usr/bin/procfiles", "-n", str(self.pid)],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if "no such process" in stderr.lower():
+ raise NoSuchProcess(self.pid, self._name)
+ procfiles = re.findall(r"(\d+): S_IFREG.*\s*.*name:(.*)\n", stdout)
+ retlist = []
+ for fd, path in procfiles:
+ path = path.strip()
+ if path.startswith("//"):
+ path = path[1:]
+ if path.lower() == "cannot be retrieved":
+ continue
+ retlist.append(_common.popenfile(path, int(fd)))
+ return retlist
+
+ @wrap_exceptions
+ def num_fds(self):
+ if self.pid == 0: # no /proc/0/fd
+ return 0
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ return _common.pctxsw(
+ *cext.proc_num_ctx_switches(self.pid))
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ if HAS_PROC_IO_COUNTERS:
+ @wrap_exceptions
+ def io_counters(self):
+ try:
+ rc, wc, rb, wb = cext.proc_io_counters(self.pid)
+ except OSError:
+ # if process is terminated, proc_io_counters returns OSError
+ # instead of NSP
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ raise
+ return _common.pio(rc, wc, rb, wb)
diff --git a/lib/psutil/_psbsd.py b/lib/psutil/_psbsd.py
new file mode 100644
index 0000000..a25c96c
--- /dev/null
+++ b/lib/psutil/_psbsd.py
@@ -0,0 +1,927 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""FreeBSD, OpenBSD and NetBSD platforms implementation."""
+
+import contextlib
+import errno
+import functools
+import os
+import xml.etree.ElementTree as ET
+from collections import defaultdict
+from collections import namedtuple
+
+from . import _common
+from . import _psposix
+from . import _psutil_bsd as cext
+from . import _psutil_posix as cext_posix
+from ._common import FREEBSD
+from ._common import NETBSD
+from ._common import OPENBSD
+from ._common import AccessDenied
+from ._common import NoSuchProcess
+from ._common import ZombieProcess
+from ._common import conn_tmap
+from ._common import conn_to_ntuple
+from ._common import memoize
+from ._common import memoize_when_activated
+from ._common import usage_percent
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import which
+
+
+__extra__all__ = []
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+if FREEBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SWAIT: _common.STATUS_WAITING,
+ cext.SLOCK: _common.STATUS_LOCKED,
+ }
+elif OPENBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ # According to /usr/include/sys/proc.h SZOMB is unused.
+ # test_zombie_process() shows that SDEAD is the right
+ # equivalent. Also it appears there's no equivalent of
+ # psutil.STATUS_DEAD. SDEAD really means STATUS_ZOMBIE.
+ # cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SDEAD: _common.STATUS_ZOMBIE,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ # From http://www.eecs.harvard.edu/~margo/cs161/videos/proc.h.txt
+ # OpenBSD has SRUN and SONPROC: SRUN indicates that a process
+ # is runnable but *not* yet running, i.e. is on a run queue.
+ # SONPROC indicates that the process is actually executing on
+ # a CPU, i.e. it is no longer on a run queue.
+ # As such we'll map SRUN to STATUS_WAKING and SONPROC to
+ # STATUS_RUNNING
+ cext.SRUN: _common.STATUS_WAKING,
+ cext.SONPROC: _common.STATUS_RUNNING,
+ }
+elif NETBSD:
+ PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SRUN: _common.STATUS_WAKING,
+ cext.SONPROC: _common.STATUS_RUNNING,
+ }
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PAGESIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+HAS_PER_CPU_TIMES = hasattr(cext, "per_cpu_times")
+HAS_PROC_NUM_THREADS = hasattr(cext, "proc_num_threads")
+HAS_PROC_OPEN_FILES = hasattr(cext, 'proc_open_files')
+HAS_PROC_NUM_FDS = hasattr(cext, 'proc_num_fds')
+
+kinfo_proc_map = dict(
+ ppid=0,
+ status=1,
+ real_uid=2,
+ effective_uid=3,
+ saved_uid=4,
+ real_gid=5,
+ effective_gid=6,
+ saved_gid=7,
+ ttynr=8,
+ create_time=9,
+ ctx_switches_vol=10,
+ ctx_switches_unvol=11,
+ read_io_count=12,
+ write_io_count=13,
+ user_time=14,
+ sys_time=15,
+ ch_user_time=16,
+ ch_sys_time=17,
+ rss=18,
+ vms=19,
+ memtext=20,
+ memdata=21,
+ memstack=22,
+ cpunum=23,
+ name=24,
+)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.virtual_memory()
+svmem = namedtuple(
+ 'svmem', ['total', 'available', 'percent', 'used', 'free',
+ 'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
+# psutil.cpu_times()
+scputimes = namedtuple(
+ 'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms', 'text', 'data', 'stack'])
+# psutil.Process.memory_full_info()
+pfullmem = pmem
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple(
+ 'pmmap_grouped', 'path rss, private, ref_count, shadow_count')
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')
+# psutil.disk_io_counters()
+if FREEBSD:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time',
+ 'busy_time'])
+else:
+ sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes'])
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """System virtual memory as a namedtuple."""
+ mem = cext.virtual_mem()
+ total, free, active, inactive, wired, cached, buffers, shared = mem
+ if NETBSD:
+        # On NetBSD buffers and shared mem are determined via /proc.
+        # The C ext sets them to 0.
+ with open('/proc/meminfo', 'rb') as f:
+ for line in f:
+ if line.startswith(b'Buffers:'):
+ buffers = int(line.split()[1]) * 1024
+ elif line.startswith(b'MemShared:'):
+ shared = int(line.split()[1]) * 1024
+ elif line.startswith(b'Cached:'):
+ cached = int(line.split()[1]) * 1024
+ avail = inactive + cached + free
+ used = active + wired + cached
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, used, free,
+ active, inactive, buffers, cached, shared, wired)
+
+
+def swap_memory():
+ """System swap memory as (total, used, free, sin, sout) namedtuple."""
+ total, used, free, sin, sout = cext.swap_mem()
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system per-CPU times as a namedtuple"""
+ user, nice, system, idle, irq = cext.cpu_times()
+ return scputimes(user, nice, system, idle, irq)
+
+
+if HAS_PER_CPU_TIMES:
+ def per_cpu_times():
+ """Return system CPU times as a namedtuple"""
+ ret = []
+ for cpu_t in cext.per_cpu_times():
+ user, nice, system, idle, irq = cpu_t
+ item = scputimes(user, nice, system, idle, irq)
+ ret.append(item)
+ return ret
+else:
+ # XXX
+ # Ok, this is very dirty.
+ # On FreeBSD < 8 we cannot gather per-cpu information, see:
+ # https://github.com/giampaolo/psutil/issues/226
+ # If num cpus > 1, on first call we return single cpu times to avoid a
+ # crash at psutil import time.
+ # Next calls will fail with NotImplementedError
+ def per_cpu_times():
+ """Return system CPU times as a namedtuple"""
+ if cpu_count_logical() == 1:
+ return [cpu_times()]
+ if per_cpu_times.__called__:
+ raise NotImplementedError("supported only starting from FreeBSD 8")
+ per_cpu_times.__called__ = True
+ return [cpu_times()]
+
+ per_cpu_times.__called__ = False
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ return cext.cpu_count_logical()
+
+
+if OPENBSD or NETBSD:
+ def cpu_count_cores():
+ # OpenBSD and NetBSD do not implement this.
+ return 1 if cpu_count_logical() == 1 else None
+else:
+ def cpu_count_cores():
+ """Return the number of CPU cores in the system."""
+ # From the C module we'll get an XML string similar to this:
+ # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
+ # We may get None in case "sysctl kern.sched.topology_spec"
+ # is not supported on this BSD version, in which case we'll mimic
+ # os.cpu_count() and return None.
+ ret = None
+ s = cext.cpu_topology()
+ if s is not None:
+ # get rid of padding chars appended at the end of the string
+ index = s.rfind("</groups>")
+ if index != -1:
+ s = s[:index + 9]
+ root = ET.fromstring(s)
+ try:
+ ret = len(root.findall('group/children/group/cpu')) or None
+ finally:
+ # needed otherwise it will memleak
+ root.clear()
+ if not ret:
+        # If logical CPUs == 1 it's obvious we have only 1 core.
+ if cpu_count_logical() == 1:
+ return 1
+ return ret
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ if FREEBSD:
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps.
+ ctxsw, intrs, soft_intrs, syscalls, traps = cext.cpu_stats()
+ elif NETBSD:
+ # XXX
+ # Note about intrs: the C extension returns 0. intrs
+ # can be determined via /proc/stat; it has the same value as
+        # soft_intrs though, so the kernel is faking it (?).
+ #
+ # Note about syscalls: the C extension always sets it to 0 (?).
+ #
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps, faults and forks.
+ ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \
+ cext.cpu_stats()
+ with open('/proc/stat', 'rb') as f:
+ for line in f:
+ if line.startswith(b'intr'):
+ intrs = int(line.split()[1])
+ elif OPENBSD:
+ # Note: the C ext is returning some metrics we are not exposing:
+ # traps, faults and forks.
+ ctxsw, intrs, soft_intrs, syscalls, traps, faults, forks = \
+ cext.cpu_stats()
+ return _common.scpustats(ctxsw, intrs, soft_intrs, syscalls)
+
+
+if FREEBSD:
+ def cpu_freq():
+ """Return frequency metrics for CPUs. As of Dec 2018 only
+ CPU 0 appears to be supported by FreeBSD and all other cores
+ match the frequency of CPU 0.
+ """
+ ret = []
+ num_cpus = cpu_count_logical()
+ for cpu in range(num_cpus):
+ try:
+ current, available_freq = cext.cpu_freq(cpu)
+ except NotImplementedError:
+ continue
+ if available_freq:
+ try:
+ min_freq = int(available_freq.split(" ")[-1].split("/")[0])
+ except (IndexError, ValueError):
+ min_freq = None
+ try:
+ max_freq = int(available_freq.split(" ")[0].split("/")[0])
+ except (IndexError, ValueError):
+ max_freq = None
+ ret.append(_common.scpufreq(current, min_freq, max_freq))
+ return ret
+elif OPENBSD:
+ def cpu_freq():
+ curr = float(cext.cpu_freq())
+ return [_common.scpufreq(curr, 0.0, 0.0)]
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+def disk_partitions(all=False):
+ """Return mounted disk partitions as a list of namedtuples.
+ 'all' argument is ignored, see:
+ https://github.com/giampaolo/psutil/issues/906
+ """
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+disk_usage = _psposix.disk_usage
+disk_io_counters = cext.disk_io_counters
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ names = net_io_counters().keys()
+ ret = {}
+ for name in names:
+ try:
+ mtu = cext_posix.net_if_mtu(name)
+ flags = cext_posix.net_if_flags(name)
+ duplex, speed = cext_posix.net_if_duplex_speed(name)
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1279
+ if err.errno != errno.ENODEV:
+ raise
+ else:
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ output_flags = ','.join(flags)
+ isup = 'running' in flags
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu,
+ output_flags)
+ return ret
+
+
+def net_connections(kind):
+ """System-wide network connections."""
+ if OPENBSD:
+ ret = []
+ for pid in pids():
+ try:
+ cons = Process(pid).connections(kind)
+ except (NoSuchProcess, ZombieProcess):
+ continue
+ else:
+ for conn in cons:
+ conn = list(conn)
+ conn.append(pid)
+ ret.append(_common.sconn(*conn))
+ return ret
+
+ if kind not in _common.conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+ families, types = conn_tmap[kind]
+ ret = set()
+ if NETBSD:
+ rawlist = cext.net_connections(-1)
+ else:
+ rawlist = cext.net_connections()
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ # TODO: apply filter at C level
+ if fam in families and type in types:
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES, pid)
+ ret.add(nt)
+ return list(ret)
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+if FREEBSD:
+
+ def sensors_battery():
+ """Return battery info."""
+ try:
+ percent, minsleft, power_plugged = cext.sensors_battery()
+ except NotImplementedError:
+ # See: https://github.com/giampaolo/psutil/issues/1074
+ return None
+ power_plugged = power_plugged == 1
+ if power_plugged:
+ secsleft = _common.POWER_TIME_UNLIMITED
+ elif minsleft == -1:
+ secsleft = _common.POWER_TIME_UNKNOWN
+ else:
+ secsleft = minsleft * 60
+ return _common.sbattery(percent, secsleft, power_plugged)
+
+ def sensors_temperatures():
+ """Return CPU cores temperatures if available, else an empty dict."""
+ ret = defaultdict(list)
+ num_cpus = cpu_count_logical()
+ for cpu in range(num_cpus):
+ try:
+ current, high = cext.sensors_cpu_temperature(cpu)
+ if high <= 0:
+ high = None
+ name = "Core %s" % cpu
+ ret["coretemp"].append(
+ _common.shwtemp(name, current, high, high))
+ except NotImplementedError:
+ pass
+
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ for item in rawlist:
+ user, tty, hostname, tstamp, pid = item
+ if pid == -1:
+ assert OPENBSD
+ pid = None
+ if tty == '~':
+ continue # reboot or shutdown
+ nt = _common.suser(user, tty or None, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+@memoize
+def _pid_0_exists():
+ try:
+ Process(0).name()
+ except NoSuchProcess:
+ return False
+ except AccessDenied:
+ return True
+ else:
+ return True
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ ret = cext.pids()
+ if OPENBSD and (0 not in ret) and _pid_0_exists():
+ # On OpenBSD the kernel does not return PID 0 (neither does
+        # ps) but it's actually queryable (Process(0) will succeed).
+ ret.insert(0, 0)
+ return ret
+
+
+if OPENBSD or NETBSD:
+ def pid_exists(pid):
+ """Return True if pid exists."""
+ exists = _psposix.pid_exists(pid)
+ if not exists:
+ # We do this because _psposix.pid_exists() lies in case of
+ # zombie processes.
+ return pid in pids()
+ else:
+ return True
+else:
+ pid_exists = _psposix.pid_exists
+
+
+def is_zombie(pid):
+ try:
+ st = cext.proc_oneshot_info(pid)[kinfo_proc_map['status']]
+ return st == cext.SZOMB
+ except Exception:
+ return False
+
+
+def wrap_exceptions(fun):
+ """Decorator which translates bare OSError exceptions into
+ NoSuchProcess and AccessDenied.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except ProcessLookupError:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ else:
+ raise NoSuchProcess(self.pid, self._name)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except OSError:
+ if self.pid == 0:
+ if 0 in pids():
+ raise AccessDenied(self.pid, self._name)
+ else:
+ raise
+ raise
+ return wrapper
+
+
+@contextlib.contextmanager
+def wrap_exceptions_procfs(inst):
+ """Same as above, for routines relying on reading /proc fs."""
+ try:
+ yield
+ except (ProcessLookupError, FileNotFoundError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+ # process is gone in meantime.
+ if is_zombie(inst.pid):
+ raise ZombieProcess(inst.pid, inst._name, inst._ppid)
+ else:
+ raise NoSuchProcess(inst.pid, inst._name)
+ except PermissionError:
+ raise AccessDenied(inst.pid, inst._name)
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+        # For those C functions which do not raise NSP, possibly returning
+        # an incorrect or incomplete result.
+ cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def oneshot(self):
+ """Retrieves multiple process info in one shot as a raw tuple."""
+ ret = cext.proc_oneshot_info(self.pid)
+ assert len(ret) == len(kinfo_proc_map)
+ return ret
+
+ def oneshot_enter(self):
+ self.oneshot.cache_activate(self)
+
+ def oneshot_exit(self):
+ self.oneshot.cache_deactivate(self)
+
+ @wrap_exceptions
+ def name(self):
+ name = self.oneshot()[kinfo_proc_map['name']]
+ return name if name is not None else cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ def exe(self):
+ if FREEBSD:
+ if self.pid == 0:
+ return '' # else NSP
+ return cext.proc_exe(self.pid)
+ elif NETBSD:
+ if self.pid == 0:
+ # /proc/0 dir exists but /proc/0/exe doesn't
+ return ""
+ with wrap_exceptions_procfs(self):
+ return os.readlink("/proc/%s/exe" % self.pid)
+ else:
+ # OpenBSD: exe cannot be determined; references:
+ # https://chromium.googlesource.com/chromium/src/base/+/
+ # master/base_paths_posix.cc
+ # We try our best guess by using which against the first
+ # cmdline arg (may return None).
+ cmdline = self.cmdline()
+ if cmdline:
+ return which(cmdline[0]) or ""
+ else:
+ return ""
+
+ @wrap_exceptions
+ def cmdline(self):
+ if OPENBSD and self.pid == 0:
+ return [] # ...else it crashes
+ elif NETBSD:
+            # XXX - most of the time the underlying sysctl() call on
+            # NetBSD and OpenBSD returns a truncated string.
+            # Also /proc/pid/cmdline behaves the same, so it looks
+            # like this is a kernel bug.
+ try:
+ return cext.proc_cmdline(self.pid)
+ except OSError as err:
+ if err.errno == errno.EINVAL:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ elif not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name, self._ppid)
+ else:
+ # XXX: this happens with unicode tests. It means the C
+ # routine is unable to decode invalid unicode chars.
+ return []
+ else:
+ raise
+ else:
+ return cext.proc_cmdline(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid)
+
+ @wrap_exceptions
+ def terminal(self):
+ tty_nr = self.oneshot()[kinfo_proc_map['ttynr']]
+ tmap = _psposix.get_terminal_map()
+ try:
+ return tmap[tty_nr]
+ except KeyError:
+ return None
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self.oneshot()[kinfo_proc_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ rawtuple = self.oneshot()
+ return _common.puids(
+ rawtuple[kinfo_proc_map['real_uid']],
+ rawtuple[kinfo_proc_map['effective_uid']],
+ rawtuple[kinfo_proc_map['saved_uid']])
+
+ @wrap_exceptions
+ def gids(self):
+ rawtuple = self.oneshot()
+ return _common.pgids(
+ rawtuple[kinfo_proc_map['real_gid']],
+ rawtuple[kinfo_proc_map['effective_gid']],
+ rawtuple[kinfo_proc_map['saved_gid']])
+
+ @wrap_exceptions
+ def cpu_times(self):
+ rawtuple = self.oneshot()
+ return _common.pcputimes(
+ rawtuple[kinfo_proc_map['user_time']],
+ rawtuple[kinfo_proc_map['sys_time']],
+ rawtuple[kinfo_proc_map['ch_user_time']],
+ rawtuple[kinfo_proc_map['ch_sys_time']])
+
+ if FREEBSD:
+ @wrap_exceptions
+ def cpu_num(self):
+ return self.oneshot()[kinfo_proc_map['cpunum']]
+
+ @wrap_exceptions
+ def memory_info(self):
+ rawtuple = self.oneshot()
+ return pmem(
+ rawtuple[kinfo_proc_map['rss']],
+ rawtuple[kinfo_proc_map['vms']],
+ rawtuple[kinfo_proc_map['memtext']],
+ rawtuple[kinfo_proc_map['memdata']],
+ rawtuple[kinfo_proc_map['memstack']])
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def create_time(self):
+ return self.oneshot()[kinfo_proc_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ if HAS_PROC_NUM_THREADS:
+ # FreeBSD
+ return cext.proc_num_threads(self.pid)
+ else:
+ return len(self.threads())
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ rawtuple = self.oneshot()
+ return _common.pctxsw(
+ rawtuple[kinfo_proc_map['ctx_switches_vol']],
+ rawtuple[kinfo_proc_map['ctx_switches_unvol']])
+
+ @wrap_exceptions
+ def threads(self):
+        # Note: on OpenBSD this (/dev/mem) requires root access.
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ if OPENBSD:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ if kind not in conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+
+ if NETBSD:
+ families, types = conn_tmap[kind]
+ ret = []
+ rawlist = cext.net_connections(self.pid)
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ assert pid == self.pid
+ if fam in families and type in types:
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+ self._assert_alive()
+ return list(ret)
+
+ families, types = conn_tmap[kind]
+ rawlist = cext.proc_connections(self.pid, families, types)
+ ret = []
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status = item
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+
+ if OPENBSD:
+ self._assert_alive()
+
+ return ret
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def status(self):
+ code = self.oneshot()[kinfo_proc_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def io_counters(self):
+ rawtuple = self.oneshot()
+ return _common.pio(
+ rawtuple[kinfo_proc_map['read_io_count']],
+ rawtuple[kinfo_proc_map['write_io_count']],
+ -1,
+ -1)
+
+ @wrap_exceptions
+ def cwd(self):
+ """Return process current working directory."""
+ # sometimes we get an empty string, in which case we turn
+ # it into None
+ if OPENBSD and self.pid == 0:
+ return None # ...else it would raise EINVAL
+ elif NETBSD or HAS_PROC_OPEN_FILES:
+ # FreeBSD < 8 does not support functions based on
+ # kinfo_getfile() and kinfo_getvmmap()
+ return cext.proc_cwd(self.pid) or None
+ else:
+ raise NotImplementedError(
+ "supported only starting from FreeBSD 8" if
+ FREEBSD else "")
+
+ nt_mmap_grouped = namedtuple(
+ 'mmap', 'path rss, private, ref_count, shadow_count')
+ nt_mmap_ext = namedtuple(
+ 'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
+
+ def _not_implemented(self):
+ raise NotImplementedError
+
+ # FreeBSD < 8 does not support functions based on kinfo_getfile()
+ # and kinfo_getvmmap()
+ if HAS_PROC_OPEN_FILES:
+ @wrap_exceptions
+ def open_files(self):
+ """Return files opened by process as a list of namedtuples."""
+ rawlist = cext.proc_open_files(self.pid)
+ return [_common.popenfile(path, fd) for path, fd in rawlist]
+ else:
+ open_files = _not_implemented
+
+ # FreeBSD < 8 does not support functions based on kinfo_getfile()
+ # and kinfo_getvmmap()
+ if HAS_PROC_NUM_FDS:
+ @wrap_exceptions
+ def num_fds(self):
+ """Return the number of file descriptors opened by this process."""
+ ret = cext.proc_num_fds(self.pid)
+ if NETBSD:
+ self._assert_alive()
+ return ret
+ else:
+ num_fds = _not_implemented
+
+ # --- FreeBSD only APIs
+
+ if FREEBSD:
+
+ @wrap_exceptions
+ def cpu_affinity_get(self):
+ return cext.proc_cpu_affinity_get(self.pid)
+
+ @wrap_exceptions
+ def cpu_affinity_set(self, cpus):
+ # Pre-emptively check if CPUs are valid because the C
+ # function has a weird behavior in case of invalid CPUs,
+ # see: https://github.com/giampaolo/psutil/issues/586
+ allcpus = tuple(range(len(per_cpu_times())))
+ for cpu in cpus:
+ if cpu not in allcpus:
+ raise ValueError("invalid CPU #%i (choose between %s)"
+ % (cpu, allcpus))
+ try:
+ cext.proc_cpu_affinity_set(self.pid, cpus)
+ except OSError as err:
+ # 'man cpuset_setaffinity' about EDEADLK:
+ # <<the call would leave a thread without a valid CPU to run
+ # on because the set does not overlap with the thread's
+ # anonymous mask>>
+ if err.errno in (errno.EINVAL, errno.EDEADLK):
+ for cpu in cpus:
+ if cpu not in allcpus:
+ raise ValueError(
+ "invalid CPU #%i (choose between %s)" % (
+ cpu, allcpus))
+ raise
+
+ @wrap_exceptions
+ def memory_maps(self):
+ return cext.proc_memory_maps(self.pid)
+
+ @wrap_exceptions
+ def rlimit(self, resource, limits=None):
+ if limits is None:
+ return cext.proc_getrlimit(self.pid, resource)
+ else:
+ if len(limits) != 2:
+ raise ValueError(
+ "second argument must be a (soft, hard) tuple, "
+ "got %s" % repr(limits))
+ soft, hard = limits
+ return cext.proc_setrlimit(self.pid, resource, soft, hard)
diff --git a/lib/psutil/_pslinux.py b/lib/psutil/_pslinux.py
new file mode 100644
index 0000000..9dc9643
--- /dev/null
+++ b/lib/psutil/_pslinux.py
@@ -0,0 +1,2257 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Linux platform implementation."""
+
+from __future__ import division
+
+import base64
+import collections
+import errno
+import functools
+import glob
+import os
+import re
+import socket
+import struct
+import sys
+import traceback
+import warnings
+from collections import defaultdict
+from collections import namedtuple
+
+from . import _common
+from . import _psposix
+from . import _psutil_linux as cext
+from . import _psutil_posix as cext_posix
+from ._common import NIC_DUPLEX_FULL
+from ._common import NIC_DUPLEX_HALF
+from ._common import NIC_DUPLEX_UNKNOWN
+from ._common import AccessDenied
+from ._common import NoSuchProcess
+from ._common import ZombieProcess
+from ._common import bcat
+from ._common import cat
+from ._common import debug
+from ._common import decode
+from ._common import get_procfs_path
+from ._common import isfile_strict
+from ._common import memoize
+from ._common import memoize_when_activated
+from ._common import open_binary
+from ._common import open_text
+from ._common import parse_environ_block
+from ._common import path_exists_strict
+from ._common import supports_ipv6
+from ._common import usage_percent
+from ._compat import PY3
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import b
+from ._compat import basestring
+
+
+if sys.version_info >= (3, 4):
+ import enum
+else:
+ enum = None
+
+
+__extra__all__ = [
+ #
+ 'PROCFS_PATH',
+ # io prio constants
+ "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
+ "IOPRIO_CLASS_IDLE",
+ # connection status constants
+ "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
+ "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
+ "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+POWER_SUPPLY_PATH = "/sys/class/power_supply"
+HAS_PROC_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid())
+HAS_PROC_SMAPS_ROLLUP = os.path.exists('/proc/%s/smaps_rollup' % os.getpid())
+HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get")
+HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get")
+
+# Number of clock ticks per second
+CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
+PAGESIZE = cext_posix.getpagesize()
+BOOT_TIME = None # set later
+LITTLE_ENDIAN = sys.byteorder == 'little'
+
+# "man iostat" states that sectors are equivalent with blocks and have
+# a size of 512 bytes. Despite this value can be queried at runtime
+# via /sys/block/{DISK}/queue/hw_sector_size and results may vary
+# between 1k, 2k, or 4k... 512 appears to be a magic constant used
+# throughout Linux source code:
+# * https://stackoverflow.com/a/38136179/376587
+# * https://lists.gt.net/linux/kernel/2241060
+# * https://github.com/giampaolo/psutil/issues/1305
+# * https://github.com/torvalds/linux/blob/
+# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99
+# * https://lkml.org/lkml/2015/8/17/234
+DISK_SECTOR_SIZE = 512
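+
+# A minimal sketch (illustrative, not used by psutil) of how the per-disk
+# sector size mentioned above could be queried at runtime; "sda" is a
+# hypothetical device name:
+#
+#   with open("/sys/block/sda/queue/hw_sector_size") as f:
+#       hw_sector_size = int(f.read().strip())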
+
+if enum is None:
+ AF_LINK = socket.AF_PACKET
+else:
+ AddressFamily = enum.IntEnum('AddressFamily',
+ {'AF_LINK': int(socket.AF_PACKET)})
+ AF_LINK = AddressFamily.AF_LINK
+
+# ioprio_* constants http://linux.die.net/man/2/ioprio_get
+if enum is None:
+ IOPRIO_CLASS_NONE = 0
+ IOPRIO_CLASS_RT = 1
+ IOPRIO_CLASS_BE = 2
+ IOPRIO_CLASS_IDLE = 3
+else:
+ class IOPriority(enum.IntEnum):
+ IOPRIO_CLASS_NONE = 0
+ IOPRIO_CLASS_RT = 1
+ IOPRIO_CLASS_BE = 2
+ IOPRIO_CLASS_IDLE = 3
+
+ globals().update(IOPriority.__members__)
+
+# See:
+# https://github.com/torvalds/linux/blame/master/fs/proc/array.c
+# ...and (TASK_* constants):
+# https://github.com/torvalds/linux/blob/master/include/linux/sched.h
+PROC_STATUSES = {
+ "R": _common.STATUS_RUNNING,
+ "S": _common.STATUS_SLEEPING,
+ "D": _common.STATUS_DISK_SLEEP,
+ "T": _common.STATUS_STOPPED,
+ "t": _common.STATUS_TRACING_STOP,
+ "Z": _common.STATUS_ZOMBIE,
+ "X": _common.STATUS_DEAD,
+ "x": _common.STATUS_DEAD,
+ "K": _common.STATUS_WAKE_KILL,
+ "W": _common.STATUS_WAKING,
+ "I": _common.STATUS_IDLE,
+ "P": _common.STATUS_PARKED,
+}
+
+# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h
+TCP_STATUSES = {
+ "01": _common.CONN_ESTABLISHED,
+ "02": _common.CONN_SYN_SENT,
+ "03": _common.CONN_SYN_RECV,
+ "04": _common.CONN_FIN_WAIT1,
+ "05": _common.CONN_FIN_WAIT2,
+ "06": _common.CONN_TIME_WAIT,
+ "07": _common.CONN_CLOSE,
+ "08": _common.CONN_CLOSE_WAIT,
+ "09": _common.CONN_LAST_ACK,
+ "0A": _common.CONN_LISTEN,
+ "0B": _common.CONN_CLOSING
+}
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.virtual_memory()
+svmem = namedtuple(
+ 'svmem', ['total', 'available', 'percent', 'used', 'free',
+ 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab'])
+# psutil.disk_io_counters()
+sdiskio = namedtuple(
+ 'sdiskio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_time', 'write_time',
+ 'read_merged_count', 'write_merged_count',
+ 'busy_time'])
+# psutil.Process().open_files()
+popenfile = namedtuple(
+ 'popenfile', ['path', 'fd', 'position', 'mode', 'flags'])
+# psutil.Process().memory_info()
+pmem = namedtuple('pmem', 'rss vms shared text lib data dirty')
+# psutil.Process().memory_full_info()
+pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap'))
+# psutil.Process().memory_maps(grouped=True)
+pmmap_grouped = namedtuple(
+ 'pmmap_grouped',
+ ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty',
+ 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap'])
+# psutil.Process().memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+# psutil.Process.io_counters()
+pio = namedtuple('pio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'read_chars', 'write_chars'])
+# psutil.Process.cpu_times()
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system',
+ 'iowait'])
+
+
+# =====================================================================
+# --- utils
+# =====================================================================
+
+
+def readlink(path):
+ """Wrapper around os.readlink()."""
+ assert isinstance(path, basestring), path
+ path = os.readlink(path)
+ # readlink() might return paths containing null bytes ('\x00')
+ # resulting in "TypeError: must be encoded string without NULL
+ # bytes, not str" errors when the string is passed to other
+ # fs-related functions (os.*, open(), ...).
+ # Apparently everything after '\x00' is garbage (we can have
+ # ' (deleted)', 'new' and possibly others), see:
+ # https://github.com/giampaolo/psutil/issues/717
+ path = path.split('\x00')[0]
+ # Certain paths have ' (deleted)' appended. Usually this is
+ # bogus as the file actually exists. Even if it doesn't we
+ # don't care.
+ if path.endswith(' (deleted)') and not path_exists_strict(path):
+ path = path[:-10]
+ return path
+
+
+def file_flags_to_mode(flags):
+ """Convert file's open() flags into a readable string.
+ Used by Process.open_files().
+ """
+ modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
+ mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
+ if flags & os.O_APPEND:
+ mode = mode.replace('w', 'a', 1)
+ mode = mode.replace('w+', 'r+')
+ # possible values: r, w, a, r+, a+
+ return mode
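+
+# Illustrative examples (not part of psutil) of the mapping performed by
+# file_flags_to_mode():
+#
+#   file_flags_to_mode(os.O_RDONLY)               -> 'r'
+#   file_flags_to_mode(os.O_WRONLY)               -> 'w'
+#   file_flags_to_mode(os.O_WRONLY | os.O_APPEND) -> 'a'
+#   file_flags_to_mode(os.O_RDWR)                 -> 'r+'
+#   file_flags_to_mode(os.O_RDWR | os.O_APPEND)   -> 'a+'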
+
+
+def is_storage_device(name):
+ """Return True if the given name refers to a root device (e.g.
+ "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1",
+ "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram")
+ return True.
+ """
+ # Re-adapted from iostat source code, see:
+ # https://github.com/sysstat/sysstat/blob/
+ # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208
+ # Some devices may have a slash in their name (e.g. cciss/c0d0...).
+ name = name.replace('/', '!')
+ including_virtual = True
+ if including_virtual:
+ path = "/sys/block/%s" % name
+ else:
+ path = "/sys/block/%s/device" % name
+ return os.access(path, os.F_OK)
+
+
+@memoize
+def set_scputimes_ntuple(procfs_path):
+ """Set a namedtuple of variable fields depending on the CPU times
+ available on this Linux kernel version which may be:
+ (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
+ [guest_nice]]])
+ Used by cpu_times() function.
+ """
+ global scputimes
+ with open_binary('%s/stat' % procfs_path) as f:
+ values = f.readline().split()[1:]
+ fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
+ vlen = len(values)
+ if vlen >= 8:
+ # Linux >= 2.6.11
+ fields.append('steal')
+ if vlen >= 9:
+ # Linux >= 2.6.24
+ fields.append('guest')
+ if vlen >= 10:
+ # Linux >= 3.2.0
+ fields.append('guest_nice')
+ scputimes = namedtuple('scputimes', fields)
+
+
+try:
+ set_scputimes_ntuple("/proc")
+except Exception: # pragma: no cover
+ # Don't want to crash at import time.
+ traceback.print_exc()
+ scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0)
+
+
+# =====================================================================
+# --- prlimit
+# =====================================================================
+
+# Backport of resource.prlimit() for Python 2. Originally this was done
+# in C, but CentOS-6 which we use to create manylinux wheels is too old
+# and does not support prlimit() syscall. As such the resulting wheel
+# would not include prlimit(), even when installed on newer systems.
+# This is the only part of psutil using ctypes.
+
+prlimit = None
+try:
+ from resource import prlimit # python >= 3.4
+except ImportError:
+ import ctypes
+
+ libc = ctypes.CDLL(None, use_errno=True)
+
+ if hasattr(libc, "prlimit"):
+
+ def prlimit(pid, resource_, limits=None):
+ class StructRlimit(ctypes.Structure):
+ _fields_ = [('rlim_cur', ctypes.c_longlong),
+ ('rlim_max', ctypes.c_longlong)]
+
+ current = StructRlimit()
+ if limits is None:
+ # get
+ ret = libc.prlimit(pid, resource_, None, ctypes.byref(current))
+ else:
+ # set
+ new = StructRlimit()
+ new.rlim_cur = limits[0]
+ new.rlim_max = limits[1]
+ ret = libc.prlimit(
+ pid, resource_, ctypes.byref(new), ctypes.byref(current))
+
+ if ret != 0:
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return (current.rlim_cur, current.rlim_max)
+
+
+if prlimit is not None:
+ __extra__all__.extend(
+ [x for x in dir(cext) if x.startswith('RLIM') and x.isupper()])
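+
+# A minimal usage sketch (illustrative, not part of psutil): both the stdlib
+# resource.prlimit() and the ctypes backport above take a pid and an RLIMIT_*
+# constant, and get or set a (soft, hard) pair:
+#
+#   import resource
+#   soft, hard = prlimit(os.getpid(), resource.RLIMIT_NOFILE)
+#   prlimit(os.getpid(), resource.RLIMIT_NOFILE, (soft, hard))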
+
+
+# =====================================================================
+# --- system memory
+# =====================================================================
+
+
+def calculate_avail_vmem(mems):
+ """Fallback for kernels < 3.14 where /proc/meminfo does not provide
+ "MemAvailable:" column, see:
+ https://blog.famzah.net/2014/09/24/
+ This code reimplements the algorithm outlined here:
+ https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
+ commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+
+    XXX: on recent kernels this calculation differs by ~1.5% from
+ "MemAvailable:" as it's calculated slightly differently, see:
+ https://gitlab.com/procps-ng/procps/issues/42
+ https://github.com/famzah/linux-memavailable-procfs/issues/2
+ It is still way more realistic than doing (free + cached) though.
+ """
+ # Fallback for very old distros. According to
+ # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
+ # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+ # ...long ago "avail" was calculated as (free + cached).
+    # We might fall back in such cases:
+ # "Active(file)" not available: 2.6.28 / Dec 2008
+ # "Inactive(file)" not available: 2.6.28 / Dec 2008
+ # "SReclaimable:" not available: 2.6.19 / Nov 2006
+ # /proc/zoneinfo not available: 2.6.13 / Aug 2005
+ free = mems[b'MemFree:']
+ fallback = free + mems.get(b"Cached:", 0)
+ try:
+ lru_active_file = mems[b'Active(file):']
+ lru_inactive_file = mems[b'Inactive(file):']
+ slab_reclaimable = mems[b'SReclaimable:']
+ except KeyError:
+ return fallback
+ try:
+ f = open_binary('%s/zoneinfo' % get_procfs_path())
+ except IOError:
+ return fallback # kernel 2.6.13
+
+ watermark_low = 0
+ with f:
+ for line in f:
+ line = line.strip()
+ if line.startswith(b'low'):
+ watermark_low += int(line.split()[1])
+ watermark_low *= PAGESIZE
+
+ avail = free - watermark_low
+ pagecache = lru_active_file + lru_inactive_file
+ pagecache -= min(pagecache / 2, watermark_low)
+ avail += pagecache
+ avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low)
+ return int(avail)
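+
+# Worked example for the estimate above (illustrative numbers only): with
+# free=1000, watermark_low=100, file LRU pages (active+inactive)=600 and
+# SReclaimable=200, the result is
+# 1000 - 100 + (600 - min(300, 100)) + (200 - min(100, 100)) = 1500.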
+
+
+def virtual_memory():
+ """Report virtual memory stats.
+ This implementation matches "free" and "vmstat -s" cmdline
+ utility values and procps-ng-3.3.12 source was used as a reference
+ (2016-09-18):
+ https://gitlab.com/procps-ng/procps/blob/
+ 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c
+ For reference, procps-ng-3.3.10 is the version available on Ubuntu
+ 16.04.
+
+ Note about "available" memory: up until psutil 4.3 it was
+ calculated as "avail = (free + buffers + cached)". Now
+ "MemAvailable:" column (kernel 3.14) from /proc/meminfo is used as
+ it's more accurate.
+ That matches "available" column in newer versions of "free".
+ """
+ missing_fields = []
+ mems = {}
+ with open_binary('%s/meminfo' % get_procfs_path()) as f:
+ for line in f:
+ fields = line.split()
+ mems[fields[0]] = int(fields[1]) * 1024
+
+ # /proc doc states that the available fields in /proc/meminfo vary
+ # by architecture and compile options, but these 3 values are also
+ # returned by sysinfo(2); as such we assume they are always there.
+ total = mems[b'MemTotal:']
+ free = mems[b'MemFree:']
+ try:
+ buffers = mems[b'Buffers:']
+ except KeyError:
+ # https://github.com/giampaolo/psutil/issues/1010
+ buffers = 0
+ missing_fields.append('buffers')
+ try:
+ cached = mems[b"Cached:"]
+ except KeyError:
+ cached = 0
+ missing_fields.append('cached')
+ else:
+ # "free" cmdline utility sums reclaimable to cached.
+ # Older versions of procps used to add slab memory instead.
+ # This got changed in:
+ # https://gitlab.com/procps-ng/procps/commit/
+ # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
+ cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19
+
+ try:
+ shared = mems[b'Shmem:'] # since kernel 2.6.32
+ except KeyError:
+ try:
+ shared = mems[b'MemShared:'] # kernels 2.4
+ except KeyError:
+ shared = 0
+ missing_fields.append('shared')
+
+ try:
+ active = mems[b"Active:"]
+ except KeyError:
+ active = 0
+ missing_fields.append('active')
+
+ try:
+ inactive = mems[b"Inactive:"]
+ except KeyError:
+ try:
+ inactive = \
+ mems[b"Inact_dirty:"] + \
+ mems[b"Inact_clean:"] + \
+ mems[b"Inact_laundry:"]
+ except KeyError:
+ inactive = 0
+ missing_fields.append('inactive')
+
+ try:
+ slab = mems[b"Slab:"]
+ except KeyError:
+ slab = 0
+
+ used = total - free - cached - buffers
+ if used < 0:
+        # May be symptomatic of running within an LXC container where such
+        # values will be dramatically distorted compared to those of the host.
+ used = total - free
+
+ # - starting from 4.4.0 we match free's "available" column.
+ # Before 4.4.0 we calculated it as (free + buffers + cached)
+ # which matched htop.
+ # - free and htop available memory differs as per:
+ # http://askubuntu.com/a/369589
+ # http://unix.stackexchange.com/a/65852/168884
+ # - MemAvailable has been introduced in kernel 3.14
+ try:
+ avail = mems[b'MemAvailable:']
+ except KeyError:
+ avail = calculate_avail_vmem(mems)
+
+ if avail < 0:
+ avail = 0
+ missing_fields.append('available')
+
+ # If avail is greater than total or our calculation overflows,
+ # that's symptomatic of running within an LXC container where such
+ # values will be dramatically distorted over those of the host.
+ # https://gitlab.com/procps-ng/procps/blob/
+ # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764
+ if avail > total:
+ avail = free
+
+ percent = usage_percent((total - avail), total, round_=1)
+
+ # Warn about missing metrics which are set to 0.
+ if missing_fields:
+ msg = "%s memory stats couldn't be determined and %s set to 0" % (
+ ", ".join(missing_fields),
+ "was" if len(missing_fields) == 1 else "were")
+ warnings.warn(msg, RuntimeWarning)
+
+ return svmem(total, avail, percent, used, free,
+ active, inactive, buffers, cached, shared, slab)
+
+
+def swap_memory():
+ """Return swap memory metrics."""
+ mems = {}
+ with open_binary('%s/meminfo' % get_procfs_path()) as f:
+ for line in f:
+ fields = line.split()
+ mems[fields[0]] = int(fields[1]) * 1024
+ # We prefer /proc/meminfo over sysinfo() syscall so that
+ # psutil.PROCFS_PATH can be used in order to allow retrieval
+ # for linux containers, see:
+ # https://github.com/giampaolo/psutil/issues/1015
+ try:
+ total = mems[b'SwapTotal:']
+ free = mems[b'SwapFree:']
+ except KeyError:
+ _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
+ total *= unit_multiplier
+ free *= unit_multiplier
+
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ # get pgin/pgouts
+ try:
+ f = open_binary("%s/vmstat" % get_procfs_path())
+ except IOError as err:
+ # see https://github.com/giampaolo/psutil/issues/722
+ msg = "'sin' and 'sout' swap memory stats couldn't " \
+ "be determined and were set to 0 (%s)" % str(err)
+ warnings.warn(msg, RuntimeWarning)
+ sin = sout = 0
+ else:
+ with f:
+ sin = sout = None
+ for line in f:
+ # values are expressed in pages (assumed to be 4 KiB each);
+ # we want bytes instead
+ if line.startswith(b'pswpin'):
+ sin = int(line.split(b' ')[1]) * 4 * 1024
+ elif line.startswith(b'pswpout'):
+ sout = int(line.split(b' ')[1]) * 4 * 1024
+ if sin is not None and sout is not None:
+ break
+ else:
+ # we might get here when dealing with exotic Linux
+ # flavors, see:
+ # https://github.com/giampaolo/psutil/issues/313
+ msg = "'sin' and 'sout' swap memory stats couldn't " \
+ "be determined and were set to 0"
+ warnings.warn(msg, RuntimeWarning)
+ sin = sout = 0
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return a named tuple representing the following system-wide
+ CPU times:
+ (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
+ [guest_nice]]])
+ Last 3 fields may not be available on all Linux kernel versions.
+ """
+ procfs_path = get_procfs_path()
+ set_scputimes_ntuple(procfs_path)
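+ # The first line of /proc/stat looks like:
+ # "cpu  user nice system idle iowait irq softirq [steal [guest [guest_nice]]]"
+ # with each value expressed in USER_HZ ticks, hence the division by
+ # CLOCK_TICKS below to obtain seconds.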
+ with open_binary('%s/stat' % procfs_path) as f:
+ values = f.readline().split()
+ fields = values[1:len(scputimes._fields) + 1]
+ fields = [float(x) / CLOCK_TICKS for x in fields]
+ return scputimes(*fields)
+
+
+def per_cpu_times():
+ """Return a list of namedtuple representing the CPU times
+ for every CPU available on the system.
+ """
+ procfs_path = get_procfs_path()
+ set_scputimes_ntuple(procfs_path)
+ cpus = []
+ with open_binary('%s/stat' % procfs_path) as f:
+ # get rid of the first line which refers to system wide CPU stats
+ f.readline()
+ for line in f:
+ if line.startswith(b'cpu'):
+ values = line.split()
+ fields = values[1:len(scputimes._fields) + 1]
+ fields = [float(x) / CLOCK_TICKS for x in fields]
+ entry = scputimes(*fields)
+ cpus.append(entry)
+ return cpus
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # as a second fallback we try to parse /proc/cpuinfo
+ num = 0
+ with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
+ for line in f:
+ if line.lower().startswith(b'processor'):
+ num += 1
+
+ # unknown format (e.g. armel/sparc architectures), see:
+ # https://github.com/giampaolo/psutil/issues/200
+ # try to parse /proc/stat as a last resort
+ if num == 0:
+ search = re.compile(r'cpu\d')
+ with open_text('%s/stat' % get_procfs_path()) as f:
+ for line in f:
+ line = line.split(' ')[0]
+ if search.match(line):
+ num += 1
+
+ if num == 0:
+ # mimic os.cpu_count()
+ return None
+ return num
+
+
+def cpu_count_cores():
+ """Return the number of CPU cores in the system."""
+ # Method #1
+ ls = set()
+ # These 2 files are the same but */core_cpus_list is newer while
+ # */thread_siblings_list is deprecated and may disappear in the future.
+ # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
+ # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
+ # https://lkml.org/lkml/2019/2/26/41
+ p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list"
+ p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"
+ for path in glob.glob(p1) or glob.glob(p2):
+ with open_binary(path) as f:
+ ls.add(f.read().strip())
+ result = len(ls)
+ if result != 0:
+ return result
+
+ # Method #2
+ mapping = {}
+ current_info = {}
+ with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
+ for line in f:
+ line = line.strip().lower()
+ if not line:
+ # new section
+ try:
+ mapping[current_info[b'physical id']] = \
+ current_info[b'cpu cores']
+ except KeyError:
+ pass
+ current_info = {}
+ else:
+ # ongoing section
+ if line.startswith((b'physical id', b'cpu cores')):
+ key, value = line.split(b'\t:', 1)
+ current_info[key] = int(value)
+
+ result = sum(mapping.values())
+ return result or None # mimic os.cpu_count()
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ with open_binary('%s/stat' % get_procfs_path()) as f:
+ ctx_switches = None
+ interrupts = None
+ soft_interrupts = None
+ for line in f:
+ if line.startswith(b'ctxt'):
+ ctx_switches = int(line.split()[1])
+ elif line.startswith(b'intr'):
+ interrupts = int(line.split()[1])
+ elif line.startswith(b'softirq'):
+ soft_interrupts = int(line.split()[1])
+ if ctx_switches is not None and soft_interrupts is not None \
+ and interrupts is not None:
+ break
+ syscalls = 0
+ return _common.scpustats(
+ ctx_switches, interrupts, soft_interrupts, syscalls)
+
+
+def _cpu_get_cpuinfo_freq():
+ """Return current CPU frequency from cpuinfo if available.
+ """
+ ret = []
+ with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
+ for line in f:
+ if line.lower().startswith(b'cpu mhz'):
+ ret.append(float(line.split(b':', 1)[1]))
+ return ret
+
+
+if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or \
+ os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"):
+ def cpu_freq():
+ """Return frequency metrics for all CPUs.
+ Contrary to other OSes, Linux updates these values in
+ real time.
+ """
+ cpuinfo_freqs = _cpu_get_cpuinfo_freq()
+ paths = \
+ glob.glob("/sys/devices/system/cpu/cpufreq/policy[0-9]*") or \
+ glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq")
+ paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group()))
+ ret = []
+ pjoin = os.path.join
+ for i, path in enumerate(paths):
+ if len(paths) == len(cpuinfo_freqs):
+ # take cached value from cpuinfo if available, see:
+ # https://github.com/giampaolo/psutil/issues/1851
+ curr = cpuinfo_freqs[i] * 1000
+ else:
+ curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None)
+ if curr is None:
+ # Likely an old RedHat, see:
+ # https://github.com/giampaolo/psutil/issues/1071
+ curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None)
+ if curr is None:
+ raise NotImplementedError(
+ "can't find current frequency file")
+ curr = int(curr) / 1000
+ max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000
+ min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000
+ ret.append(_common.scpufreq(curr, min_, max_))
+ return ret
+
+else:
+ def cpu_freq():
+ """Alternate implementation using /proc/cpuinfo.
+ min and max frequencies are not available and are set to 0.0.
+ """
+ return [_common.scpufreq(x, 0., 0.) for x in _cpu_get_cpuinfo_freq()]
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_if_addrs = cext_posix.net_if_addrs
+
+
+class _Ipv6UnsupportedError(Exception):
+ pass
+
+
+class Connections:
+ """A wrapper on top of /proc/net/* files, retrieving per-process
+ and system-wide open connections (TCP, UDP, UNIX) similarly to
+ "netstat -an".
+
+ Note: in case of UNIX sockets we're only able to determine the
+ local endpoint/path, not the one it's connected to.
+ According to [1] it would be possible, though not easily.
+
+ [1] http://serverfault.com/a/417946
+ """
+
+ def __init__(self):
+ # The string represents the basename of the corresponding
+ # /proc/net/{proto_name} file.
+ tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
+ tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
+ udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
+ udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
+ unix = ("unix", socket.AF_UNIX, None)
+ self.tmap = {
+ "all": (tcp4, tcp6, udp4, udp6, unix),
+ "tcp": (tcp4, tcp6),
+ "tcp4": (tcp4,),
+ "tcp6": (tcp6,),
+ "udp": (udp4, udp6),
+ "udp4": (udp4,),
+ "udp6": (udp6,),
+ "unix": (unix,),
+ "inet": (tcp4, tcp6, udp4, udp6),
+ "inet4": (tcp4, udp4),
+ "inet6": (tcp6, udp6),
+ }
+ self._procfs_path = None
+
+ def get_proc_inodes(self, pid):
+ inodes = defaultdict(list)
+ for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)):
+ try:
+ inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd))
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT == file which is gone in the meantime;
+ # os.stat('/proc/%s' % self.pid) will be done later
+ # to force NSP (if it's the case)
+ continue
+ except OSError as err:
+ if err.errno == errno.EINVAL:
+ # not a link
+ continue
+ if err.errno == errno.ENAMETOOLONG:
+ # file name too long
+ debug(err)
+ continue
+ raise
+ else:
+ if inode.startswith('socket:['):
+ # the process is using a socket
+ inode = inode[8:][:-1]
+ inodes[inode].append((pid, int(fd)))
+ return inodes
+
+ def get_all_inodes(self):
+ inodes = {}
+ for pid in pids():
+ try:
+ inodes.update(self.get_proc_inodes(pid))
+ except (FileNotFoundError, ProcessLookupError, PermissionError):
+ # os.listdir() is going to raise a lot of access denied
+ # exceptions for an unprivileged user; that's fine
+ # as we'll just end up returning a connection with PID
+ # and fd set to None anyway.
+ # Both netstat -an and lsof do the same, so it's
+ # unlikely we can do any better.
+ # ENOENT just means a PID disappeared on us.
+ continue
+ return inodes
+
+ @staticmethod
+ def decode_address(addr, family):
+ """Accept an "ip:port" address as displayed in /proc/net/*
+ and convert it into a human readable form, like:
+
+ "0500000A:0016" -> ("10.0.0.5", 22)
+ "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
+
+ The IP address portion is a four-byte hexadecimal number stored
+ in host byte order; on little-endian machines the least significant
+ byte is listed first, so we need to reverse the order of the bytes
+ to convert it to an IP address.
+ The port is represented as a two-byte hexadecimal number.
+
+ Reference:
+ http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
+ """
+ ip, port = addr.split(':')
+ port = int(port, 16)
+ # this usually refers to a local socket in listen mode with
+ # no end-points connected
+ if not port:
+ return ()
+ if PY3:
+ ip = ip.encode('ascii')
+ if family == socket.AF_INET:
+ # see: https://github.com/giampaolo/psutil/issues/201
+ if LITTLE_ENDIAN:
+ ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
+ else:
+ ip = socket.inet_ntop(family, base64.b16decode(ip))
+ else: # IPv6
+ ip = base64.b16decode(ip)
+ try:
+ # see: https://github.com/giampaolo/psutil/issues/201
+ if LITTLE_ENDIAN:
+ ip = socket.inet_ntop(
+ socket.AF_INET6,
+ struct.pack('>4I', *struct.unpack('<4I', ip)))
+ else:
+ ip = socket.inet_ntop(
+ socket.AF_INET6,
+ struct.pack('<4I', *struct.unpack('<4I', ip)))
+ except ValueError:
+ # see: https://github.com/giampaolo/psutil/issues/623
+ if not supports_ipv6():
+ raise _Ipv6UnsupportedError
+ else:
+ raise
+ return _common.addr(ip, port)
+
+ @staticmethod
+ def process_inet(file, family, type_, inodes, filter_pid=None):
+ """Parse /proc/net/tcp* and /proc/net/udp* files."""
+ if file.endswith('6') and not os.path.exists(file):
+ # IPv6 not supported
+ return
+ with open_text(file) as f:
+ f.readline() # skip the first line
+ for lineno, line in enumerate(f, 1):
+ try:
+ _, laddr, raddr, status, _, _, _, _, _, inode = \
+ line.split()[:10]
+ except ValueError:
+ raise RuntimeError(
+ "error while parsing %s; malformed line %s %r" % (
+ file, lineno, line))
+ if inode in inodes:
+ # # We assume inet sockets are unique, so we error
+ # # out if there are multiple references to the
+ # # same inode. We won't do this for UNIX sockets.
+ # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
+ # raise ValueError("ambiguous inode with multiple "
+ # "PIDs references")
+ pid, fd = inodes[inode][0]
+ else:
+ pid, fd = None, -1
+ if filter_pid is not None and filter_pid != pid:
+ continue
+ else:
+ if type_ == socket.SOCK_STREAM:
+ status = TCP_STATUSES[status]
+ else:
+ status = _common.CONN_NONE
+ try:
+ laddr = Connections.decode_address(laddr, family)
+ raddr = Connections.decode_address(raddr, family)
+ except _Ipv6UnsupportedError:
+ continue
+ yield (fd, family, type_, laddr, raddr, status, pid)
+
+ @staticmethod
+ def process_unix(file, family, inodes, filter_pid=None):
+ """Parse /proc/net/unix files."""
+ with open_text(file) as f:
+ f.readline() # skip the first line
+ for line in f:
+ tokens = line.split()
+ try:
+ _, _, _, _, type_, _, inode = tokens[0:7]
+ except ValueError:
+ if ' ' not in line:
+ # see: https://github.com/giampaolo/psutil/issues/766
+ continue
+ raise RuntimeError(
+ "error while parsing %s; malformed line %r" % (
+ file, line))
+ if inode in inodes:
+ # With UNIX sockets we can have a single inode
+ # referencing many file descriptors.
+ pairs = inodes[inode]
+ else:
+ pairs = [(None, -1)]
+ for pid, fd in pairs:
+ if filter_pid is not None and filter_pid != pid:
+ continue
+ else:
+ if len(tokens) == 8:
+ path = tokens[-1]
+ else:
+ path = ""
+ type_ = _common.socktype_to_enum(int(type_))
+ # XXX: determining the remote endpoint of a
+ # UNIX socket on Linux is not possible, see:
+ # https://serverfault.com/questions/252723/
+ raddr = ""
+ status = _common.CONN_NONE
+ yield (fd, family, type_, path, raddr, status, pid)
+
+ def retrieve(self, kind, pid=None):
+ if kind not in self.tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in self.tmap])))
+ self._procfs_path = get_procfs_path()
+ if pid is not None:
+ inodes = self.get_proc_inodes(pid)
+ if not inodes:
+ # no connections for this process
+ return []
+ else:
+ inodes = self.get_all_inodes()
+ ret = set()
+ for proto_name, family, type_ in self.tmap[kind]:
+ path = "%s/net/%s" % (self._procfs_path, proto_name)
+ if family in (socket.AF_INET, socket.AF_INET6):
+ ls = self.process_inet(
+ path, family, type_, inodes, filter_pid=pid)
+ else:
+ ls = self.process_unix(
+ path, family, inodes, filter_pid=pid)
+ for fd, family, type_, laddr, raddr, status, bound_pid in ls:
+ if pid:
+ conn = _common.pconn(fd, family, type_, laddr, raddr,
+ status)
+ else:
+ conn = _common.sconn(fd, family, type_, laddr, raddr,
+ status, bound_pid)
+ ret.add(conn)
+ return list(ret)
+
+
+_connections = Connections()
+
+
+def net_connections(kind='inet'):
+ """Return system-wide open connections."""
+ return _connections.retrieve(kind)
+
+
+def net_io_counters():
+ """Return network I/O statistics for every network interface
+ installed on the system as a dict of raw tuples.
+ """
+ with open_text("%s/net/dev" % get_procfs_path()) as f:
+ lines = f.readlines()
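+ # Each line after the 2 header lines looks like (illustrative):
+ # "  eth0: 6514369 10519 0 0 0 0 0 0  2776770 10314 0 0 0 0 0 0"
+ # i.e. the interface name followed by 8 receive and 8 transmit
+ # counters.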
+ retdict = {}
+ for line in lines[2:]:
+ colon = line.rfind(':')
+ assert colon > 0, repr(line)
+ name = line[:colon].strip()
+ fields = line[colon + 1:].strip().split()
+
+ # in
+ (bytes_recv,
+ packets_recv,
+ errin,
+ dropin,
+ fifoin, # unused
+ framein, # unused
+ compressedin, # unused
+ multicastin, # unused
+ # out
+ bytes_sent,
+ packets_sent,
+ errout,
+ dropout,
+ fifoout, # unused
+ collisionsout, # unused
+ carrierout, # unused
+ compressedout) = map(int, fields)
+
+ retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
+ errin, errout, dropin, dropout)
+ return retdict
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
+ cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
+ cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN}
+ names = net_io_counters().keys()
+ ret = {}
+ for name in names:
+ try:
+ mtu = cext_posix.net_if_mtu(name)
+ flags = cext_posix.net_if_flags(name)
+ duplex, speed = cext.net_if_duplex_speed(name)
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1279
+ if err.errno != errno.ENODEV:
+ raise
+ else:
+ debug(err)
+ else:
+ output_flags = ','.join(flags)
+ isup = 'running' in flags
+ ret[name] = _common.snicstats(isup, duplex_map[duplex], speed, mtu,
+ output_flags)
+ return ret
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_usage = _psposix.disk_usage
+
+
+def disk_io_counters(perdisk=False):
+ """Return disk I/O statistics for every disk installed on the
+ system as a dict of raw tuples.
+ """
+ def read_procfs():
+ # OK, this is a bit confusing. The format of /proc/diskstats can
+ # have 3 variations.
+ # On Linux 2.4 each line has always 15 fields, e.g.:
+ # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
+ # On Linux 2.6+ each line *usually* has 14 fields, and the disk
+ # name is in another position, like this:
+ # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
+ # ...unless (Linux 2.6) the line refers to a partition instead
+ # of a disk, in which case the line has fewer fields (7):
+ # "3 1 hda1 8 8 8 8"
+ # 4.18+ has 4 fields added:
+ # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
+ # 5.5 has 2 more fields.
+ # See:
+ # https://www.kernel.org/doc/Documentation/iostats.txt
+ # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
+ with open_text("%s/diskstats" % get_procfs_path()) as f:
+ lines = f.readlines()
+ for line in lines:
+ fields = line.split()
+ flen = len(fields)
+ if flen == 15:
+ # Linux 2.4
+ name = fields[3]
+ reads = int(fields[2])
+ (reads_merged, rbytes, rtime, writes, writes_merged,
+ wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
+ elif flen == 14 or flen >= 18:
+ # Linux 2.6+, line referring to a disk
+ name = fields[2]
+ (reads, reads_merged, rbytes, rtime, writes, writes_merged,
+ wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
+ elif flen == 7:
+ # Linux 2.6+, line referring to a partition
+ name = fields[2]
+ reads, rbytes, writes, wbytes = map(int, fields[3:])
+ rtime = wtime = reads_merged = writes_merged = busy_time = 0
+ else:
+ raise ValueError("not sure how to interpret line %r" % line)
+ yield (name, reads, writes, rbytes, wbytes, rtime, wtime,
+ reads_merged, writes_merged, busy_time)
+
+ def read_sysfs():
+ for block in os.listdir('/sys/block'):
+ for root, _, files in os.walk(os.path.join('/sys/block', block)):
+ if 'stat' not in files:
+ continue
+ with open_text(os.path.join(root, 'stat')) as f:
+ fields = f.read().strip().split()
+ name = os.path.basename(root)
+ (reads, reads_merged, rbytes, rtime, writes, writes_merged,
+ wbytes, wtime, _, busy_time) = map(int, fields[:10])
+ yield (name, reads, writes, rbytes, wbytes, rtime,
+ wtime, reads_merged, writes_merged, busy_time)
+
+ if os.path.exists('%s/diskstats' % get_procfs_path()):
+ gen = read_procfs()
+ elif os.path.exists('/sys/block'):
+ gen = read_sysfs()
+ else:
+ raise NotImplementedError(
+ "%s/diskstats nor /sys/block filesystem are available on this "
+ "system" % get_procfs_path())
+
+ retdict = {}
+ for entry in gen:
+ (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged,
+ writes_merged, busy_time) = entry
+ if not perdisk and not is_storage_device(name):
+ # perdisk=False means we want to calculate totals so we skip
+ # partitions (e.g. 'sda1', 'nvme0n1p1') and only include
+ # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks
+ # include a total of all their partitions + some extra size
+ # of their own:
+ # $ cat /proc/diskstats
+ # 259 0 sda 10485760 ...
+ # 259 1 sda1 5186039 ...
+ # 259 1 sda2 5082039 ...
+ # See:
+ # https://github.com/giampaolo/psutil/pull/1313
+ continue
+
+ rbytes *= DISK_SECTOR_SIZE
+ wbytes *= DISK_SECTOR_SIZE
+ retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime,
+ reads_merged, writes_merged, busy_time)
+
+ return retdict
+
+
+class RootFsDeviceFinder:
+ """disk_partitions() may return partitions with device == "/dev/root"
+ or "rootfs". This container class uses different strategies to try to
+ obtain the real device path. Resources:
+ https://bootlin.com/blog/find-root-device/
+ https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/
+ """
+ __slots__ = ['major', 'minor']
+
+ def __init__(self):
+ dev = os.stat("/").st_dev
+ self.major = os.major(dev)
+ self.minor = os.minor(dev)
+
+ def ask_proc_partitions(self):
+ with open_text("%s/partitions" % get_procfs_path()) as f:
+ for line in f.readlines()[2:]:
+ fields = line.split()
+ if len(fields) < 4: # just for extra safety
+ continue
+ major = int(fields[0]) if fields[0].isdigit() else None
+ minor = int(fields[1]) if fields[1].isdigit() else None
+ name = fields[3]
+ if major == self.major and minor == self.minor:
+ if name: # just for extra safety
+ return "/dev/%s" % name
+
+ def ask_sys_dev_block(self):
+ path = "/sys/dev/block/%s:%s/uevent" % (self.major, self.minor)
+ with open_text(path) as f:
+ for line in f:
+ if line.startswith("DEVNAME="):
+ name = line.strip().rpartition("DEVNAME=")[2]
+ if name: # just for extra safety
+ return "/dev/%s" % name
+
+ def ask_sys_class_block(self):
+ needle = "%s:%s" % (self.major, self.minor)
+ files = glob.iglob("/sys/class/block/*/dev")
+ for file in files:
+ try:
+ f = open_text(file)
+ except FileNotFoundError: # race condition
+ continue
+ else:
+ with f:
+ data = f.read().strip()
+ if data == needle:
+ name = os.path.basename(os.path.dirname(file))
+ return "/dev/%s" % name
+
+ def find(self):
+ path = None
+ if path is None:
+ try:
+ path = self.ask_proc_partitions()
+ except (IOError, OSError) as err:
+ debug(err)
+ if path is None:
+ try:
+ path = self.ask_sys_dev_block()
+ except (IOError, OSError) as err:
+ debug(err)
+ if path is None:
+ try:
+ path = self.ask_sys_class_block()
+ except (IOError, OSError) as err:
+ debug(err)
+ # We use exists() because the "/dev/*" part of the path is hard
+ # coded, so we want to be sure.
+ if path is not None and os.path.exists(path):
+ return path
+
+
+def disk_partitions(all=False):
+ """Return mounted disk partitions as a list of namedtuples."""
+ fstypes = set()
+ procfs_path = get_procfs_path()
+ with open_text("%s/filesystems" % procfs_path) as f:
+ for line in f:
+ line = line.strip()
+ if not line.startswith("nodev"):
+ fstypes.add(line.strip())
+ else:
+ # ignore all lines starting with "nodev" except "nodev zfs"
+ fstype = line.split("\t")[1]
+ if fstype == "zfs":
+ fstypes.add("zfs")
+
+ # See: https://github.com/giampaolo/psutil/issues/1307
+ if procfs_path == "/proc" and os.path.isfile('/etc/mtab'):
+ mounts_path = os.path.realpath("/etc/mtab")
+ else:
+ mounts_path = os.path.realpath("%s/self/mounts" % procfs_path)
+
+ retlist = []
+ partitions = cext.disk_partitions(mounts_path)
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if device in ("/dev/root", "rootfs"):
+ device = RootFsDeviceFinder().find() or device
+ if not all:
+ if device == '' or fstype not in fstypes:
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+
+ return retlist
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+def sensors_temperatures():
+ """Return hardware (CPU and others) temperatures as a dict
+ including hardware name, label, current, max and critical
+ temperatures.
+
+ Implementation notes:
+ - /sys/class/hwmon looks like the most recent interface to
+ retrieve this info, and this implementation relies on it
+ only (old distros will probably use something else)
+ - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
+ - /sys/class/thermal/thermal_zone* is another one but it's more
+ difficult to parse
+ """
+ ret = collections.defaultdict(list)
+ basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')
+ # CentOS has an intermediate /device directory:
+ # https://github.com/giampaolo/psutil/issues/971
+ # https://github.com/nicolargo/glances/issues/1060
+ basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))
+ basenames = sorted(set([x.split('_')[0] for x in basenames]))
+
+ # Only add the coretemp hwmon entries if they're not already in
+ # /sys/class/hwmon/
+ # https://github.com/giampaolo/psutil/issues/1708
+ # https://github.com/giampaolo/psutil/pull/1648
+ basenames2 = glob.glob(
+ '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*')
+ repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/')
+ for name in basenames2:
+ altname = repl.sub('/sys/class/hwmon/', name)
+ if altname not in basenames:
+ basenames.append(name)
+
+ for base in basenames:
+ try:
+ path = base + '_input'
+ current = float(bcat(path)) / 1000.0
+ path = os.path.join(os.path.dirname(base), 'name')
+ unit_name = cat(path).strip()
+ except (IOError, OSError, ValueError):
+ # A lot of things can go wrong here, so let's just skip the
+ # whole entry. Sure thing is Linux's /sys/class/hwmon really
+ # is a stinky broken mess.
+ # https://github.com/giampaolo/psutil/issues/1009
+ # https://github.com/giampaolo/psutil/issues/1101
+ # https://github.com/giampaolo/psutil/issues/1129
+ # https://github.com/giampaolo/psutil/issues/1245
+ # https://github.com/giampaolo/psutil/issues/1323
+ continue
+
+ high = bcat(base + '_max', fallback=None)
+ critical = bcat(base + '_crit', fallback=None)
+ label = cat(base + '_label', fallback='').strip()
+
+ if high is not None:
+ try:
+ high = float(high) / 1000.0
+ except ValueError:
+ high = None
+ if critical is not None:
+ try:
+ critical = float(critical) / 1000.0
+ except ValueError:
+ critical = None
+
+ ret[unit_name].append((label, current, high, critical))
+
+ # Indication that no sensors were detected in /sys/class/hwmon/
+ if not basenames:
+ basenames = glob.glob('/sys/class/thermal/thermal_zone*')
+ basenames = sorted(set(basenames))
+
+ for base in basenames:
+ try:
+ path = os.path.join(base, 'temp')
+ current = float(bcat(path)) / 1000.0
+ path = os.path.join(base, 'type')
+ unit_name = cat(path).strip()
+ except (IOError, OSError, ValueError) as err:
+ debug(err)
+ continue
+
+ trip_paths = glob.glob(base + '/trip_point*')
+ trip_points = set(['_'.join(
+ os.path.basename(p).split('_')[0:3]) for p in trip_paths])
+ critical = None
+ high = None
+ for trip_point in trip_points:
+ path = os.path.join(base, trip_point + "_type")
+ trip_type = cat(path, fallback='').strip()
+ if trip_type == 'critical':
+ critical = bcat(os.path.join(base, trip_point + "_temp"),
+ fallback=None)
+ elif trip_type == 'high':
+ high = bcat(os.path.join(base, trip_point + "_temp"),
+ fallback=None)
+
+ if high is not None:
+ try:
+ high = float(high) / 1000.0
+ except ValueError:
+ high = None
+ if critical is not None:
+ try:
+ critical = float(critical) / 1000.0
+ except ValueError:
+ critical = None
+
+ ret[unit_name].append(('', current, high, critical))
+
+ return dict(ret)
+
+
+def sensors_fans():
+ """Return hardware fans info (for CPU and other peripherals) as a
+ dict including hardware label and current speed.
+
+ Implementation notes:
+ - /sys/class/hwmon looks like the most recent interface to
+ retrieve this info, and this implementation relies on it
+ only (old distros will probably use something else)
+ - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
+ """
+ ret = collections.defaultdict(list)
+ basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
+ if not basenames:
+ # CentOS has an intermediate /device directory:
+ # https://github.com/giampaolo/psutil/issues/971
+ basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
+
+ basenames = sorted(set([x.split('_')[0] for x in basenames]))
+ for base in basenames:
+ try:
+ current = int(bcat(base + '_input'))
+ except (IOError, OSError) as err:
+ debug(err)
+ continue
+ unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip()
+ label = cat(base + '_label', fallback='').strip()
+ ret[unit_name].append(_common.sfan(label, current))
+
+ return dict(ret)
+
+
+def sensors_battery():
+ """Return battery information.
+ Implementation note: it appears /sys/class/power_supply/BAT0/
+ directory structure may vary and provide files with the same
+ meaning but under different names, see:
+ https://github.com/giampaolo/psutil/issues/966
+ """
+ null = object()
+
+ def multi_bcat(*paths):
+ """Attempt to read the content of multiple files which may
+ not exist. If none of them exist return None.
+ """
+ for path in paths:
+ ret = bcat(path, fallback=null)
+ if ret != null:
+ try:
+ return int(ret)
+ except ValueError:
+ return ret.strip()
+ return None
+
+ bats = [x for x in os.listdir(POWER_SUPPLY_PATH) if x.startswith('BAT') or
+ 'battery' in x.lower()]
+ if not bats:
+ return None
+ # Get the first available battery. Usually this is "BAT0", except
+ # some rare exceptions:
+ # https://github.com/giampaolo/psutil/issues/1238
+ root = os.path.join(POWER_SUPPLY_PATH, sorted(bats)[0])
+
+ # Base metrics.
+ energy_now = multi_bcat(
+ root + "/energy_now",
+ root + "/charge_now")
+ power_now = multi_bcat(
+ root + "/power_now",
+ root + "/current_now")
+ energy_full = multi_bcat(
+ root + "/energy_full",
+ root + "/charge_full")
+ time_to_empty = multi_bcat(root + "/time_to_empty_now")
+
+ # Percent. If we have energy_full the percentage will be more
+ # accurate compared to reading /capacity file (float vs. int).
+ if energy_full is not None and energy_now is not None:
+ try:
+ percent = 100.0 * energy_now / energy_full
+ except ZeroDivisionError:
+ percent = 0.0
+ else:
+ percent = int(cat(root + "/capacity", fallback=-1))
+ if percent == -1:
+ return None
+
+ # Is AC power cable plugged in?
+ # Note: AC0 is not always available and sometimes (e.g. CentOS7)
+ # it's called "AC".
+ power_plugged = None
+ online = multi_bcat(
+ os.path.join(POWER_SUPPLY_PATH, "AC0/online"),
+ os.path.join(POWER_SUPPLY_PATH, "AC/online"))
+ if online is not None:
+ power_plugged = online == 1
+ else:
+ status = cat(root + "/status", fallback="").strip().lower()
+ if status == "discharging":
+ power_plugged = False
+ elif status in ("charging", "full"):
+ power_plugged = True
+
+ # Seconds left.
+ # Note to self: we may also calculate the charging ETA as per:
+ # https://github.com/thialfihar/dotfiles/blob/
+ # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55
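+ # energy_now / power_now yields the remaining time in hours (the
+ # values are typically expressed in µWh and µW, or µAh and µA);
+ # multiplying by 3600 converts it to seconds.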
+ if power_plugged:
+ secsleft = _common.POWER_TIME_UNLIMITED
+ elif energy_now is not None and power_now is not None:
+ try:
+ secsleft = int(energy_now / power_now * 3600)
+ except ZeroDivisionError:
+ secsleft = _common.POWER_TIME_UNKNOWN
+ elif time_to_empty is not None:
+ secsleft = int(time_to_empty * 60)
+ if secsleft < 0:
+ secsleft = _common.POWER_TIME_UNKNOWN
+ else:
+ secsleft = _common.POWER_TIME_UNKNOWN
+
+ return _common.sbattery(percent, secsleft, power_plugged)
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in (':0.0', ':0'):
+ hostname = 'localhost'
+ nt = _common.suser(user, tty or None, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+def boot_time():
+ """Return the system boot time expressed in seconds since the epoch."""
+ global BOOT_TIME
+ path = '%s/stat' % get_procfs_path()
+ with open_binary(path) as f:
+ for line in f:
+ if line.startswith(b'btime'):
+ ret = float(line.strip().split()[1])
+ BOOT_TIME = ret
+ return ret
+ raise RuntimeError(
+ "line 'btime' not found in %s" % path)
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix PID. Linux TIDs are not
+ supported (always return False).
+ """
+ if not _psposix.pid_exists(pid):
+ return False
+ else:
+ # Linux apparently does not distinguish between PIDs and TIDs
+ # (thread IDs).
+ # listdir("/proc") won't show any TID (only PIDs) but
+ # os.stat("/proc/{tid}") will succeed if {tid} exists.
+ # os.kill() can also be passed a TID. This is quite confusing.
+ # In here we want to enforce this distinction and support PIDs
+ # only, see:
+ # https://github.com/giampaolo/psutil/issues/687
+ try:
+ # Note: already checked that this is faster than using a
+ # regular expr. Also (a lot) faster than doing
+ # 'return pid in pids()'
+ path = "%s/%s/status" % (get_procfs_path(), pid)
+ with open_binary(path) as f:
+ for line in f:
+ if line.startswith(b"Tgid:"):
+ tgid = int(line.split()[1])
+ # If tgid and pid are the same then we're
+ # dealing with a process PID.
+ return tgid == pid
+ raise ValueError("'Tgid' line not found in %s" % path)
+ except (EnvironmentError, ValueError):
+ return pid in pids()
+
+
+def ppid_map():
+ """Obtain a {pid: ppid, ...} dict for all running processes in
+ one shot. Used to speed up Process.children().
+ """
+ ret = {}
+ procfs_path = get_procfs_path()
+ for pid in pids():
+ try:
+ with open_binary("%s/%s/stat" % (procfs_path, pid)) as f:
+ data = f.read()
+ except (FileNotFoundError, ProcessLookupError):
+ # Note: we should be able to access /stat for all processes
+ # aka it's unlikely we'll bump into EPERM, which is good.
+ pass
+ else:
+ rpar = data.rfind(b')')
+ dset = data[rpar + 2:].split()
+ ppid = int(dset[1])
+ ret[pid] = ppid
+ return ret
+
+
+def wrap_exceptions(fun):
+ """Decorator which translates bare OSError and IOError exceptions
+ into NoSuchProcess and AccessDenied.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except ProcessLookupError:
+ raise NoSuchProcess(self.pid, self._name)
+ except FileNotFoundError:
+ if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)):
+ raise NoSuchProcess(self.pid, self._name)
+ # Note: zombies will keep existing under /proc until they're
+ # gone so there's no way to distinguish them in here.
+ raise
+ return wrapper
+
+
+class Process(object):
+ """Linux process implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+ # For those C functions which do not raise NSP, possibly returning
+ # an incorrect or incomplete result.
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _parse_stat_file(self):
+ """Parse /proc/{pid}/stat file and return a dict with various
+ process info.
+ Using "man proc" as a reference: where "man proc" refers to
+ position N always subtract 3 (e.g. ppid is position 4 in
+ 'man proc' == position 1 in here).
+ The return value is cached in case oneshot() ctx manager is
+ in use.
+ """
+ data = bcat("%s/%s/stat" % (self._procfs_path, self.pid))
+ # Process name is between parentheses. It can contain spaces and
+ # other parentheses. This is taken into account by looking for
+ # the first occurrence of "(" and the last occurrence of ")".
+ rpar = data.rfind(b')')
+ name = data[data.find(b'(') + 1:rpar]
+ fields = data[rpar + 2:].split()
+
+ ret = {}
+ ret['name'] = name
+ ret['status'] = fields[0]
+ ret['ppid'] = fields[1]
+ ret['ttynr'] = fields[4]
+ ret['utime'] = fields[11]
+ ret['stime'] = fields[12]
+ ret['children_utime'] = fields[13]
+ ret['children_stime'] = fields[14]
+ ret['create_time'] = fields[19]
+ ret['cpu_num'] = fields[36]
+ ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks'
+
+ return ret
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _read_status_file(self):
+ """Read /proc/{pid}/stat file and return its content.
+ The return value is cached in case oneshot() ctx manager is
+ in use.
+ """
+ with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f:
+ return f.read()
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _read_smaps_file(self):
+ with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid)) as f:
+ return f.read().strip()
+
+ def oneshot_enter(self):
+ self._parse_stat_file.cache_activate(self)
+ self._read_status_file.cache_activate(self)
+ self._read_smaps_file.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._parse_stat_file.cache_deactivate(self)
+ self._read_status_file.cache_deactivate(self)
+ self._read_smaps_file.cache_deactivate(self)
+
+ @wrap_exceptions
+ def name(self):
+ name = self._parse_stat_file()['name']
+ if PY3:
+ name = decode(name)
+ # XXX - gets changed later and probably needs refactoring
+ return name
+
+ def exe(self):
+ try:
+ return readlink("%s/%s/exe" % (self._procfs_path, self.pid))
+ except (FileNotFoundError, ProcessLookupError):
+ # no such file error; might be raised also if the
+ # path actually exists for system processes with
+ # low pids (about 0-20)
+ if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)):
+ return ""
+ else:
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+
+ @wrap_exceptions
+ def cmdline(self):
+ with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f:
+ data = f.read()
+ if not data:
+ # may happen in case of zombie process
+ return []
+ # 'man proc' states that args are separated by null bytes '\0'
+ # and last char is supposed to be a null byte. Nevertheless
+ # some processes may change their cmdline after being started
+ # (via setproctitle() or similar), they are usually not
+ # compliant with this rule and use spaces instead. Google
+ # Chrome process is an example. See:
+ # https://github.com/giampaolo/psutil/issues/1179
+ sep = '\x00' if data.endswith('\x00') else ' '
+ if data.endswith(sep):
+ data = data[:-1]
+ cmdline = data.split(sep)
+ # Sometimes last char is a null byte '\0' but the args are
+ # separated by spaces, see: https://github.com/giampaolo/psutil/
+ # issues/1179#issuecomment-552984549
+ if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
+ cmdline = data.split(' ')
+ return cmdline
+
+ @wrap_exceptions
+ def environ(self):
+ with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f:
+ data = f.read()
+ return parse_environ_block(data)
+
+ @wrap_exceptions
+ def terminal(self):
+ tty_nr = int(self._parse_stat_file()['ttynr'])
+ tmap = _psposix.get_terminal_map()
+ try:
+ return tmap[tty_nr]
+ except KeyError:
+ return None
+
+ # May not be available on old kernels.
+ if os.path.exists('/proc/%s/io' % os.getpid()):
+ @wrap_exceptions
+ def io_counters(self):
+ fname = "%s/%s/io" % (self._procfs_path, self.pid)
+ fields = {}
+ with open_binary(fname) as f:
+ for line in f:
+ # https://github.com/giampaolo/psutil/issues/1004
+ line = line.strip()
+ if line:
+ try:
+ name, value = line.split(b': ')
+ except ValueError:
+ # https://github.com/giampaolo/psutil/issues/1004
+ continue
+ else:
+ fields[name] = int(value)
+ if not fields:
+ raise RuntimeError("%s file was empty" % fname)
+ try:
+ return pio(
+ fields[b'syscr'], # read syscalls
+ fields[b'syscw'], # write syscalls
+ fields[b'read_bytes'], # read bytes
+ fields[b'write_bytes'], # write bytes
+ fields[b'rchar'], # read chars
+ fields[b'wchar'], # write chars
+ )
+ except KeyError as err:
+ raise ValueError("%r field was not found in %s; found fields "
+ "are %r" % (err[0], fname, fields))
+
+ @wrap_exceptions
+ def cpu_times(self):
+ values = self._parse_stat_file()
+ utime = float(values['utime']) / CLOCK_TICKS
+ stime = float(values['stime']) / CLOCK_TICKS
+ children_utime = float(values['children_utime']) / CLOCK_TICKS
+ children_stime = float(values['children_stime']) / CLOCK_TICKS
+ iowait = float(values['blkio_ticks']) / CLOCK_TICKS
+ return pcputimes(utime, stime, children_utime, children_stime, iowait)
+
+ @wrap_exceptions
+ def cpu_num(self):
+ """What CPU the process is on."""
+ return int(self._parse_stat_file()['cpu_num'])
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ @wrap_exceptions
+ def create_time(self):
+ ctime = float(self._parse_stat_file()['create_time'])
+ # According to documentation, starttime is in field 21 and the
+ # unit is jiffies (clock ticks).
+ # We first divide it by clock ticks and then add the boot time,
+ # returning seconds since the epoch.
+ # Also use cached value if available.
+ bt = BOOT_TIME or boot_time()
+ return (ctime / CLOCK_TICKS) + bt
+
+ @wrap_exceptions
+ def memory_info(self):
+ # ============================================================
+ # | FIELD | DESCRIPTION | AKA | TOP |
+ # ============================================================
+ # | rss | resident set size | | RES |
+ # | vms | total program size | size | VIRT |
+ # | shared | shared pages (from shared mappings) | | SHR |
+ # | text | text ('code') | trs | CODE |
+ # | lib | library (unused in Linux 2.6) | lrs | |
+ # | data | data + stack | drs | DATA |
+ # | dirty | dirty pages (unused in Linux 2.6) | dt | |
+ # ============================================================
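+ # /proc/[pid]/statm reports these values as numbers of pages, hence
+ # the multiplication by PAGESIZE below to convert them to bytes.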
+ with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f:
+ vms, rss, shared, text, lib, data, dirty = \
+ [int(x) * PAGESIZE for x in f.readline().split()[:7]]
+ return pmem(rss, vms, shared, text, lib, data, dirty)
+
+ if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS:
+
+ @wrap_exceptions
+ def _parse_smaps_rollup(self):
+ # /proc/pid/smaps_rollup was added to Linux in 2017. Faster
+ # than /proc/pid/smaps. It reports higher PSS than */smaps
+ # (from 1k up to 200k higher; tested against all processes).
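+ # Illustrative smaps_rollup excerpt (values are in kB, hence the
+ # "* 1024" below):
+ # Pss:              1296 kB
+ # Private_Clean:     308 kB
+ # Private_Dirty:     836 kB
+ # Swap:                0 kB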
+ uss = pss = swap = 0
+ try:
+ with open_binary("{}/{}/smaps_rollup".format(
+ self._procfs_path, self.pid)) as f:
+ for line in f:
+ if line.startswith(b"Private_"):
+ # Private_Clean, Private_Dirty, Private_Hugetlb
+ uss += int(line.split()[1]) * 1024
+ elif line.startswith(b"Pss:"):
+ pss = int(line.split()[1]) * 1024
+ elif line.startswith(b"Swap:"):
+ swap = int(line.split()[1]) * 1024
+ except ProcessLookupError: # happens on readline()
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ return (uss, pss, swap)
+
+ @wrap_exceptions
+ def _parse_smaps(
+ self,
+ # Gets Private_Clean, Private_Dirty, Private_Hugetlb.
+ _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"),
+ _pss_re=re.compile(br"\nPss\:\s+(\d+)"),
+ _swap_re=re.compile(br"\nSwap\:\s+(\d+)")):
+ # /proc/pid/smaps does not exist on kernels < 2.6.14 or if
+ # CONFIG_MMU kernel configuration option is not enabled.
+
+ # Note: using 3 regexes is faster than reading the file
+ # line by line.
+ # XXX: on Python 3 these regexes are 30% slower than on
+ # Python 2 though. Figure out why.
+ #
+ # You might be tempted to calculate USS by subtracting
+ # the "shared" value from the "resident" value in
+ # /proc/<pid>/statm. But at least on Linux, statm's "shared"
+ # value actually counts pages backed by files, which has
+ # little to do with whether the pages are actually shared.
+ # /proc/self/smaps on the other hand appears to give us the
+ # correct information.
+ smaps_data = self._read_smaps_file()
+ # Note: smaps file can be empty for certain processes.
+ # The code below will not crash though and will result in 0.
+ uss = sum(map(int, _private_re.findall(smaps_data))) * 1024
+ pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024
+ swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024
+ return (uss, pss, swap)
+
+ def memory_full_info(self):
+ if HAS_PROC_SMAPS_ROLLUP: # faster
+ uss, pss, swap = self._parse_smaps_rollup()
+ else:
+ uss, pss, swap = self._parse_smaps()
+ basic_mem = self.memory_info()
+ return pfullmem(*basic_mem + (uss, pss, swap))
+
+ else:
+ memory_full_info = memory_info
+
+ if HAS_PROC_SMAPS:
+
+ @wrap_exceptions
+ def memory_maps(self):
+ """Return process's mapped memory regions as a list of named
+ tuples. Fields are explained in 'man proc'; here is an updated
+ (Apr 2012) version: http://goo.gl/fmebo
+
+ /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if
+ CONFIG_MMU kernel configuration option is not enabled.
+ """
+ def get_blocks(lines, current_block):
+ data = {}
+ for line in lines:
+ fields = line.split(None, 5)
+ if not fields[0].endswith(b':'):
+ # new block section
+ yield (current_block.pop(), data)
+ current_block.append(line)
+ else:
+ try:
+ data[fields[0]] = int(fields[1]) * 1024
+ except ValueError:
+ if fields[0].startswith(b'VmFlags:'):
+ # see issue #369
+ continue
+ else:
+ raise ValueError("don't know how to inte"
+ "rpret line %r" % line)
+ yield (current_block.pop(), data)
+
+ data = self._read_smaps_file()
+ # Note: smaps file can be empty for certain processes.
+ if not data:
+ return []
+ lines = data.split(b'\n')
+ ls = []
+ first_line = lines.pop(0)
+ current_block = [first_line]
+ for header, data in get_blocks(lines, current_block):
+ hfields = header.split(None, 5)
+ try:
+ addr, perms, offset, dev, inode, path = hfields
+ except ValueError:
+ addr, perms, offset, dev, inode, path = \
+ hfields + ['']
+ if not path:
+ path = '[anon]'
+ else:
+ if PY3:
+ path = decode(path)
+ path = path.strip()
+ if (path.endswith(' (deleted)') and not
+ path_exists_strict(path)):
+ path = path[:-10]
+ ls.append((
+ decode(addr), decode(perms), path,
+ data.get(b'Rss:', 0),
+ data.get(b'Size:', 0),
+ data.get(b'Pss:', 0),
+ data.get(b'Shared_Clean:', 0),
+ data.get(b'Shared_Dirty:', 0),
+ data.get(b'Private_Clean:', 0),
+ data.get(b'Private_Dirty:', 0),
+ data.get(b'Referenced:', 0),
+ data.get(b'Anonymous:', 0),
+ data.get(b'Swap:', 0)
+ ))
+ return ls
+
+ @wrap_exceptions
+ def cwd(self):
+ try:
+ return readlink("%s/%s/cwd" % (self._procfs_path, self.pid))
+ except (FileNotFoundError, ProcessLookupError):
+ # https://github.com/giampaolo/psutil/issues/986
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+
+ @wrap_exceptions
+ def num_ctx_switches(self,
+ _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')):
+ data = self._read_status_file()
+ ctxsw = _ctxsw_re.findall(data)
+ if not ctxsw:
+ raise NotImplementedError(
+ "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
+ "lines were not found in %s/%s/status; the kernel is "
+ "probably older than 2.6.23" % (
+ self._procfs_path, self.pid))
+ else:
+ return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1]))
+
+ @wrap_exceptions
+ def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')):
+ # Note: on Python 3 using a re is faster than iterating over file
+ # line by line. On Python 2 is the exact opposite, and iterating
+ # over a file on Python 3 is slower than on Python 2.
+ data = self._read_status_file()
+ return int(_num_threads_re.findall(data)[0])
+
+ @wrap_exceptions
+ def threads(self):
+ thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid))
+ thread_ids.sort()
+ retlist = []
+ hit_enoent = False
+ for thread_id in thread_ids:
+ fname = "%s/%s/task/%s/stat" % (
+ self._procfs_path, self.pid, thread_id)
+ try:
+ with open_binary(fname) as f:
+ st = f.read().strip()
+ except (FileNotFoundError, ProcessLookupError):
+ # no such file or directory or no such process;
+ # it means thread disappeared on us
+ hit_enoent = True
+ continue
+ # ignore the first two values ("pid (exe)")
+ st = st[st.find(b')') + 2:]
+ values = st.split(b' ')
+ utime = float(values[11]) / CLOCK_TICKS
+ stime = float(values[12]) / CLOCK_TICKS
+ ntuple = _common.pthread(int(thread_id), utime, stime)
+ retlist.append(ntuple)
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def nice_get(self):
+ # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f:
+ # data = f.read()
+ # return int(data.split()[18])
+
+ # Use C implementation
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ # starting from CentOS 6.
+ if HAS_CPU_AFFINITY:
+
+ @wrap_exceptions
+ def cpu_affinity_get(self):
+ return cext.proc_cpu_affinity_get(self.pid)
+
+ def _get_eligible_cpus(
+ self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")):
+ # See: https://github.com/giampaolo/psutil/issues/956
+ data = self._read_status_file()
+ match = _re.findall(data)
+ if match:
+ return list(range(int(match[0][0]), int(match[0][1]) + 1))
+ else:
+ return list(range(len(per_cpu_times())))
+
+ @wrap_exceptions
+ def cpu_affinity_set(self, cpus):
+ try:
+ cext.proc_cpu_affinity_set(self.pid, cpus)
+ except (OSError, ValueError) as err:
+ if isinstance(err, ValueError) or err.errno == errno.EINVAL:
+ eligible_cpus = self._get_eligible_cpus()
+ all_cpus = tuple(range(len(per_cpu_times())))
+ for cpu in cpus:
+ if cpu not in all_cpus:
+ raise ValueError(
+ "invalid CPU number %r; choose between %s" % (
+ cpu, eligible_cpus))
+ if cpu not in eligible_cpus:
+ raise ValueError(
+ "CPU number %r is not eligible; choose "
+ "between %s" % (cpu, eligible_cpus))
+ raise
+
+ # only starting from kernel 2.6.13
+ if HAS_PROC_IO_PRIORITY:
+
+ @wrap_exceptions
+ def ionice_get(self):
+ ioclass, value = cext.proc_ioprio_get(self.pid)
+ if enum is not None:
+ ioclass = IOPriority(ioclass)
+ return _common.pionice(ioclass, value)
+
+ @wrap_exceptions
+ def ionice_set(self, ioclass, value):
+ if value is None:
+ value = 0
+ if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE):
+ raise ValueError("%r ioclass accepts no value" % ioclass)
+ if value < 0 or value > 7:
+ raise ValueError("value not in 0-7 range")
+ return cext.proc_ioprio_set(self.pid, ioclass, value)
+
+ if prlimit is not None:
+
+ @wrap_exceptions
+ def rlimit(self, resource_, limits=None):
+ # If pid is 0 prlimit() applies to the calling process and
+ # we don't want that. We should never get here though as
+ # PID 0 is not supported on Linux.
+ if self.pid == 0:
+ raise ValueError("can't use prlimit() against PID 0 process")
+ try:
+ if limits is None:
+ # get
+ return prlimit(self.pid, resource_)
+ else:
+ # set
+ if len(limits) != 2:
+ raise ValueError(
+ "second argument must be a (soft, hard) tuple, "
+ "got %s" % repr(limits))
+ prlimit(self.pid, resource_, limits)
+ except OSError as err:
+ if err.errno == errno.ENOSYS and pid_exists(self.pid):
+ # I saw this happening on Travis:
+ # https://travis-ci.org/giampaolo/psutil/jobs/51368273
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ else:
+ raise
+
+ @wrap_exceptions
+ def status(self):
+ letter = self._parse_stat_file()['status']
+ if PY3:
+ letter = letter.decode()
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(letter, '?')
+
+ @wrap_exceptions
+ def open_files(self):
+ retlist = []
+ files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))
+ hit_enoent = False
+ for fd in files:
+ file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd)
+ try:
+ path = readlink(file)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT == file which is gone in the meantime
+ hit_enoent = True
+ continue
+ except OSError as err:
+ if err.errno == errno.EINVAL:
+ # not a link
+ continue
+ if err.errno == errno.ENAMETOOLONG:
+ # file name too long
+ debug(err)
+ continue
+ raise
+ else:
+ # If the path is not absolute there's no way to tell
+ # whether it's a regular file or not, so we skip it.
+ # A regular file is always supposed to have an
+ # absolute path though.
+ if path.startswith('/') and isfile_strict(path):
+ # Get file position and flags.
+ file = "%s/%s/fdinfo/%s" % (
+ self._procfs_path, self.pid, fd)
+ try:
+ with open_binary(file) as f:
+ pos = int(f.readline().split()[1])
+ flags = int(f.readline().split()[1], 8)
+ except (FileNotFoundError, ProcessLookupError):
+ # fd gone in the meantime; process may
+ # still be alive
+ hit_enoent = True
+ else:
+ mode = file_flags_to_mode(flags)
+ ntuple = popenfile(
+ path, int(fd), int(pos), mode, flags)
+ retlist.append(ntuple)
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = _connections.retrieve(kind, self.pid)
+ self._assert_alive()
+ return ret
+
+ @wrap_exceptions
+ def num_fds(self):
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def ppid(self):
+ return int(self._parse_stat_file()['ppid'])
+
+ @wrap_exceptions
+ def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')):
+ data = self._read_status_file()
+ real, effective, saved = _uids_re.findall(data)[0]
+ return _common.puids(int(real), int(effective), int(saved))
+
+ @wrap_exceptions
+ def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')):
+ data = self._read_status_file()
+ real, effective, saved = _gids_re.findall(data)[0]
+ return _common.pgids(int(real), int(effective), int(saved))
diff --git a/lib/psutil/_psosx.py b/lib/psutil/_psosx.py
new file mode 100644
index 0000000..58359bc
--- /dev/null
+++ b/lib/psutil/_psosx.py
@@ -0,0 +1,543 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""macOS platform implementation."""
+
+import errno
+import functools
+import os
+from collections import namedtuple
+
+from . import _common
+from . import _psposix
+from . import _psutil_osx as cext
+from . import _psutil_posix as cext_posix
+from ._common import AccessDenied
+from ._common import NoSuchProcess
+from ._common import ZombieProcess
+from ._common import conn_tmap
+from ._common import conn_to_ntuple
+from ._common import isfile_strict
+from ._common import memoize_when_activated
+from ._common import parse_environ_block
+from ._common import usage_percent
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+
+
+__extra__all__ = []
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+PAGESIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+PROC_STATUSES = {
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+}
+
+kinfo_proc_map = dict(
+ ppid=0,
+ ruid=1,
+ euid=2,
+ suid=3,
+ rgid=4,
+ egid=5,
+ sgid=6,
+ ttynr=7,
+ ctime=8,
+ status=9,
+ name=10,
+)
+
+pidtaskinfo_map = dict(
+ cpuutime=0,
+ cpustime=1,
+ rss=2,
+ vms=3,
+ pfaults=4,
+ pageins=5,
+ numthreads=6,
+ volctxsw=7,
+)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])
+# psutil.virtual_memory()
+svmem = namedtuple(
+ 'svmem', ['total', 'available', 'percent', 'used', 'free',
+ 'active', 'inactive', 'wired'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms', 'pfaults', 'pageins'])
+# psutil.Process.memory_full_info()
+pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', ))
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """System virtual memory as a namedtuple."""
+ total, active, inactive, wired, free, speculative = cext.virtual_mem()
+    # This is how Zabbix calculates avail and used mem:
+ # https://github.com/zabbix/zabbix/blob/trunk/src/libs/zbxsysinfo/
+ # osx/memory.c
+ # Also see: https://github.com/giampaolo/psutil/issues/1277
+ avail = inactive + free
+ used = active + wired
+    # This is NOT how Zabbix calculates free mem but it matches the
+    # "free" cmdline utility.
+ free -= speculative
+ percent = usage_percent((total - avail), total, round_=1)
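+    # Example with hypothetical numbers (illustration only, ignoring
+    # "speculative"): total=8GB, active=3GB, inactive=1GB, wired=1GB,
+    # free=2GB -> avail=3GB, used=4GB, percent=(8 - 3) / 8 * 100 = 62.5.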
+ return svmem(total, avail, percent, used, free,
+ active, inactive, wired)
+
+
+def swap_memory():
+ """Swap system memory as a (total, used, free, sin, sout) tuple."""
+ total, used, free, sin, sout = cext.swap_mem()
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, sin, sout)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system CPU times as a namedtuple."""
+ user, nice, system, idle = cext.cpu_times()
+ return scputimes(user, nice, system, idle)
+
+
+def per_cpu_times():
+ """Return system CPU times as a named tuple"""
+ ret = []
+ for cpu_t in cext.per_cpu_times():
+ user, nice, system, idle = cpu_t
+ item = scputimes(user, nice, system, idle)
+ ret.append(item)
+ return ret
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ return cext.cpu_count_logical()
+
+
+def cpu_count_cores():
+ """Return the number of CPU cores in the system."""
+ return cext.cpu_count_cores()
+
+
+def cpu_stats():
+ ctx_switches, interrupts, soft_interrupts, syscalls, traps = \
+ cext.cpu_stats()
+ return _common.scpustats(
+ ctx_switches, interrupts, soft_interrupts, syscalls)
+
+
+def cpu_freq():
+ """Return CPU frequency.
+ On macOS per-cpu frequency is not supported.
+ Also, the returned frequency never changes, see:
+ https://arstechnica.com/civis/viewtopic.php?f=19&t=465002
+ """
+ curr, min_, max_ = cext.cpu_freq()
+ return [_common.scpufreq(curr, min_, max_)]
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_usage = _psposix.disk_usage
+disk_io_counters = cext.disk_io_counters
+
+
+def disk_partitions(all=False):
+ """Return mounted disk partitions as a list of namedtuples."""
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+ if not os.path.isabs(device) or not os.path.exists(device):
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+def sensors_battery():
+ """Return battery information."""
+ try:
+ percent, minsleft, power_plugged = cext.sensors_battery()
+ except NotImplementedError:
+ # no power source - return None according to interface
+ return None
+ power_plugged = power_plugged == 1
+ if power_plugged:
+ secsleft = _common.POWER_TIME_UNLIMITED
+ elif minsleft == -1:
+ secsleft = _common.POWER_TIME_UNKNOWN
+ else:
+ secsleft = minsleft * 60
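+    # e.g. minsleft=90 -> secsleft=5400; "plugged in" and "unknown" map
+    # to the POWER_TIME_UNLIMITED / POWER_TIME_UNKNOWN constants above.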
+ return _common.sbattery(percent, secsleft, power_plugged)
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_connections(kind='inet'):
+ """System-wide network connections."""
+ # Note: on macOS this will fail with AccessDenied unless
+ # the process is owned by root.
+ ret = []
+ for pid in pids():
+ try:
+ cons = Process(pid).connections(kind)
+ except NoSuchProcess:
+ continue
+ else:
+ if cons:
+ for c in cons:
+ c = list(c) + [pid]
+ ret.append(_common.sconn(*c))
+ return ret
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ names = net_io_counters().keys()
+ ret = {}
+ for name in names:
+ try:
+ mtu = cext_posix.net_if_mtu(name)
+ flags = cext_posix.net_if_flags(name)
+ duplex, speed = cext_posix.net_if_duplex_speed(name)
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1279
+ if err.errno != errno.ENODEV:
+ raise
+ else:
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ output_flags = ','.join(flags)
+ isup = 'running' in flags
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu,
+ output_flags)
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ for item in rawlist:
+ user, tty, hostname, tstamp, pid = item
+ if tty == '~':
+ continue # reboot or shutdown
+ if not tstamp:
+ continue
+ nt = _common.suser(user, tty or None, hostname or None, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ ls = cext.pids()
+ if 0 not in ls:
+        # On certain macOS versions the C pids() function doesn't return
+        # PID 0, but "ps" does and the process is queryable via sysctl():
+ # https://travis-ci.org/giampaolo/psutil/jobs/309619941
+ try:
+ Process(0).create_time()
+ ls.insert(0, 0)
+ except NoSuchProcess:
+ pass
+ except AccessDenied:
+ ls.insert(0, 0)
+ return ls
+
+
+pid_exists = _psposix.pid_exists
+
+
+def is_zombie(pid):
+ try:
+ st = cext.proc_kinfo_oneshot(pid)[kinfo_proc_map['status']]
+ return st == cext.SZOMB
+ except Exception:
+ return False
+
+
+def wrap_exceptions(fun):
+ """Decorator which translates bare OSError exceptions into
+ NoSuchProcess and AccessDenied.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except ProcessLookupError:
+ if is_zombie(self.pid):
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ else:
+ raise NoSuchProcess(self.pid, self._name)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except cext.ZombieProcessError:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _get_kinfo_proc(self):
+ # Note: should work with all PIDs without permission issues.
+ ret = cext.proc_kinfo_oneshot(self.pid)
+ assert len(ret) == len(kinfo_proc_map)
+ return ret
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _get_pidtaskinfo(self):
+ # Note: should work for PIDs owned by user only.
+ ret = cext.proc_pidtaskinfo_oneshot(self.pid)
+ assert len(ret) == len(pidtaskinfo_map)
+ return ret
+
+ def oneshot_enter(self):
+ self._get_kinfo_proc.cache_activate(self)
+ self._get_pidtaskinfo.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._get_kinfo_proc.cache_deactivate(self)
+ self._get_pidtaskinfo.cache_deactivate(self)
+
+ @wrap_exceptions
+ def name(self):
+ name = self._get_kinfo_proc()[kinfo_proc_map['name']]
+ return name if name is not None else cext.proc_name(self.pid)
+
+ @wrap_exceptions
+ def exe(self):
+ return cext.proc_exe(self.pid)
+
+ @wrap_exceptions
+ def cmdline(self):
+ return cext.proc_cmdline(self.pid)
+
+ @wrap_exceptions
+ def environ(self):
+ return parse_environ_block(cext.proc_environ(self.pid))
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._get_kinfo_proc()[kinfo_proc_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def cwd(self):
+ return cext.proc_cwd(self.pid)
+
+ @wrap_exceptions
+ def uids(self):
+ rawtuple = self._get_kinfo_proc()
+ return _common.puids(
+ rawtuple[kinfo_proc_map['ruid']],
+ rawtuple[kinfo_proc_map['euid']],
+ rawtuple[kinfo_proc_map['suid']])
+
+ @wrap_exceptions
+ def gids(self):
+ rawtuple = self._get_kinfo_proc()
+ return _common.puids(
+ rawtuple[kinfo_proc_map['rgid']],
+ rawtuple[kinfo_proc_map['egid']],
+ rawtuple[kinfo_proc_map['sgid']])
+
+ @wrap_exceptions
+ def terminal(self):
+ tty_nr = self._get_kinfo_proc()[kinfo_proc_map['ttynr']]
+ tmap = _psposix.get_terminal_map()
+ try:
+ return tmap[tty_nr]
+ except KeyError:
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
+ rawtuple = self._get_pidtaskinfo()
+ return pmem(
+ rawtuple[pidtaskinfo_map['rss']],
+ rawtuple[pidtaskinfo_map['vms']],
+ rawtuple[pidtaskinfo_map['pfaults']],
+ rawtuple[pidtaskinfo_map['pageins']],
+ )
+
+ @wrap_exceptions
+ def memory_full_info(self):
+ basic_mem = self.memory_info()
+ uss = cext.proc_memory_uss(self.pid)
+ return pfullmem(*basic_mem + (uss, ))
+
+ @wrap_exceptions
+ def cpu_times(self):
+ rawtuple = self._get_pidtaskinfo()
+ return _common.pcputimes(
+ rawtuple[pidtaskinfo_map['cpuutime']],
+ rawtuple[pidtaskinfo_map['cpustime']],
+ # children user / system times are not retrievable (set to 0)
+ 0.0, 0.0)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._get_kinfo_proc()[kinfo_proc_map['ctime']]
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+        # The involuntary value doesn't appear to be available;
+        # getrusage() numbers seem to confirm this.
+        # We set it to 0.
+ vol = self._get_pidtaskinfo()[pidtaskinfo_map['volctxsw']]
+ return _common.pctxsw(vol, 0)
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._get_pidtaskinfo()[pidtaskinfo_map['numthreads']]
+
+ @wrap_exceptions
+ def open_files(self):
+ if self.pid == 0:
+ return []
+ files = []
+ rawlist = cext.proc_open_files(self.pid)
+ for path, fd in rawlist:
+ if isfile_strict(path):
+ ntuple = _common.popenfile(path, fd)
+ files.append(ntuple)
+ return files
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ if kind not in conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+ families, types = conn_tmap[kind]
+ rawlist = cext.proc_connections(self.pid, families, types)
+ ret = []
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status = item
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status,
+ TCP_STATUSES)
+ ret.append(nt)
+ return ret
+
+ @wrap_exceptions
+ def num_fds(self):
+ if self.pid == 0:
+ return 0
+ return cext.proc_num_fds(self.pid)
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
+
+ @wrap_exceptions
+ def nice_get(self):
+ return cext_posix.getpriority(self.pid)
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def status(self):
+ code = self._get_kinfo_proc()[kinfo_proc_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def threads(self):
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ return retlist
diff --git a/lib/psutil/_psposix.py b/lib/psutil/_psposix.py
new file mode 100644
index 0000000..1d250bf
--- /dev/null
+++ b/lib/psutil/_psposix.py
@@ -0,0 +1,232 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Routines common to all posix systems."""
+
+import glob
+import os
+import signal
+import sys
+import time
+
+from ._common import MACOS
+from ._common import TimeoutExpired
+from ._common import memoize
+from ._common import sdiskusage
+from ._common import usage_percent
+from ._compat import PY3
+from ._compat import ChildProcessError
+from ._compat import FileNotFoundError
+from ._compat import InterruptedError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import unicode
+
+
+if MACOS:
+ from . import _psutil_osx
+
+
+if sys.version_info >= (3, 4):
+ import enum
+else:
+ enum = None
+
+
+__all__ = ['pid_exists', 'wait_pid', 'disk_usage', 'get_terminal_map']
+
+
+def pid_exists(pid):
+ """Check whether pid exists in the current process table."""
+ if pid == 0:
+ # According to "man 2 kill" PID 0 has a special meaning:
+ # it refers to <<every process in the process group of the
+ # calling process>> so we don't want to go any further.
+ # If we get here it means this UNIX platform *does* have
+ # a process with id 0.
+ return True
+ try:
+ os.kill(pid, 0)
+ except ProcessLookupError:
+ return False
+ except PermissionError:
+ # EPERM clearly means there's a process to deny access to
+ return True
+ # According to "man 2 kill" possible error values are
+ # (EINVAL, EPERM, ESRCH)
+ else:
+ return True
+
+
+# Python 3.5 signals enum (contributed by me ^^):
+# https://bugs.python.org/issue21076
+if enum is not None and hasattr(signal, "Signals"):
+ Negsignal = enum.IntEnum(
+ 'Negsignal', dict([(x.name, -x.value) for x in signal.Signals]))
+
+ def negsig_to_enum(num):
+ """Convert a negative signal value to an enum."""
+ try:
+ return Negsignal(num)
+ except ValueError:
+ return num
+else: # pragma: no cover
+ def negsig_to_enum(num):
+ return num
+
+
+def wait_pid(pid, timeout=None, proc_name=None,
+ _waitpid=os.waitpid,
+ _timer=getattr(time, 'monotonic', time.time),
+ _min=min,
+ _sleep=time.sleep,
+ _pid_exists=pid_exists):
+ """Wait for a process PID to terminate.
+
+ If the process terminated normally by calling exit(3) or _exit(2),
+ or by returning from main(), the return value is the positive integer
+ passed to *exit().
+
+ If it was terminated by a signal it returns the negated value of the
+ signal which caused the termination (e.g. -SIGTERM).
+
+    If PID is not a child of os.getpid() (the current process) just
+    wait until the process disappears and return None.
+
+ If PID does not exist at all return None immediately.
+
+ If *timeout* != None and process is still alive raise TimeoutExpired.
+ timeout=0 is also possible (either return immediately or raise).
+ """
+ if pid <= 0:
+ raise ValueError("can't wait for PID 0") # see "man waitpid"
+ interval = 0.0001
+ flags = 0
+ if timeout is not None:
+ flags |= os.WNOHANG
+ stop_at = _timer() + timeout
+
+ def sleep(interval):
+ # Sleep for some time and return a new increased interval.
+ if timeout is not None:
+ if _timer() >= stop_at:
+ raise TimeoutExpired(timeout, pid=pid, name=proc_name)
+ _sleep(interval)
+ return _min(interval * 2, 0.04)
+
+ # See: https://linux.die.net/man/2/waitpid
+ while True:
+ try:
+ retpid, status = os.waitpid(pid, flags)
+ except InterruptedError:
+ interval = sleep(interval)
+ except ChildProcessError:
+ # This has two meanings:
+ # - PID is not a child of os.getpid() in which case
+ # we keep polling until it's gone
+ # - PID never existed in the first place
+ # In both cases we'll eventually return None as we
+ # can't determine its exit status code.
+ while _pid_exists(pid):
+ interval = sleep(interval)
+ return
+ else:
+ if retpid == 0:
+ # WNOHANG flag was used and PID is still running.
+ interval = sleep(interval)
+ continue
+ elif os.WIFEXITED(status):
+ # Process terminated normally by calling exit(3) or _exit(2),
+ # or by returning from main(). The return value is the
+ # positive integer passed to *exit().
+ return os.WEXITSTATUS(status)
+ elif os.WIFSIGNALED(status):
+ # Process exited due to a signal. Return the negative value
+ # of that signal.
+ return negsig_to_enum(-os.WTERMSIG(status))
+ # elif os.WIFSTOPPED(status):
+ # # Process was stopped via SIGSTOP or is being traced, and
+ # # waitpid() was called with WUNTRACED flag. PID is still
+ # # alive. From now on waitpid() will keep returning (0, 0)
+ # # until the process state doesn't change.
+ # # It may make sense to catch/enable this since stopped PIDs
+ # # ignore SIGTERM.
+ # interval = sleep(interval)
+ # continue
+ # elif os.WIFCONTINUED(status):
+ # # Process was resumed via SIGCONT and waitpid() was called
+ # # with WCONTINUED flag.
+ # interval = sleep(interval)
+ # continue
+ else:
+ # Should never happen.
+ raise ValueError("unknown process exit status %r" % status)
+
+
+def disk_usage(path):
+ """Return disk usage associated with path.
+    Note: UNIX usually reserves 5% of the disk space, which is not
+    accessible to regular users. In this function "total" and "used"
+    reflect the total and used disk space, whereas "free" and "percent"
+    represent the "free" and "used percent" of user-accessible disk
+    space.
+ """
+ if PY3:
+ st = os.statvfs(path)
+ else: # pragma: no cover
+ # os.statvfs() does not support unicode on Python 2:
+ # - https://github.com/giampaolo/psutil/issues/416
+ # - http://bugs.python.org/issue18695
+ try:
+ st = os.statvfs(path)
+ except UnicodeEncodeError:
+ if isinstance(path, unicode):
+ try:
+ path = path.encode(sys.getfilesystemencoding())
+ except UnicodeEncodeError:
+ pass
+ st = os.statvfs(path)
+ else:
+ raise
+
+ # Total space which is only available to root (unless changed
+ # at system level).
+ total = (st.f_blocks * st.f_frsize)
+ # Remaining free space usable by root.
+ avail_to_root = (st.f_bfree * st.f_frsize)
+ # Remaining free space usable by user.
+ avail_to_user = (st.f_bavail * st.f_frsize)
+ # Total space being used in general.
+ used = (total - avail_to_root)
+ if MACOS:
+ # see: https://github.com/giampaolo/psutil/pull/2152
+ used = _psutil_osx.disk_usage_used(path, used)
+ # Total space which is available to user (same as 'total' but
+ # for the user).
+ total_user = used + avail_to_user
+ # User usage percent compared to the total amount of space
+ # the user can use. This number would be higher if compared
+    # to root's because the user has less space (usually about 5% less).
+ usage_percent_user = usage_percent(used, total_user, round_=1)
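+    # Worked example with made-up statvfs values: f_frsize=4096,
+    # f_blocks=1000, f_bfree=100, f_bavail=50 -> total=4096000,
+    # used=3686400, avail_to_user=204800, total_user=3891200,
+    # usage_percent_user=94.7 (ignoring the macOS adjustment below).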
+
+    # NB: the percentage is about 5% lower than what df shows because of
+ # reserved blocks that we are currently not considering:
+ # https://github.com/giampaolo/psutil/issues/829#issuecomment-223750462
+ return sdiskusage(
+ total=total, used=used, free=avail_to_user, percent=usage_percent_user)
+
+
+@memoize
+def get_terminal_map():
+ """Get a map of device-id -> path as a dict.
+ Used by Process.terminal()
+ """
+ ret = {}
+ ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
+ for name in ls:
+ assert name not in ret, name
+ try:
+ ret[os.stat(name).st_rdev] = name
+ except FileNotFoundError:
+ pass
+ return ret
diff --git a/lib/psutil/_pssunos.py b/lib/psutil/_pssunos.py
new file mode 100644
index 0000000..541c1aa
--- /dev/null
+++ b/lib/psutil/_pssunos.py
@@ -0,0 +1,727 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS Solaris platform implementation."""
+
+import errno
+import functools
+import os
+import socket
+import subprocess
+import sys
+from collections import namedtuple
+from socket import AF_INET
+
+from . import _common
+from . import _psposix
+from . import _psutil_posix as cext_posix
+from . import _psutil_sunos as cext
+from ._common import AF_INET6
+from ._common import AccessDenied
+from ._common import NoSuchProcess
+from ._common import ZombieProcess
+from ._common import debug
+from ._common import get_procfs_path
+from ._common import isfile_strict
+from ._common import memoize_when_activated
+from ._common import sockfam_to_enum
+from ._common import socktype_to_enum
+from ._common import usage_percent
+from ._compat import PY3
+from ._compat import FileNotFoundError
+from ._compat import PermissionError
+from ._compat import ProcessLookupError
+from ._compat import b
+
+
+__extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+
+PAGE_SIZE = cext_posix.getpagesize()
+AF_LINK = cext_posix.AF_LINK
+IS_64_BIT = sys.maxsize > 2**32
+
+CONN_IDLE = "IDLE"
+CONN_BOUND = "BOUND"
+
+PROC_STATUSES = {
+ cext.SSLEEP: _common.STATUS_SLEEPING,
+ cext.SRUN: _common.STATUS_RUNNING,
+ cext.SZOMB: _common.STATUS_ZOMBIE,
+ cext.SSTOP: _common.STATUS_STOPPED,
+ cext.SIDL: _common.STATUS_IDLE,
+ cext.SONPROC: _common.STATUS_RUNNING, # same as run
+ cext.SWAIT: _common.STATUS_WAITING,
+}
+
+TCP_STATUSES = {
+ cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
+ cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
+ cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
+ cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.TCPS_CLOSED: _common.CONN_CLOSE,
+ cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.TCPS_LISTEN: _common.CONN_LISTEN,
+ cext.TCPS_CLOSING: _common.CONN_CLOSING,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+ cext.TCPS_IDLE: CONN_IDLE, # sunos specific
+ cext.TCPS_BOUND: CONN_BOUND, # sunos specific
+}
+
+proc_info_map = dict(
+ ppid=0,
+ rss=1,
+ vms=2,
+ create_time=3,
+ nice=4,
+ num_threads=5,
+ status=6,
+ ttynr=7,
+ uid=8,
+ euid=9,
+ gid=10,
+ egid=11)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.cpu_times()
+scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
+# psutil.cpu_times(percpu=True)
+pcputimes = namedtuple('pcputimes',
+ ['user', 'system', 'children_user', 'children_system'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+# psutil.Process.memory_info()
+pmem = namedtuple('pmem', ['rss', 'vms'])
+pfullmem = pmem
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple('pmmap_grouped',
+ ['path', 'rss', 'anonymous', 'locked'])
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """Report virtual memory metrics."""
+ # we could have done this with kstat, but IMHO this is good enough
+ total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
+ # note: there's no difference on Solaris
+ free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+ """Report swap memory metrics."""
+ sin, sout = cext.swap_mem()
+ # XXX
+ # we are supposed to get total/free by doing so:
+ # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
+ # usr/src/cmd/swap/swap.c
+    # ...nevertheless I can't manage to obtain the same numbers as the
+    # 'swap' cmdline utility, so let's parse its output (sigh!)
+ p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' %
+ os.environ['PATH'], 'swap', '-l'],
+ stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout = stdout.decode(sys.stdout.encoding)
+ if p.returncode != 0:
+ raise RuntimeError("'swap -l' failed (retcode=%s)" % p.returncode)
+
+ lines = stdout.strip().split('\n')[1:]
+ if not lines:
+ raise RuntimeError('no swap device(s) configured')
+ total = free = 0
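+    # Each remaining line (the header was stripped above) is expected
+    # to look roughly like:
+    #   /dev/zvol/dsk/rpool/swap 256,1 16 2097136 2097136
+    # where the 4th and 5th columns are the total and free sizes
+    # expressed in 512-byte blocks.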
+ for line in lines:
+ line = line.split()
+ t, f = line[3:5]
+ total += int(int(t) * 512)
+ free += int(int(f) * 512)
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent,
+ sin * PAGE_SIZE, sout * PAGE_SIZE)
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system-wide CPU times as a named tuple"""
+ ret = cext.per_cpu_times()
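+    # Sum each column across CPUs, e.g. [(1, 2, 3, 4), (5, 6, 7, 8)]
+    # becomes scputimes(user=6, system=8, idle=10, iowait=12).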
+ return scputimes(*[sum(x) for x in zip(*ret)])
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples"""
+ ret = cext.per_cpu_times()
+ return [scputimes(*x) for x in ret]
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ try:
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+ except ValueError:
+ # mimic os.cpu_count() behavior
+ return None
+
+
+def cpu_count_cores():
+ """Return the number of CPU cores in the system."""
+ return cext.cpu_count_cores()
+
+
+def cpu_stats():
+ """Return various CPU stats as a named tuple."""
+ ctx_switches, interrupts, syscalls, traps = cext.cpu_stats()
+ soft_interrupts = 0
+ return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
+ syscalls)
+
+
+# =====================================================================
+# --- disks
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+disk_usage = _psposix.disk_usage
+
+
+def disk_partitions(all=False):
+ """Return system disk partitions."""
+ # TODO - the filtering logic should be better checked so that
+ # it tries to reflect 'df' as much as possible
+ retlist = []
+ partitions = cext.disk_partitions()
+ for partition in partitions:
+ device, mountpoint, fstype, opts = partition
+ if device == 'none':
+ device = ''
+ if not all:
+            # Unlike, say, Linux, we don't have a list of common fs
+            # types, so the best we can do, AFAIK, is to filter out
+            # filesystems with a total size of 0.
+ try:
+ if not disk_usage(mountpoint).total:
+ continue
+ except OSError as err:
+ # https://github.com/giampaolo/psutil/issues/1674
+ debug("skipping %r: %s" % (mountpoint, err))
+ continue
+ maxfile = maxpath = None # set later
+ ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
+ maxfile, maxpath)
+ retlist.append(ntuple)
+ return retlist
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+net_io_counters = cext.net_io_counters
+net_if_addrs = cext_posix.net_if_addrs
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ Only INET sockets are returned (UNIX are not).
+ """
+ cmap = _common.conn_tmap.copy()
+ if _pid == -1:
+ cmap.pop('unix', 0)
+ if kind not in cmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in cmap])))
+ families, types = _common.conn_tmap[kind]
+ rawlist = cext.net_connections(_pid)
+ ret = set()
+ for item in rawlist:
+ fd, fam, type_, laddr, raddr, status, pid = item
+ if fam not in families:
+ continue
+ if type_ not in types:
+ continue
+ # TODO: refactor and use _common.conn_to_ntuple.
+ if fam in (AF_INET, AF_INET6):
+ if laddr:
+ laddr = _common.addr(*laddr)
+ if raddr:
+ raddr = _common.addr(*raddr)
+ status = TCP_STATUSES[status]
+ fam = sockfam_to_enum(fam)
+ type_ = socktype_to_enum(type_)
+ if _pid == -1:
+ nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
+ else:
+ nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
+ ret.add(nt)
+ return list(ret)
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ ret = cext.net_if_stats()
+ for name, items in ret.items():
+ isup, duplex, speed, mtu = items
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu, '')
+ return ret
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ return cext.boot_time()
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ localhost = (':0.0', ':0')
+ for item in rawlist:
+ user, tty, hostname, tstamp, user_process, pid = item
+ # note: the underlying C function includes entries about
+ # system boot, run level and others. We might want
+ # to use them in the future.
+ if not user_process:
+ continue
+ if hostname in localhost:
+ hostname = 'localhost'
+ nt = _common.suser(user, tty, hostname, tstamp, pid)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+def pids():
+ """Returns a list of PIDs currently running on the system."""
+ return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
+
+
+def pid_exists(pid):
+ """Check for the existence of a unix pid."""
+ return _psposix.pid_exists(pid)
+
+
+def wrap_exceptions(fun):
+ """Call callable into a try/except clause and translate ENOENT,
+ EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except (FileNotFoundError, ProcessLookupError):
+ # ENOENT (no such file or directory) gets raised on open().
+ # ESRCH (no such process) can get raised on read() if
+            # the process is gone in the meantime.
+ if not pid_exists(self.pid):
+ raise NoSuchProcess(self.pid, self._name)
+ else:
+ raise ZombieProcess(self.pid, self._name, self._ppid)
+ except PermissionError:
+ raise AccessDenied(self.pid, self._name)
+ except OSError:
+ if self.pid == 0:
+ if 0 in pids():
+ raise AccessDenied(self.pid, self._name)
+ else:
+ raise
+ raise
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+ self._procfs_path = get_procfs_path()
+
+ def _assert_alive(self):
+ """Raise NSP if the process disappeared on us."""
+        # For those C functions which do not raise NSP and may return
+        # an incorrect or incomplete result.
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ def oneshot_enter(self):
+ self._proc_name_and_args.cache_activate(self)
+ self._proc_basic_info.cache_activate(self)
+ self._proc_cred.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_name_and_args.cache_deactivate(self)
+ self._proc_basic_info.cache_deactivate(self)
+ self._proc_cred.cache_deactivate(self)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_name_and_args(self):
+ return cext.proc_name_and_args(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_basic_info(self):
+ if self.pid == 0 and not \
+ os.path.exists('%s/%s/psinfo' % (self._procfs_path, self.pid)):
+ raise AccessDenied(self.pid)
+ ret = cext.proc_basic_info(self.pid, self._procfs_path)
+ assert len(ret) == len(proc_info_map)
+ return ret
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def _proc_cred(self):
+ return cext.proc_cred(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def name(self):
+ # note: max len == 15
+ return self._proc_name_and_args()[0]
+
+ @wrap_exceptions
+ def exe(self):
+ try:
+ return os.readlink(
+ "%s/%s/path/a.out" % (self._procfs_path, self.pid))
+ except OSError:
+ pass # continue and guess the exe name from the cmdline
+ # Will be guessed later from cmdline but we want to explicitly
+ # invoke cmdline here in order to get an AccessDenied
+        # exception if the user does not have enough privileges.
+ self.cmdline()
+ return ""
+
+ @wrap_exceptions
+ def cmdline(self):
+ return self._proc_name_and_args()[1].split(' ')
+
+ @wrap_exceptions
+ def environ(self):
+ return cext.proc_environ(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def create_time(self):
+ return self._proc_basic_info()[proc_info_map['create_time']]
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_basic_info()[proc_info_map['num_threads']]
+
+ @wrap_exceptions
+ def nice_get(self):
+ # Note #1: getpriority(3) doesn't work for realtime processes.
+ # Psinfo is what ps uses, see:
+ # https://github.com/giampaolo/psutil/issues/1194
+ return self._proc_basic_info()[proc_info_map['nice']]
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ if self.pid in (2, 3):
+            # Special case PIDs: internally setpriority(3) returns ESRCH
+ # (no such process), no matter what.
+ # The process actually exists though, as it has a name,
+ # creation time, etc.
+ raise AccessDenied(self.pid, self._name)
+ return cext_posix.setpriority(self.pid, value)
+
+ @wrap_exceptions
+ def ppid(self):
+ self._ppid = self._proc_basic_info()[proc_info_map['ppid']]
+ return self._ppid
+
+ @wrap_exceptions
+ def uids(self):
+ try:
+ real, effective, saved, _, _, _ = self._proc_cred()
+ except AccessDenied:
+ real = self._proc_basic_info()[proc_info_map['uid']]
+ effective = self._proc_basic_info()[proc_info_map['euid']]
+ saved = None
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def gids(self):
+ try:
+ _, _, _, real, effective, saved = self._proc_cred()
+ except AccessDenied:
+ real = self._proc_basic_info()[proc_info_map['gid']]
+ effective = self._proc_basic_info()[proc_info_map['egid']]
+ saved = None
+ return _common.puids(real, effective, saved)
+
+ @wrap_exceptions
+ def cpu_times(self):
+ try:
+ times = cext.proc_cpu_times(self.pid, self._procfs_path)
+ except OSError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+ # Since there simply is no way to determine CPU times we
+ # return 0.0 as a fallback. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ times = (0.0, 0.0, 0.0, 0.0)
+ else:
+ raise
+ return _common.pcputimes(*times)
+
+ @wrap_exceptions
+ def cpu_num(self):
+ return cext.proc_cpu_num(self.pid, self._procfs_path)
+
+ @wrap_exceptions
+ def terminal(self):
+ procfs_path = self._procfs_path
+ hit_enoent = False
+        tty = self._proc_basic_info()[proc_info_map['ttynr']]
+ if tty != cext.PRNODEV:
+ for x in (0, 1, 2, 255):
+ try:
+ return os.readlink(
+ '%s/%d/path/%d' % (procfs_path, self.pid, x))
+ except FileNotFoundError:
+ hit_enoent = True
+ continue
+ if hit_enoent:
+ self._assert_alive()
+
+ @wrap_exceptions
+ def cwd(self):
+ # /proc/PID/path/cwd may not be resolved by readlink() even if
+ # it exists (ls shows it). If that's the case and the process
+ # is still alive return None (we can return None also on BSD).
+ # Reference: http://goo.gl/55XgO
+ procfs_path = self._procfs_path
+ try:
+ return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid))
+ except FileNotFoundError:
+ os.stat("%s/%s" % (procfs_path, self.pid)) # raise NSP or AD
+ return None
+
+ @wrap_exceptions
+ def memory_info(self):
+ ret = self._proc_basic_info()
+ rss = ret[proc_info_map['rss']] * 1024
+ vms = ret[proc_info_map['vms']] * 1024
+ return pmem(rss, vms)
+
+ memory_full_info = memory_info
+
+ @wrap_exceptions
+ def status(self):
+ code = self._proc_basic_info()[proc_info_map['status']]
+ # XXX is '?' legit? (we're not supposed to return it anyway)
+ return PROC_STATUSES.get(code, '?')
+
+ @wrap_exceptions
+ def threads(self):
+ procfs_path = self._procfs_path
+ ret = []
+ tids = os.listdir('%s/%d/lwp' % (procfs_path, self.pid))
+ hit_enoent = False
+ for tid in tids:
+ tid = int(tid)
+ try:
+ utime, stime = cext.query_process_thread(
+ self.pid, tid, procfs_path)
+ except EnvironmentError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+                    # Since there simply is no way to determine the
+                    # thread times we just skip this thread. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ continue
+ # ENOENT == thread gone in meantime
+ if err.errno == errno.ENOENT:
+ hit_enoent = True
+ continue
+ raise
+ else:
+ nt = _common.pthread(tid, utime, stime)
+ ret.append(nt)
+ if hit_enoent:
+ self._assert_alive()
+ return ret
+
+ @wrap_exceptions
+ def open_files(self):
+ retlist = []
+ hit_enoent = False
+ procfs_path = self._procfs_path
+ pathdir = '%s/%d/path' % (procfs_path, self.pid)
+ for fd in os.listdir('%s/%d/fd' % (procfs_path, self.pid)):
+ path = os.path.join(pathdir, fd)
+ if os.path.islink(path):
+ try:
+ file = os.readlink(path)
+ except FileNotFoundError:
+ hit_enoent = True
+ continue
+ else:
+ if isfile_strict(file):
+ retlist.append(_common.popenfile(file, int(fd)))
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ def _get_unix_sockets(self, pid):
+ """Get UNIX sockets used by process by parsing 'pfiles' output."""
+ # TODO: rewrite this in C (...but the damn netstat source code
+ # does not include this part! Argh!!)
+ cmd = "pfiles %s" % pid
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if PY3:
+ stdout, stderr = [x.decode(sys.stdout.encoding)
+ for x in (stdout, stderr)]
+ if p.returncode != 0:
+ if 'permission denied' in stderr.lower():
+ raise AccessDenied(self.pid, self._name)
+ if 'no such process' in stderr.lower():
+ raise NoSuchProcess(self.pid, self._name)
+ raise RuntimeError("%r command error\n%s" % (cmd, stderr))
+
+ lines = stdout.split('\n')[2:]
+ for i, line in enumerate(lines):
+ line = line.lstrip()
+ if line.startswith('sockname: AF_UNIX'):
+ path = line.split(' ', 2)[2]
+ type = lines[i - 2].strip()
+ if type == 'SOCK_STREAM':
+ type = socket.SOCK_STREAM
+ elif type == 'SOCK_DGRAM':
+ type = socket.SOCK_DGRAM
+ else:
+ type = -1
+ yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ ret = net_connections(kind, _pid=self.pid)
+ # The underlying C implementation retrieves all OS connections
+ # and filters them by PID. At this point we can't tell whether
+        # an empty list means there were no connections for the process
+        # or the process is no longer active, so we force NSP in case
+        # the PID is no longer there.
+ if not ret:
+ # will raise NSP if process is gone
+ os.stat('%s/%s' % (self._procfs_path, self.pid))
+
+ # UNIX sockets
+ if kind in ('all', 'unix'):
+ ret.extend([_common.pconn(*conn) for conn in
+ self._get_unix_sockets(self.pid)])
+ return ret
+
+ nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
+ nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
+
+ @wrap_exceptions
+ def memory_maps(self):
+ def toaddr(start, end):
+ return '%s-%s' % (hex(start)[2:].strip('L'),
+ hex(end)[2:].strip('L'))
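+        # e.g. toaddr(0x8048000, 0x8049000) == '8048000-8049000'; the
+        # strip('L') only matters for Python 2 long literals.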
+
+ procfs_path = self._procfs_path
+ retlist = []
+ try:
+ rawlist = cext.proc_memory_maps(self.pid, procfs_path)
+ except OSError as err:
+ if err.errno == errno.EOVERFLOW and not IS_64_BIT:
+ # We may get here if we attempt to query a 64bit process
+ # with a 32bit python.
+ # Error originates from read() and also tools like "cat"
+ # fail in the same way (!).
+                # Since there simply is no way to determine the memory
+                # maps we return [] as a fallback. See:
+ # https://github.com/giampaolo/psutil/issues/857
+ return []
+ else:
+ raise
+ hit_enoent = False
+ for item in rawlist:
+ addr, addrsize, perm, name, rss, anon, locked = item
+ addr = toaddr(addr, addrsize)
+ if not name.startswith('['):
+ try:
+ name = os.readlink(
+ '%s/%s/path/%s' % (procfs_path, self.pid, name))
+ except OSError as err:
+ if err.errno == errno.ENOENT:
+ # sometimes the link may not be resolved by
+ # readlink() even if it exists (ls shows it).
+ # If that's the case we just return the
+ # unresolved link path.
+                        # This seems to be an inconsistency with /proc,
+                        # similar to: http://goo.gl/55XgO
+ name = '%s/%s/path/%s' % (procfs_path, self.pid, name)
+ hit_enoent = True
+ else:
+ raise
+ retlist.append((addr, perm, name, rss, anon, locked))
+ if hit_enoent:
+ self._assert_alive()
+ return retlist
+
+ @wrap_exceptions
+ def num_fds(self):
+ return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ return _common.pctxsw(
+ *cext.proc_num_ctx_switches(self.pid, self._procfs_path))
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ return _psposix.wait_pid(self.pid, timeout, self._name)
diff --git a/lib/psutil/_psutil_linux.abi3.so b/lib/psutil/_psutil_linux.abi3.so
new file mode 100755
index 0000000..d6aa4c1
--- /dev/null
+++ b/lib/psutil/_psutil_linux.abi3.so
Binary files differ
diff --git a/lib/psutil/_psutil_posix.abi3.so b/lib/psutil/_psutil_posix.abi3.so
new file mode 100755
index 0000000..0156ed1
--- /dev/null
+++ b/lib/psutil/_psutil_posix.abi3.so
Binary files differ
diff --git a/lib/psutil/_pswindows.py b/lib/psutil/_pswindows.py
new file mode 100644
index 0000000..49f8b05
--- /dev/null
+++ b/lib/psutil/_pswindows.py
@@ -0,0 +1,1120 @@
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Windows platform implementation."""
+
+import contextlib
+import errno
+import functools
+import os
+import signal
+import sys
+import time
+from collections import namedtuple
+
+from . import _common
+from ._common import ENCODING
+from ._common import ENCODING_ERRS
+from ._common import AccessDenied
+from ._common import NoSuchProcess
+from ._common import TimeoutExpired
+from ._common import conn_tmap
+from ._common import conn_to_ntuple
+from ._common import debug
+from ._common import isfile_strict
+from ._common import memoize
+from ._common import memoize_when_activated
+from ._common import parse_environ_block
+from ._common import usage_percent
+from ._compat import PY3
+from ._compat import long
+from ._compat import lru_cache
+from ._compat import range
+from ._compat import unicode
+from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS
+from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS
+from ._psutil_windows import HIGH_PRIORITY_CLASS
+from ._psutil_windows import IDLE_PRIORITY_CLASS
+from ._psutil_windows import NORMAL_PRIORITY_CLASS
+from ._psutil_windows import REALTIME_PRIORITY_CLASS
+
+
+try:
+ from . import _psutil_windows as cext
+except ImportError as err:
+ if str(err).lower().startswith("dll load failed") and \
+ sys.getwindowsversion()[0] < 6:
+ # We may get here if:
+ # 1) we are on an old Windows version
+ # 2) psutil was installed via pip + wheel
+ # See: https://github.com/giampaolo/psutil/issues/811
+ msg = "this Windows version is too old (< Windows Vista); "
+ msg += "psutil 3.4.2 is the latest version which supports Windows "
+ msg += "2000, XP and 2003 server"
+ raise RuntimeError(msg)
+ else:
+ raise
+
+if sys.version_info >= (3, 4):
+ import enum
+else:
+ enum = None
+
+# process priority constants, import from __init__.py:
+# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
+__extra__all__ = [
+ "win_service_iter", "win_service_get",
+ # Process priority
+ "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
+ "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", "NORMAL_PRIORITY_CLASS",
+ "REALTIME_PRIORITY_CLASS",
+ # IO priority
+ "IOPRIO_VERYLOW", "IOPRIO_LOW", "IOPRIO_NORMAL", "IOPRIO_HIGH",
+ # others
+ "CONN_DELETE_TCB", "AF_LINK",
+]
+
+
+# =====================================================================
+# --- globals
+# =====================================================================
+
+CONN_DELETE_TCB = "DELETE_TCB"
+ERROR_PARTIAL_COPY = 299
+PYPY = '__pypy__' in sys.builtin_module_names
+
+if enum is None:
+ AF_LINK = -1
+else:
+ AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1})
+ AF_LINK = AddressFamily.AF_LINK
+
+TCP_STATUSES = {
+ cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
+ cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
+ cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
+ cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
+ cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
+ cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
+ cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
+ cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
+ cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
+ cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
+ cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
+ cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
+ cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
+}
+
+if enum is not None:
+ class Priority(enum.IntEnum):
+ ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS
+ BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS
+ HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS
+ IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS
+ NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS
+ REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS
+
+ globals().update(Priority.__members__)
+
+if enum is None:
+ IOPRIO_VERYLOW = 0
+ IOPRIO_LOW = 1
+ IOPRIO_NORMAL = 2
+ IOPRIO_HIGH = 3
+else:
+ class IOPriority(enum.IntEnum):
+ IOPRIO_VERYLOW = 0
+ IOPRIO_LOW = 1
+ IOPRIO_NORMAL = 2
+ IOPRIO_HIGH = 3
+ globals().update(IOPriority.__members__)
+
+pinfo_map = dict(
+ num_handles=0,
+ ctx_switches=1,
+ user_time=2,
+ kernel_time=3,
+ create_time=4,
+ num_threads=5,
+ io_rcount=6,
+ io_wcount=7,
+ io_rbytes=8,
+ io_wbytes=9,
+ io_count_others=10,
+ io_bytes_others=11,
+ num_page_faults=12,
+ peak_wset=13,
+ wset=14,
+ peak_paged_pool=15,
+ paged_pool=16,
+ peak_non_paged_pool=17,
+ non_paged_pool=18,
+ pagefile=19,
+ peak_pagefile=20,
+ mem_private=21,
+)
+
+
+# =====================================================================
+# --- named tuples
+# =====================================================================
+
+
+# psutil.cpu_times()
+scputimes = namedtuple('scputimes',
+ ['user', 'system', 'idle', 'interrupt', 'dpc'])
+# psutil.virtual_memory()
+svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
+# psutil.Process.memory_info()
+pmem = namedtuple(
+ 'pmem', ['rss', 'vms',
+ 'num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
+ 'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
+ 'pagefile', 'peak_pagefile', 'private'])
+# psutil.Process.memory_full_info()
+pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', ))
+# psutil.Process.memory_maps(grouped=True)
+pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
+# psutil.Process.memory_maps(grouped=False)
+pmmap_ext = namedtuple(
+ 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
+# psutil.Process.io_counters()
+pio = namedtuple('pio', ['read_count', 'write_count',
+ 'read_bytes', 'write_bytes',
+ 'other_count', 'other_bytes'])
+
+
+# =====================================================================
+# --- utils
+# =====================================================================
+
+
+@lru_cache(maxsize=512)
+def convert_dos_path(s):
+ r"""Convert paths using native DOS format like:
+ "\Device\HarddiskVolume1\Windows\systemew\file.txt"
+ into:
+ "C:\Windows\systemew\file.txt"
+ """
+ rawdrive = '\\'.join(s.split('\\')[:3])
+ driveletter = cext.QueryDosDevice(rawdrive)
+ remainder = s[len(rawdrive):]
+ return os.path.join(driveletter, remainder)
+
+
+def py2_strencode(s):
+ """Encode a unicode string to a byte string by using the default fs
+ encoding + "replace" error handler.
+ """
+ if PY3:
+ return s
+ else:
+ if isinstance(s, str):
+ return s
+ else:
+ return s.encode(ENCODING, ENCODING_ERRS)
+
+
+@memoize
+def getpagesize():
+ return cext.getpagesize()
+
+
+# =====================================================================
+# --- memory
+# =====================================================================
+
+
+def virtual_memory():
+ """System virtual memory as a namedtuple."""
+ mem = cext.virtual_mem()
+ totphys, availphys, totsys, availsys = mem
+ #
+ total = totphys
+ avail = availphys
+ free = availphys
+ used = total - avail
+ percent = usage_percent((total - avail), total, round_=1)
+ return svmem(total, avail, percent, used, free)
+
+
+def swap_memory():
+ """Swap system memory as a (total, used, free, sin, sout) tuple."""
+ mem = cext.virtual_mem()
+
+ total_phys = mem[0]
+ total_system = mem[2]
+
+ # system memory (commit total/limit) is the sum of physical and swap
+    # thus physical memory values need to be subtracted to get swap values
+ total = total_system - total_phys
+ # commit total is incremented immediately (decrementing free_system)
+ # while the corresponding free physical value is not decremented until
+ # pages are accessed, so we can't use free system memory for swap.
+ # instead, we calculate page file usage based on performance counter
+ if (total > 0):
+ percentswap = cext.getpercentswap()
+ used = int(0.01 * percentswap * total)
+ else:
+ used = 0
+ free = total - used
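+    # e.g. hypothetically, total_system=12GB and total_phys=8GB give a
+    # 4GB swap total; a percentswap of 25 then yields used=1GB, free=3GB.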
+ percent = usage_percent(used, total, round_=1)
+ return _common.sswap(total, used, free, percent, 0, 0)
+
+
+# =====================================================================
+# --- disk
+# =====================================================================
+
+
+disk_io_counters = cext.disk_io_counters
+
+
+def disk_usage(path):
+ """Return disk usage associated with path."""
+ if PY3 and isinstance(path, bytes):
+ # XXX: do we want to use "strict"? Probably yes, in order
+ # to fail immediately. After all we are accepting input here...
+ path = path.decode(ENCODING, errors="strict")
+ total, free = cext.disk_usage(path)
+ used = total - free
+ percent = usage_percent(used, total, round_=1)
+ return _common.sdiskusage(total, used, free, percent)
+
+
+def disk_partitions(all):
+ """Return disk partitions."""
+ rawlist = cext.disk_partitions(all)
+ return [_common.sdiskpart(*x) for x in rawlist]
+
+
+# =====================================================================
+# --- CPU
+# =====================================================================
+
+
+def cpu_times():
+ """Return system CPU times as a named tuple."""
+ user, system, idle = cext.cpu_times()
+ # Internally, GetSystemTimes() is used, and it doesn't return
+ # interrupt and dpc times. cext.per_cpu_times() does, so we
+ # rely on it to get those only.
+ percpu_summed = scputimes(*[sum(n) for n in zip(*cext.per_cpu_times())])
+ return scputimes(user, system, idle,
+ percpu_summed.interrupt, percpu_summed.dpc)
+
+
+def per_cpu_times():
+ """Return system per-CPU times as a list of named tuples."""
+ ret = []
+ for user, system, idle, interrupt, dpc in cext.per_cpu_times():
+ item = scputimes(user, system, idle, interrupt, dpc)
+ ret.append(item)
+ return ret
+
+
+def cpu_count_logical():
+ """Return the number of logical CPUs in the system."""
+ return cext.cpu_count_logical()
+
+
+def cpu_count_cores():
+ """Return the number of CPU cores in the system."""
+ return cext.cpu_count_cores()
+
+
+def cpu_stats():
+ """Return CPU statistics."""
+ ctx_switches, interrupts, dpcs, syscalls = cext.cpu_stats()
+ soft_interrupts = 0
+ return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
+ syscalls)
+
+
+def cpu_freq():
+ """Return CPU frequency.
+ On Windows per-cpu frequency is not supported.
+ """
+ curr, max_ = cext.cpu_freq()
+ min_ = 0.0
+ return [_common.scpufreq(float(curr), min_, float(max_))]
+
+
+_loadavg_initialized = False
+
+
+def getloadavg():
+    """Return the number of processes in the system run queue averaged
+    over the last 1, 5, and 15 minutes respectively as a tuple"""
+    global _loadavg_initialized
+
+    if not _loadavg_initialized:
+        cext.init_loadavg_counter()
+        _loadavg_initialized = True
+
+ # Drop to 2 decimal points which is what Linux does
+ raw_loads = cext.getloadavg()
+ return tuple([round(load, 2) for load in raw_loads])
+
+
+# =====================================================================
+# --- network
+# =====================================================================
+
+
+def net_connections(kind, _pid=-1):
+ """Return socket connections. If pid == -1 return system-wide
+ connections (as opposed to connections opened by one process only).
+ """
+ if kind not in conn_tmap:
+ raise ValueError("invalid %r kind argument; choose between %s"
+ % (kind, ', '.join([repr(x) for x in conn_tmap])))
+ families, types = conn_tmap[kind]
+ rawlist = cext.net_connections(_pid, families, types)
+ ret = set()
+ for item in rawlist:
+ fd, fam, type, laddr, raddr, status, pid = item
+ nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, TCP_STATUSES,
+ pid=pid if _pid == -1 else None)
+ ret.add(nt)
+ return list(ret)
+
+
+def net_if_stats():
+ """Get NIC stats (isup, duplex, speed, mtu)."""
+ ret = {}
+ rawdict = cext.net_if_stats()
+ for name, items in rawdict.items():
+ if not PY3:
+ assert isinstance(name, unicode), type(name)
+ name = py2_strencode(name)
+ isup, duplex, speed, mtu = items
+ if hasattr(_common, 'NicDuplex'):
+ duplex = _common.NicDuplex(duplex)
+ ret[name] = _common.snicstats(isup, duplex, speed, mtu, '')
+ return ret
+
+
+def net_io_counters():
+ """Return network I/O statistics for every network interface
+ installed on the system as a dict of raw tuples.
+ """
+ ret = cext.net_io_counters()
+ return dict([(py2_strencode(k), v) for k, v in ret.items()])
+
+
+def net_if_addrs():
+ """Return the addresses associated to each NIC."""
+ ret = []
+ for items in cext.net_if_addrs():
+ items = list(items)
+ items[0] = py2_strencode(items[0])
+ ret.append(items)
+ return ret
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+def sensors_battery():
+ """Return battery information."""
+ # For constants meaning see:
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/
+ # aa373232(v=vs.85).aspx
+ acline_status, flags, percent, secsleft = cext.sensors_battery()
+ power_plugged = acline_status == 1
+ no_battery = bool(flags & 128)
+ charging = bool(flags & 8)
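+    # e.g. flags=8 means the battery is charging, flags=128 means there
+    # is no system battery (handled below by returning None).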
+
+ if no_battery:
+ return None
+ if power_plugged or charging:
+ secsleft = _common.POWER_TIME_UNLIMITED
+ elif secsleft == -1:
+ secsleft = _common.POWER_TIME_UNKNOWN
+
+ return _common.sbattery(percent, secsleft, power_plugged)
+
+
+# =====================================================================
+# --- other system functions
+# =====================================================================
+
+
+_last_btime = 0
+
+
+def boot_time():
+ """The system boot time expressed in seconds since the epoch."""
+ # This dirty hack is to adjust the precision of the returned
+ # value which may have a 1 second fluctuation, see:
+ # https://github.com/giampaolo/psutil/issues/1007
+ global _last_btime
+ ret = float(cext.boot_time())
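+    # e.g. if a previous call cached 1656000000.0 and this one computes
+    # 1656000000.8 the cached value is returned, so consecutive calls
+    # don't flap by one second.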
+ if abs(ret - _last_btime) <= 1:
+ return _last_btime
+ else:
+ _last_btime = ret
+ return ret
+
+
+def users():
+ """Return currently connected users as a list of namedtuples."""
+ retlist = []
+ rawlist = cext.users()
+ for item in rawlist:
+ user, hostname, tstamp = item
+ user = py2_strencode(user)
+ nt = _common.suser(user, None, hostname, tstamp, None)
+ retlist.append(nt)
+ return retlist
+
+
+# =====================================================================
+# --- Windows services
+# =====================================================================
+
+
+def win_service_iter():
+ """Yields a list of WindowsService instances."""
+ for name, display_name in cext.winservice_enumerate():
+ yield WindowsService(py2_strencode(name), py2_strencode(display_name))
+
+
+def win_service_get(name):
+ """Open a Windows service and return it as a WindowsService instance."""
+ service = WindowsService(name, None)
+ service._display_name = service._query_config()['display_name']
+ return service
+
+
+class WindowsService(object):
+ """Represents an installed Windows service."""
+
+ def __init__(self, name, display_name):
+ self._name = name
+ self._display_name = display_name
+
+ def __str__(self):
+ details = "(name=%r, display_name=%r)" % (
+ self._name, self._display_name)
+ return "%s%s" % (self.__class__.__name__, details)
+
+ def __repr__(self):
+ return "<%s at %s>" % (self.__str__(), id(self))
+
+ def __eq__(self, other):
+        # Test for equality with another WindowsService object based
+        # on name.
+ if not isinstance(other, WindowsService):
+ return NotImplemented
+ return self._name == other._name
+
+ def __ne__(self, other):
+ return not self == other
+
+ def _query_config(self):
+ with self._wrap_exceptions():
+ display_name, binpath, username, start_type = \
+ cext.winservice_query_config(self._name)
+        # XXX - update self._display_name?
+ return dict(
+ display_name=py2_strencode(display_name),
+ binpath=py2_strencode(binpath),
+ username=py2_strencode(username),
+ start_type=py2_strencode(start_type))
+
+ def _query_status(self):
+ with self._wrap_exceptions():
+ status, pid = cext.winservice_query_status(self._name)
+ if pid == 0:
+ pid = None
+ return dict(status=status, pid=pid)
+
+ @contextlib.contextmanager
+ def _wrap_exceptions(self):
+ """Ctx manager which translates bare OSError and WindowsError
+ exceptions into NoSuchProcess and AccessDenied.
+ """
+ try:
+ yield
+ except OSError as err:
+ if is_permission_err(err):
+ raise AccessDenied(
+ pid=None, name=self._name,
+ msg="service %r is not querable (not enough privileges)" %
+ self._name)
+ elif err.winerror in (cext.ERROR_INVALID_NAME,
+ cext.ERROR_SERVICE_DOES_NOT_EXIST):
+ raise NoSuchProcess(
+ pid=None, name=self._name,
+ msg="service %r does not exist)" % self._name)
+ else:
+ raise
+
+ # config query
+
+ def name(self):
+ """The service name. This string is how a service is referenced
+ and can be passed to win_service_get() to get a new
+ WindowsService instance.
+ """
+ return self._name
+
+ def display_name(self):
+ """The service display name. The value is cached when this class
+ is instantiated.
+ """
+ return self._display_name
+
+ def binpath(self):
+ """The fully qualified path to the service binary/exe file as
+ a string, including command line arguments.
+ """
+ return self._query_config()['binpath']
+
+ def username(self):
+ """The name of the user that owns this service."""
+ return self._query_config()['username']
+
+ def start_type(self):
+ """A string which can either be "automatic", "manual" or
+ "disabled".
+ """
+ return self._query_config()['start_type']
+
+ # status query
+
+ def pid(self):
+ """The process PID, if any, else None. This can be passed
+ to Process class to control the service's process.
+ """
+ return self._query_status()['pid']
+
+ def status(self):
+ """Service status as a string."""
+ return self._query_status()['status']
+
+ def description(self):
+ """Service long description."""
+ return py2_strencode(cext.winservice_query_descr(self.name()))
+
+ # utils
+
+ def as_dict(self):
+ """Utility method retrieving all the information above as a
+ dictionary.
+ """
+ d = self._query_config()
+ d.update(self._query_status())
+ d['name'] = self.name()
+ d['display_name'] = self.display_name()
+ d['description'] = self.description()
+ return d
+
+ # actions
+ # XXX: the necessary C bindings for start() and stop() are
+ # implemented but for now I prefer not to expose them.
+ # I may change my mind in the future. Reasons:
+ # - they require Administrator privileges
+ # - can't implement a timeout for stop() (unless by using a thread,
+ # which sucks)
+ # - would require adding ServiceAlreadyStarted and
+ # ServiceAlreadyStopped exceptions, adding two new APIs.
+ # - we might also want to have modify(), which would basically mean
+ # rewriting win32serviceutil.ChangeServiceConfig, which involves a
+ # lot of stuff (and API constants which would pollute the API), see:
+ # http://pyxr.sourceforge.net/PyXR/c/python24/lib/site-packages/
+ # win32/lib/win32serviceutil.py.html#0175
+ # - psutil is typically about "read only" monitoring stuff;
+ # win_service_* APIs should only be used to retrieve a service and
+ # check whether it's running
+
+ # def start(self, timeout=None):
+ # with self._wrap_exceptions():
+ # cext.winservice_start(self.name())
+ # if timeout:
+ # giveup_at = time.time() + timeout
+ # while True:
+ # if self.status() == "running":
+ # return
+ # else:
+ # if time.time() > giveup_at:
+ # raise TimeoutExpired(timeout)
+ # else:
+ # time.sleep(.1)
+
+ # def stop(self):
+ # # Note: timeout is not implemented because it's just not
+ # # possible, see:
+ # # http://stackoverflow.com/questions/11973228/
+ # with self._wrap_exceptions():
+ # return cext.winservice_stop(self.name())
+
+
+# =====================================================================
+# --- processes
+# =====================================================================
+
+
+pids = cext.pids
+pid_exists = cext.pid_exists
+ppid_map = cext.ppid_map # used internally by Process.children()
+
+
+def is_permission_err(exc):
+ """Return True if this is a permission error."""
+ assert isinstance(exc, OSError), exc
+ # On Python 2 OSError doesn't always have 'winerror'. Sometimes
+ # it does, in which case the original exception was WindowsError
+ # (which is a subclass of OSError).
+ return exc.errno in (errno.EPERM, errno.EACCES) or \
+ getattr(exc, "winerror", -1) in (cext.ERROR_ACCESS_DENIED,
+ cext.ERROR_PRIVILEGE_NOT_HELD)
+
+
+def convert_oserror(exc, pid=None, name=None):
+ """Convert OSError into NoSuchProcess or AccessDenied."""
+ assert isinstance(exc, OSError), exc
+ if is_permission_err(exc):
+ return AccessDenied(pid=pid, name=name)
+ if exc.errno == errno.ESRCH:
+ return NoSuchProcess(pid=pid, name=name)
+ raise exc
+
+
+def wrap_exceptions(fun):
+ """Decorator which converts OSError into NoSuchProcess or AccessDenied."""
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return fun(self, *args, **kwargs)
+ except OSError as err:
+ raise convert_oserror(err, pid=self.pid, name=self._name)
+ return wrapper
+
+
+def retry_error_partial_copy(fun):
+ """Workaround for https://github.com/giampaolo/psutil/issues/875.
+ See: https://stackoverflow.com/questions/4457745#4457745
+ """
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ delay = 0.0001
+ times = 33
+ for x in range(times): # retries for roughly 1 second
+ try:
+ return fun(self, *args, **kwargs)
+ except WindowsError as _:
+ err = _
+ if err.winerror == ERROR_PARTIAL_COPY:
+ time.sleep(delay)
+ delay = min(delay * 2, 0.04)
+ continue
+ else:
+ raise
+ else:
+ msg = "%s retried %s times, converted to AccessDenied as it's " \
+ "still returning %r" % (fun, times, err)
+ raise AccessDenied(pid=self.pid, name=self._name, msg=msg)
+ return wrapper
+
+
+class Process(object):
+ """Wrapper class around underlying C implementation."""
+
+ __slots__ = ["pid", "_name", "_ppid", "_cache"]
+
+ def __init__(self, pid):
+ self.pid = pid
+ self._name = None
+ self._ppid = None
+
+ # --- oneshot() stuff
+
+ def oneshot_enter(self):
+ self._proc_info.cache_activate(self)
+ self.exe.cache_activate(self)
+
+ def oneshot_exit(self):
+ self._proc_info.cache_deactivate(self)
+ self.exe.cache_deactivate(self)
+
+ @memoize_when_activated
+ def _proc_info(self):
+ """Return multiple information about this process as a
+ raw tuple.
+ """
+ ret = cext.proc_info(self.pid)
+ assert len(ret) == len(pinfo_map)
+ return ret
+
+ def name(self):
+ """Return process name, which on Windows is always the final
+ part of the executable.
+ """
+ # This is how PIDs 0 and 4 are always represented in taskmgr
+ # and process-hacker.
+ if self.pid == 0:
+ return "System Idle Process"
+ if self.pid == 4:
+ return "System"
+ return os.path.basename(self.exe())
+
+ @wrap_exceptions
+ @memoize_when_activated
+ def exe(self):
+ if PYPY:
+ try:
+ exe = cext.proc_exe(self.pid)
+ except WindowsError as err:
+ # 24 = ERROR_TOO_MANY_OPEN_FILES. Not sure why this happens
+ # (perhaps PyPy's JIT delaying garbage collection of files?).
+ if err.errno == 24:
+ debug("%r translated into AccessDenied" % err)
+ raise AccessDenied(self.pid, self._name)
+ raise
+ else:
+ exe = cext.proc_exe(self.pid)
+ if not PY3:
+ exe = py2_strencode(exe)
+ if exe.startswith('\\'):
+ return convert_dos_path(exe)
+ return exe # May be "Registry", "MemCompression", ...
+
+ @wrap_exceptions
+ @retry_error_partial_copy
+ def cmdline(self):
+ if cext.WINVER >= cext.WINDOWS_8_1:
+ # PEB method detects cmdline changes but requires more
+ # privileges: https://github.com/giampaolo/psutil/pull/1398
+ try:
+ ret = cext.proc_cmdline(self.pid, use_peb=True)
+ except OSError as err:
+ if is_permission_err(err):
+ ret = cext.proc_cmdline(self.pid, use_peb=False)
+ else:
+ raise
+ else:
+ ret = cext.proc_cmdline(self.pid, use_peb=True)
+ if PY3:
+ return ret
+ else:
+ return [py2_strencode(s) for s in ret]
+
+ @wrap_exceptions
+ @retry_error_partial_copy
+ def environ(self):
+ ustr = cext.proc_environ(self.pid)
+ if ustr and not PY3:
+ assert isinstance(ustr, unicode), type(ustr)
+ return parse_environ_block(py2_strencode(ustr))
+
+ def ppid(self):
+ try:
+ return ppid_map()[self.pid]
+ except KeyError:
+ raise NoSuchProcess(self.pid, self._name)
+
+ def _get_raw_meminfo(self):
+ try:
+ return cext.proc_memory_info(self.pid)
+ except OSError as err:
+ if is_permission_err(err):
+ # TODO: the C ext can probably be refactored in order
+ # to get this from cext.proc_info()
+ info = self._proc_info()
+ return (
+ info[pinfo_map['num_page_faults']],
+ info[pinfo_map['peak_wset']],
+ info[pinfo_map['wset']],
+ info[pinfo_map['peak_paged_pool']],
+ info[pinfo_map['paged_pool']],
+ info[pinfo_map['peak_non_paged_pool']],
+ info[pinfo_map['non_paged_pool']],
+ info[pinfo_map['pagefile']],
+ info[pinfo_map['peak_pagefile']],
+ info[pinfo_map['mem_private']],
+ )
+ raise
+
+ @wrap_exceptions
+ def memory_info(self):
+        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage.
+ # Underlying C function returns fields of PROCESS_MEMORY_COUNTERS
+ # struct.
+ t = self._get_raw_meminfo()
+ rss = t[2] # wset
+ vms = t[7] # pagefile
+ return pmem(*(rss, vms, ) + t)
+
+ @wrap_exceptions
+ def memory_full_info(self):
+ basic_mem = self.memory_info()
+ uss = cext.proc_memory_uss(self.pid)
+ uss *= getpagesize()
+ return pfullmem(*basic_mem + (uss, ))
+
+ def memory_maps(self):
+ try:
+ raw = cext.proc_memory_maps(self.pid)
+ except OSError as err:
+ # XXX - can't use wrap_exceptions decorator as we're
+ # returning a generator; probably needs refactoring.
+ raise convert_oserror(err, self.pid, self._name)
+ else:
+ for addr, perm, path, rss in raw:
+ path = convert_dos_path(path)
+ if not PY3:
+ path = py2_strencode(path)
+ addr = hex(addr)
+ yield (addr, perm, path, rss)
+
+ @wrap_exceptions
+ def kill(self):
+ return cext.proc_kill(self.pid)
+
+ @wrap_exceptions
+ def send_signal(self, sig):
+ if sig == signal.SIGTERM:
+ cext.proc_kill(self.pid)
+ # py >= 2.7
+ elif sig in (getattr(signal, "CTRL_C_EVENT", object()),
+ getattr(signal, "CTRL_BREAK_EVENT", object())):
+ os.kill(self.pid, sig)
+ else:
+ raise ValueError(
+ "only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT signals "
+ "are supported on Windows")
+
+ @wrap_exceptions
+ def wait(self, timeout=None):
+ if timeout is None:
+ cext_timeout = cext.INFINITE
+ else:
+ # WaitForSingleObject() expects time in milliseconds.
+ cext_timeout = int(timeout * 1000)
+
+ timer = getattr(time, 'monotonic', time.time)
+ stop_at = timer() + timeout if timeout is not None else None
+
+ try:
+ # Exit code is supposed to come from GetExitCodeProcess().
+ # May also be None if OpenProcess() failed with
+ # ERROR_INVALID_PARAMETER, meaning PID is already gone.
+ exit_code = cext.proc_wait(self.pid, cext_timeout)
+ except cext.TimeoutExpired:
+ # WaitForSingleObject() returned WAIT_TIMEOUT. Just raise.
+ raise TimeoutExpired(timeout, self.pid, self._name)
+ except cext.TimeoutAbandoned:
+ # WaitForSingleObject() returned WAIT_ABANDONED, see:
+ # https://github.com/giampaolo/psutil/issues/1224
+ # We'll just rely on the internal polling and return None
+ # when the PID disappears. Subprocess module does the same
+ # (return None):
+ # https://github.com/python/cpython/blob/
+ # be50a7b627d0aa37e08fa8e2d5568891f19903ce/
+ # Lib/subprocess.py#L1193-L1194
+ exit_code = None
+
+ # At this point WaitForSingleObject() returned WAIT_OBJECT_0,
+ # meaning the process is gone. Stupidly there are cases where
+ # its PID may still stick around so we do a further internal
+ # polling.
+ delay = 0.0001
+ while True:
+ if not pid_exists(self.pid):
+ return exit_code
+ if stop_at and timer() >= stop_at:
+ raise TimeoutExpired(timeout, pid=self.pid, name=self._name)
+ time.sleep(delay)
+ delay = min(delay * 2, 0.04) # incremental delay
+
+ @wrap_exceptions
+ def username(self):
+ if self.pid in (0, 4):
+ return 'NT AUTHORITY\\SYSTEM'
+ domain, user = cext.proc_username(self.pid)
+ return py2_strencode(domain) + '\\' + py2_strencode(user)
+
+ @wrap_exceptions
+ def create_time(self):
+ # Note: proc_times() not put under oneshot() 'cause create_time()
+ # is already cached by the main Process class.
+ try:
+ user, system, created = cext.proc_times(self.pid)
+ return created
+ except OSError as err:
+ if is_permission_err(err):
+ return self._proc_info()[pinfo_map['create_time']]
+ raise
+
+ @wrap_exceptions
+ def num_threads(self):
+ return self._proc_info()[pinfo_map['num_threads']]
+
+ @wrap_exceptions
+ def threads(self):
+ rawlist = cext.proc_threads(self.pid)
+ retlist = []
+ for thread_id, utime, stime in rawlist:
+ ntuple = _common.pthread(thread_id, utime, stime)
+ retlist.append(ntuple)
+ return retlist
+
+ @wrap_exceptions
+ def cpu_times(self):
+ try:
+ user, system, created = cext.proc_times(self.pid)
+ except OSError as err:
+ if not is_permission_err(err):
+ raise
+ info = self._proc_info()
+ user = info[pinfo_map['user_time']]
+ system = info[pinfo_map['kernel_time']]
+ # Children user/system times are not retrievable (set to 0).
+ return _common.pcputimes(user, system, 0.0, 0.0)
+
+ @wrap_exceptions
+ def suspend(self):
+ cext.proc_suspend_or_resume(self.pid, True)
+
+ @wrap_exceptions
+ def resume(self):
+ cext.proc_suspend_or_resume(self.pid, False)
+
+ @wrap_exceptions
+ @retry_error_partial_copy
+ def cwd(self):
+ if self.pid in (0, 4):
+ raise AccessDenied(self.pid, self._name)
+ # return a normalized pathname since the native C function appends
+ # "\\" at the and of the path
+ path = cext.proc_cwd(self.pid)
+ return py2_strencode(os.path.normpath(path))
+
+ @wrap_exceptions
+ def open_files(self):
+ if self.pid in (0, 4):
+ return []
+ ret = set()
+        # Filenames come in native format like:
+        # "\Device\HarddiskVolume1\Windows\system32\file.txt"
+        # Convert the first part into the corresponding drive letter
+        # (e.g. "C:\") by using Windows's QueryDosDevice()
+ raw_file_names = cext.proc_open_files(self.pid)
+ for _file in raw_file_names:
+ _file = convert_dos_path(_file)
+ if isfile_strict(_file):
+ if not PY3:
+ _file = py2_strencode(_file)
+ ntuple = _common.popenfile(_file, -1)
+ ret.add(ntuple)
+ return list(ret)
+
+ @wrap_exceptions
+ def connections(self, kind='inet'):
+ return net_connections(kind, _pid=self.pid)
+
+ @wrap_exceptions
+ def nice_get(self):
+ value = cext.proc_priority_get(self.pid)
+ if enum is not None:
+ value = Priority(value)
+ return value
+
+ @wrap_exceptions
+ def nice_set(self, value):
+ return cext.proc_priority_set(self.pid, value)
+
+ @wrap_exceptions
+ def ionice_get(self):
+ ret = cext.proc_io_priority_get(self.pid)
+ if enum is not None:
+ ret = IOPriority(ret)
+ return ret
+
+ @wrap_exceptions
+ def ionice_set(self, ioclass, value):
+ if value:
+ raise TypeError("value argument not accepted on Windows")
+ if ioclass not in (IOPRIO_VERYLOW, IOPRIO_LOW, IOPRIO_NORMAL,
+ IOPRIO_HIGH):
+ raise ValueError("%s is not a valid priority" % ioclass)
+ cext.proc_io_priority_set(self.pid, ioclass)
+
+ @wrap_exceptions
+ def io_counters(self):
+ try:
+ ret = cext.proc_io_counters(self.pid)
+ except OSError as err:
+ if not is_permission_err(err):
+ raise
+ info = self._proc_info()
+ ret = (
+ info[pinfo_map['io_rcount']],
+ info[pinfo_map['io_wcount']],
+ info[pinfo_map['io_rbytes']],
+ info[pinfo_map['io_wbytes']],
+ info[pinfo_map['io_count_others']],
+ info[pinfo_map['io_bytes_others']],
+ )
+ return pio(*ret)
+
+ @wrap_exceptions
+ def status(self):
+ suspended = cext.proc_is_suspended(self.pid)
+ if suspended:
+ return _common.STATUS_STOPPED
+ else:
+ return _common.STATUS_RUNNING
+
+ @wrap_exceptions
+ def cpu_affinity_get(self):
+ def from_bitmask(x):
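+            # e.g. a bitmask of 0b1011 maps to CPUs [0, 1, 3]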
+ return [i for i in range(64) if (1 << i) & x]
+ bitmask = cext.proc_cpu_affinity_get(self.pid)
+ return from_bitmask(bitmask)
+
+ @wrap_exceptions
+ def cpu_affinity_set(self, value):
+ def to_bitmask(ls):
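+            # e.g. CPUs [0, 1, 3] map to bitmask 0b1011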
+ if not ls:
+ raise ValueError("invalid argument %r" % ls)
+ out = 0
+ for b in ls:
+ out |= 2 ** b
+ return out
+
+ # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
+ # is returned for an invalid CPU but this seems not to be true,
+        # therefore we check CPU validity beforehand.
+ allcpus = list(range(len(per_cpu_times())))
+ for cpu in value:
+ if cpu not in allcpus:
+ if not isinstance(cpu, (int, long)):
+ raise TypeError(
+ "invalid CPU %r; an integer is required" % cpu)
+ else:
+ raise ValueError("invalid CPU %r" % cpu)
+
+ bitmask = to_bitmask(value)
+ cext.proc_cpu_affinity_set(self.pid, bitmask)
+
+ @wrap_exceptions
+ def num_handles(self):
+ try:
+ return cext.proc_num_handles(self.pid)
+ except OSError as err:
+ if is_permission_err(err):
+ return self._proc_info()[pinfo_map['num_handles']]
+ raise
+
+ @wrap_exceptions
+ def num_ctx_switches(self):
+ ctx_switches = self._proc_info()[pinfo_map['ctx_switches']]
+ # only voluntary ctx switches are supported
+ return _common.pctxsw(ctx_switches, 0)
diff --git a/lib/psutil/tests/__init__.py b/lib/psutil/tests/__init__.py
new file mode 100644
index 0000000..ec9c748
--- /dev/null
+++ b/lib/psutil/tests/__init__.py
@@ -0,0 +1,1820 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test utilities.
+"""
+
+from __future__ import print_function
+
+import atexit
+import contextlib
+import ctypes
+import errno
+import functools
+import gc
+import inspect
+import os
+import platform
+import random
+import re
+import select
+import shlex
+import shutil
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import threading
+import time
+import unittest
+import warnings
+from socket import AF_INET
+from socket import AF_INET6
+from socket import SOCK_STREAM
+
+import psutil
+from psutil import AIX
+from psutil import FREEBSD
+from psutil import LINUX
+from psutil import MACOS
+from psutil import POSIX
+from psutil import SUNOS
+from psutil import WINDOWS
+from psutil._common import bytes2human
+from psutil._common import memoize
+from psutil._common import print_color
+from psutil._common import supports_ipv6
+from psutil._compat import PY3
+from psutil._compat import FileExistsError
+from psutil._compat import FileNotFoundError
+from psutil._compat import range
+from psutil._compat import super
+from psutil._compat import u
+from psutil._compat import unicode
+from psutil._compat import which
+
+
+try:
+ from unittest import mock # py3
+except ImportError:
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ import mock # NOQA - requires "pip install mock"
+
+if sys.version_info >= (3, 4):
+ import enum
+else:
+ enum = None
+
+if POSIX:
+ from psutil._psposix import wait_pid
+
+
+__all__ = [
+ # constants
+ 'APPVEYOR', 'DEVNULL', 'GLOBAL_TIMEOUT', 'TOLERANCE_SYS_MEM', 'NO_RETRIES',
+ 'PYPY', 'PYTHON_EXE', 'ROOT_DIR', 'SCRIPTS_DIR', 'TESTFN_PREFIX',
+ 'UNICODE_SUFFIX', 'INVALID_UNICODE_SUFFIX',
+ 'CI_TESTING', 'VALID_PROC_STATUSES', 'TOLERANCE_DISK_USAGE', 'IS_64BIT',
+ "HAS_CPU_AFFINITY", "HAS_CPU_FREQ", "HAS_ENVIRON", "HAS_PROC_IO_COUNTERS",
+ "HAS_IONICE", "HAS_MEMORY_MAPS", "HAS_PROC_CPU_NUM", "HAS_RLIMIT",
+ "HAS_SENSORS_BATTERY", "HAS_BATTERY", "HAS_SENSORS_FANS",
+ "HAS_SENSORS_TEMPERATURES", "HAS_MEMORY_FULL_INFO", "MACOS_11PLUS",
+ "MACOS_12PLUS",
+ # subprocesses
+ 'pyrun', 'terminate', 'reap_children', 'spawn_testproc', 'spawn_zombie',
+ 'spawn_children_pair',
+ # threads
+    'ThreadTask',
+ # test utils
+ 'unittest', 'skip_on_access_denied', 'skip_on_not_implemented',
+ 'retry_on_failure', 'TestMemoryLeak', 'PsutilTestCase',
+ 'process_namespace', 'system_namespace', 'print_sysinfo',
+ # install utils
+ 'install_pip', 'install_test_deps',
+ # fs utils
+ 'chdir', 'safe_rmpath', 'create_exe', 'decode_path', 'encode_path',
+ 'get_testfn',
+ # os
+ 'get_winver', 'kernel_version',
+ # sync primitives
+ 'call_until', 'wait_for_pid', 'wait_for_file',
+ # network
+ 'check_net_address',
+ 'get_free_port', 'bind_socket', 'bind_unix_socket', 'tcp_socketpair',
+ 'unix_socketpair', 'create_sockets',
+ # compat
+ 'reload_module', 'import_module_by_path',
+ # others
+ 'warn', 'copyload_shared_lib', 'is_namedtuple',
+]
+
+
+# ===================================================================
+# --- constants
+# ===================================================================
+
+# --- platforms
+
+PYPY = '__pypy__' in sys.builtin_module_names
+# whether we're running this test suite on a Continuous Integration service
+APPVEYOR = 'APPVEYOR' in os.environ
+GITHUB_ACTIONS = 'GITHUB_ACTIONS' in os.environ or 'CIBUILDWHEEL' in os.environ
+CI_TESTING = APPVEYOR or GITHUB_ACTIONS
+# are we a 64 bit process?
+IS_64BIT = sys.maxsize > 2 ** 32
+
+
+@memoize
+def macos_version():
+ version_str = platform.mac_ver()[0]
+ version = tuple(map(int, version_str.split(".")[:2]))
+ if version == (10, 16):
+ # When built against an older macOS SDK, Python will report
+ # macOS 10.16 instead of the real version.
+ version_str = subprocess.check_output(
+ [
+ sys.executable,
+ "-sS",
+ "-c",
+ "import platform; print(platform.mac_ver()[0])",
+ ],
+ env={"SYSTEM_VERSION_COMPAT": "0"},
+ universal_newlines=True,
+ )
+ version = tuple(map(int, version_str.split(".")[:2]))
+ return version
+
+
+if MACOS:
+ MACOS_11PLUS = macos_version() > (10, 15)
+ MACOS_12PLUS = macos_version() >= (12, 0)
+else:
+ MACOS_11PLUS = False
+ MACOS_12PLUS = False
+
+
+# --- configurable defaults
+
+# how many times retry_on_failure() decorator will retry
+NO_RETRIES = 10
+# bytes tolerance for system-wide related tests
+TOLERANCE_SYS_MEM = 5 * 1024 * 1024 # 5MB
+TOLERANCE_DISK_USAGE = 10 * 1024 * 1024 # 10MB
+# the timeout used in functions which have to wait
+GLOBAL_TIMEOUT = 5
+# be more tolerant if we're on CI in order to avoid false positives
+if CI_TESTING:
+ NO_RETRIES *= 3
+ GLOBAL_TIMEOUT *= 3
+ TOLERANCE_SYS_MEM *= 4
+ TOLERANCE_DISK_USAGE *= 3
+
+# --- file names
+
+# Disambiguate TESTFN for parallel testing.
+if os.name == 'java':
+ # Jython disallows @ in module names
+ TESTFN_PREFIX = '$psutil-%s-' % os.getpid()
+else:
+ TESTFN_PREFIX = '@psutil-%s-' % os.getpid()
+UNICODE_SUFFIX = u("-ƒőő")
+# An invalid unicode string.
+if PY3:
+ INVALID_UNICODE_SUFFIX = b"f\xc0\x80".decode('utf8', 'surrogateescape')
+else:
+ INVALID_UNICODE_SUFFIX = "f\xc0\x80"
+ASCII_FS = sys.getfilesystemencoding().lower() in ('ascii', 'us-ascii')
+
+# --- paths
+
+ROOT_DIR = os.path.realpath(
+ os.path.join(os.path.dirname(__file__), '..', '..'))
+SCRIPTS_DIR = os.path.join(ROOT_DIR, 'scripts')
+HERE = os.path.realpath(os.path.dirname(__file__))
+
+# --- support
+
+HAS_CONNECTIONS_UNIX = POSIX and not SUNOS
+HAS_CPU_AFFINITY = hasattr(psutil.Process, "cpu_affinity")
+HAS_CPU_FREQ = hasattr(psutil, "cpu_freq")
+HAS_GETLOADAVG = hasattr(psutil, "getloadavg")
+HAS_ENVIRON = hasattr(psutil.Process, "environ")
+HAS_IONICE = hasattr(psutil.Process, "ionice")
+HAS_MEMORY_MAPS = hasattr(psutil.Process, "memory_maps")
+HAS_NET_IO_COUNTERS = hasattr(psutil, "net_io_counters")
+HAS_PROC_CPU_NUM = hasattr(psutil.Process, "cpu_num")
+HAS_PROC_IO_COUNTERS = hasattr(psutil.Process, "io_counters")
+HAS_RLIMIT = hasattr(psutil.Process, "rlimit")
+HAS_SENSORS_BATTERY = hasattr(psutil, "sensors_battery")
+try:
+ HAS_BATTERY = HAS_SENSORS_BATTERY and bool(psutil.sensors_battery())
+except Exception:
+ HAS_BATTERY = False
+HAS_SENSORS_FANS = hasattr(psutil, "sensors_fans")
+HAS_SENSORS_TEMPERATURES = hasattr(psutil, "sensors_temperatures")
+HAS_THREADS = hasattr(psutil.Process, "threads")
+SKIP_SYSCONS = (MACOS or AIX) and os.getuid() != 0
+
+# --- misc
+
+
+def _get_py_exe():
+ def attempt(exe):
+ try:
+ subprocess.check_call(
+ [exe, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except Exception:
+ return None
+ else:
+ return exe
+
+ if GITHUB_ACTIONS:
+ if PYPY:
+ return which("pypy3") if PY3 else which("pypy")
+ elif FREEBSD:
+ return os.path.realpath(sys.executable)
+ else:
+ return which('python')
+ elif MACOS:
+ exe = \
+ attempt(sys.executable) or \
+ attempt(os.path.realpath(sys.executable)) or \
+ attempt(which("python%s.%s" % sys.version_info[:2])) or \
+ attempt(psutil.Process().exe())
+ if not exe:
+ raise ValueError("can't find python exe real abspath")
+ return exe
+ else:
+ exe = os.path.realpath(sys.executable)
+ assert os.path.exists(exe), exe
+ return exe
+
+
+PYTHON_EXE = _get_py_exe()
+DEVNULL = open(os.devnull, 'r+')
+atexit.register(DEVNULL.close)
+
+VALID_PROC_STATUSES = [getattr(psutil, x) for x in dir(psutil)
+ if x.startswith('STATUS_')]
+AF_UNIX = getattr(socket, "AF_UNIX", object())
+
+_subprocesses_started = set()
+_pids_started = set()
+
+
+# ===================================================================
+# --- threads
+# ===================================================================
+
+
+class ThreadTask(threading.Thread):
+ """A thread task which does nothing expect staying alive."""
+
+ def __init__(self):
+ super().__init__()
+ self._running = False
+ self._interval = 0.001
+ self._flag = threading.Event()
+
+ def __repr__(self):
+ name = self.__class__.__name__
+ return '<%s running=%s at %#x>' % (name, self._running, id(self))
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.stop()
+
+ def start(self):
+ """Start thread and keep it running until an explicit
+        stop() request. Polls for shutdown every '_interval' seconds.
+ """
+ if self._running:
+ raise ValueError("already started")
+ threading.Thread.start(self)
+ self._flag.wait()
+
+ def run(self):
+ self._running = True
+ self._flag.set()
+ while self._running:
+ time.sleep(self._interval)
+
+ def stop(self):
+ """Stop thread execution and and waits until it is stopped."""
+ if not self._running:
+ raise ValueError("already stopped")
+ self._running = False
+ self.join()
+
+
+# ===================================================================
+# --- subprocesses
+# ===================================================================
+
+
+def _reap_children_on_err(fun):
+ @functools.wraps(fun)
+ def wrapper(*args, **kwargs):
+ try:
+ return fun(*args, **kwargs)
+ except Exception:
+ reap_children()
+ raise
+ return wrapper
+
+
+@_reap_children_on_err
+def spawn_testproc(cmd=None, **kwds):
+ """Creates a python subprocess which does nothing for 60 secs and
+    returns it as a subprocess.Popen instance.
+    If "cmd" is specified, it is used instead of python.
+ By default stdin and stdout are redirected to /dev/null.
+ It also attempts to make sure the process is in a reasonably
+ initialized state.
+ The process is registered for cleanup on reap_children().
+ """
+ kwds.setdefault("stdin", DEVNULL)
+ kwds.setdefault("stdout", DEVNULL)
+ kwds.setdefault("cwd", os.getcwd())
+ kwds.setdefault("env", os.environ)
+ if WINDOWS:
+        # Prevents the subprocess from opening error dialogs. This also
+        # suppresses stderr, which makes debugging broken tests harder.
+ CREATE_NO_WINDOW = 0x8000000
+ kwds.setdefault("creationflags", CREATE_NO_WINDOW)
+ if cmd is None:
+ testfn = get_testfn()
+ try:
+ safe_rmpath(testfn)
+ pyline = "from time import sleep;" \
+ "open(r'%s', 'w').close();" \
+ "sleep(60);" % testfn
+ cmd = [PYTHON_EXE, "-c", pyline]
+ sproc = subprocess.Popen(cmd, **kwds)
+ _subprocesses_started.add(sproc)
+ wait_for_file(testfn, delete=True, empty=True)
+ finally:
+ safe_rmpath(testfn)
+ else:
+ sproc = subprocess.Popen(cmd, **kwds)
+ _subprocesses_started.add(sproc)
+ wait_for_pid(sproc.pid)
+ return sproc
+
+
+@_reap_children_on_err
+def spawn_children_pair():
+ """Create a subprocess which creates another one as in:
+ A (us) -> B (child) -> C (grandchild).
+ Return a (child, grandchild) tuple.
+ The 2 processes are fully initialized and will live for 60 secs
+ and are registered for cleanup on reap_children().
+ """
+ tfile = None
+ testfn = get_testfn(dir=os.getcwd())
+ try:
+ s = textwrap.dedent("""\
+ import subprocess, os, sys, time
+ s = "import os, time;"
+ s += "f = open('%s', 'w');"
+ s += "f.write(str(os.getpid()));"
+ s += "f.close();"
+ s += "time.sleep(60);"
+ p = subprocess.Popen([r'%s', '-c', s])
+ p.wait()
+ """ % (os.path.basename(testfn), PYTHON_EXE))
+ # On Windows if we create a subprocess with CREATE_NO_WINDOW flag
+ # set (which is the default) a "conhost.exe" extra process will be
+ # spawned as a child. We don't want that.
+ if WINDOWS:
+ subp, tfile = pyrun(s, creationflags=0)
+ else:
+ subp, tfile = pyrun(s)
+ child = psutil.Process(subp.pid)
+ grandchild_pid = int(wait_for_file(testfn, delete=True, empty=False))
+ _pids_started.add(grandchild_pid)
+ grandchild = psutil.Process(grandchild_pid)
+ return (child, grandchild)
+ finally:
+ safe_rmpath(testfn)
+ if tfile is not None:
+ safe_rmpath(tfile)
+
+
+def spawn_zombie():
+ """Create a zombie process and return a (parent, zombie) process tuple.
+    In order to kill the zombie, the parent must be terminate()d first,
+    then the zombie must be wait()ed on.
+ """
+ assert psutil.POSIX
+ unix_file = get_testfn()
+ src = textwrap.dedent("""\
+ import os, sys, time, socket, contextlib
+ child_pid = os.fork()
+ if child_pid > 0:
+ time.sleep(3000)
+ else:
+ # this is the zombie process
+ s = socket.socket(socket.AF_UNIX)
+ with contextlib.closing(s):
+ s.connect('%s')
+ if sys.version_info < (3, ):
+ pid = str(os.getpid())
+ else:
+ pid = bytes(str(os.getpid()), 'ascii')
+ s.sendall(pid)
+ """ % unix_file)
+ tfile = None
+ sock = bind_unix_socket(unix_file)
+ try:
+ sock.settimeout(GLOBAL_TIMEOUT)
+ parent, tfile = pyrun(src)
+ conn, _ = sock.accept()
+ try:
+ select.select([conn.fileno()], [], [], GLOBAL_TIMEOUT)
+ zpid = int(conn.recv(1024))
+ _pids_started.add(zpid)
+ zombie = psutil.Process(zpid)
+ call_until(lambda: zombie.status(), "ret == psutil.STATUS_ZOMBIE")
+ return (parent, zombie)
+ finally:
+ conn.close()
+ finally:
+ sock.close()
+ safe_rmpath(unix_file)
+ if tfile is not None:
+ safe_rmpath(tfile)
+
+
+@_reap_children_on_err
+def pyrun(src, **kwds):
+ """Run python 'src' code string in a separate interpreter.
+ Returns a subprocess.Popen instance and the test file where the source
+ code was written.
+ """
+ kwds.setdefault("stdout", None)
+ kwds.setdefault("stderr", None)
+ srcfile = get_testfn()
+ try:
+ with open(srcfile, 'wt') as f:
+ f.write(src)
+ subp = spawn_testproc([PYTHON_EXE, f.name], **kwds)
+ wait_for_pid(subp.pid)
+ return (subp, srcfile)
+ except Exception:
+ safe_rmpath(srcfile)
+ raise
+
+
+@_reap_children_on_err
+def sh(cmd, **kwds):
+ """run cmd in a subprocess and return its output.
+ raises RuntimeError on error.
+ """
+    # Prevents the subprocess from opening error dialogs on error.
+ flags = 0x8000000 if WINDOWS else 0
+ kwds.setdefault("stdout", subprocess.PIPE)
+ kwds.setdefault("stderr", subprocess.PIPE)
+ kwds.setdefault("universal_newlines", True)
+ kwds.setdefault("creationflags", flags)
+ if isinstance(cmd, str):
+ cmd = shlex.split(cmd)
+ p = subprocess.Popen(cmd, **kwds)
+ _subprocesses_started.add(p)
+ if PY3:
+ stdout, stderr = p.communicate(timeout=GLOBAL_TIMEOUT)
+ else:
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError(stderr)
+ if stderr:
+ warn(stderr)
+ if stdout.endswith('\n'):
+ stdout = stdout[:-1]
+ return stdout
+
+
+def terminate(proc_or_pid, sig=signal.SIGTERM, wait_timeout=GLOBAL_TIMEOUT):
+ """Terminate a process and wait() for it.
+ Process can be a PID or an instance of psutil.Process(),
+ subprocess.Popen() or psutil.Popen().
+ If it's a subprocess.Popen() or psutil.Popen() instance also closes
+ its stdin / stdout / stderr fds.
+    PID is wait()ed even if the process is already gone (reaps zombies).
+ Does nothing if the process does not exist.
+ Return process exit status.
+ """
+ def wait(proc, timeout):
+ if isinstance(proc, subprocess.Popen) and not PY3:
+ proc.wait()
+ else:
+ proc.wait(timeout)
+ if WINDOWS and isinstance(proc, subprocess.Popen):
+ # Otherwise PID may still hang around.
+ try:
+ return psutil.Process(proc.pid).wait(timeout)
+ except psutil.NoSuchProcess:
+ pass
+
+ def sendsig(proc, sig):
+ # XXX: otherwise the build hangs for some reason.
+ if MACOS and GITHUB_ACTIONS:
+ sig = signal.SIGKILL
+ # If the process received SIGSTOP, SIGCONT is necessary first,
+ # otherwise SIGTERM won't work.
+ if POSIX and sig != signal.SIGKILL:
+ proc.send_signal(signal.SIGCONT)
+ proc.send_signal(sig)
+
+ def term_subprocess_proc(proc, timeout):
+ try:
+ sendsig(proc, sig)
+ except OSError as err:
+ if WINDOWS and err.winerror == 6: # "invalid handle"
+ pass
+ elif err.errno != errno.ESRCH:
+ raise
+ return wait(proc, timeout)
+
+ def term_psutil_proc(proc, timeout):
+ try:
+ sendsig(proc, sig)
+ except psutil.NoSuchProcess:
+ pass
+ return wait(proc, timeout)
+
+ def term_pid(pid, timeout):
+ try:
+ proc = psutil.Process(pid)
+ except psutil.NoSuchProcess:
+ # Needed to kill zombies.
+ if POSIX:
+ return wait_pid(pid, timeout)
+ else:
+ return term_psutil_proc(proc, timeout)
+
+ def flush_popen(proc):
+ if proc.stdout:
+ proc.stdout.close()
+ if proc.stderr:
+ proc.stderr.close()
+ # Flushing a BufferedWriter may raise an error.
+ if proc.stdin:
+ proc.stdin.close()
+
+ p = proc_or_pid
+ try:
+ if isinstance(p, int):
+ return term_pid(p, wait_timeout)
+ elif isinstance(p, (psutil.Process, psutil.Popen)):
+ return term_psutil_proc(p, wait_timeout)
+ elif isinstance(p, subprocess.Popen):
+ return term_subprocess_proc(p, wait_timeout)
+ else:
+ raise TypeError("wrong type %r" % p)
+ finally:
+ if isinstance(p, (subprocess.Popen, psutil.Popen)):
+ flush_popen(p)
+ pid = p if isinstance(p, int) else p.pid
+ assert not psutil.pid_exists(pid), pid
+
+
+def reap_children(recursive=False):
+ """Terminate and wait() any subprocess started by this test suite
+ and any children currently running, ensuring that no processes stick
+ around to hog resources.
+ If recursive is True it also tries to terminate and wait()
+ all grandchildren started by this process.
+ """
+ # Get the children here before terminating them, as in case of
+ # recursive=True we don't want to lose the intermediate reference
+ # pointing to the grandchildren.
+ children = psutil.Process().children(recursive=recursive)
+
+ # Terminate subprocess.Popen.
+ while _subprocesses_started:
+ subp = _subprocesses_started.pop()
+ terminate(subp)
+
+ # Collect started pids.
+ while _pids_started:
+ pid = _pids_started.pop()
+ terminate(pid)
+
+ # Terminate children.
+ if children:
+ for p in children:
+ terminate(p, wait_timeout=None)
+ gone, alive = psutil.wait_procs(children, timeout=GLOBAL_TIMEOUT)
+ for p in alive:
+ warn("couldn't terminate process %r; attempting kill()" % p)
+ terminate(p, sig=signal.SIGKILL)
+
+
+# ===================================================================
+# --- OS
+# ===================================================================
+
+
+def kernel_version():
+ """Return a tuple such as (2, 6, 36)."""
+ if not POSIX:
+ raise NotImplementedError("not POSIX")
+ s = ""
+ uname = os.uname()[2]
+ for c in uname:
+ if c.isdigit() or c == '.':
+ s += c
+ else:
+ break
+ if not s:
+ raise ValueError("can't parse %r" % uname)
+ minor = 0
+ micro = 0
+ nums = s.split('.')
+ major = int(nums[0])
+ if len(nums) >= 2:
+ minor = int(nums[1])
+ if len(nums) >= 3:
+ micro = int(nums[2])
+ return (major, minor, micro)
+
+
+def get_winver():
+ if not WINDOWS:
+ raise NotImplementedError("not WINDOWS")
+ wv = sys.getwindowsversion()
+ if hasattr(wv, 'service_pack_major'): # python >= 2.7
+ sp = wv.service_pack_major or 0
+ else:
+ r = re.search(r"\s\d$", wv[4])
+ if r:
+ sp = int(r.group(0))
+ else:
+ sp = 0
+ return (wv[0], wv[1], sp)
+
+
+# ===================================================================
+# --- sync primitives
+# ===================================================================
+
+
+class retry(object):
+ """A retry decorator."""
+
+ def __init__(self,
+ exception=Exception,
+ timeout=None,
+ retries=None,
+ interval=0.001,
+ logfun=None,
+ ):
+ if timeout and retries:
+ raise ValueError("timeout and retries args are mutually exclusive")
+ self.exception = exception
+ self.timeout = timeout
+ self.retries = retries
+ self.interval = interval
+ self.logfun = logfun
+
+ def __iter__(self):
+ if self.timeout:
+ stop_at = time.time() + self.timeout
+ while time.time() < stop_at:
+ yield
+ elif self.retries:
+ for _ in range(self.retries):
+ yield
+ else:
+ while True:
+ yield
+
+ def sleep(self):
+ if self.interval is not None:
+ time.sleep(self.interval)
+
+ def __call__(self, fun):
+ @functools.wraps(fun)
+ def wrapper(*args, **kwargs):
+ exc = None
+ for _ in self:
+ try:
+ return fun(*args, **kwargs)
+ except self.exception as _: # NOQA
+ exc = _
+ if self.logfun is not None:
+ self.logfun(exc)
+ self.sleep()
+ continue
+ if PY3:
+ raise exc
+ else:
+ raise
+
+ # This way the user of the decorated function can change config
+ # parameters.
+ wrapper.decorator = self
+ return wrapper
+
+
+@retry(exception=psutil.NoSuchProcess, logfun=None, timeout=GLOBAL_TIMEOUT,
+ interval=0.001)
+def wait_for_pid(pid):
+ """Wait for pid to show up in the process list then return.
+    Used in the test suite to give the sub process time to initialize.
+ """
+ psutil.Process(pid)
+ if WINDOWS:
+ # give it some more time to allow better initialization
+ time.sleep(0.01)
+
+
+@retry(exception=(FileNotFoundError, AssertionError), logfun=None,
+ timeout=GLOBAL_TIMEOUT, interval=0.001)
+def wait_for_file(fname, delete=True, empty=False):
+ """Wait for a file to be written on disk with some content."""
+ with open(fname, "rb") as f:
+ data = f.read()
+ if not empty:
+ assert data
+ if delete:
+ safe_rmpath(fname)
+ return data
+
+
+@retry(exception=AssertionError, logfun=None, timeout=GLOBAL_TIMEOUT,
+ interval=0.001)
+def call_until(fun, expr):
+ """Keep calling function for timeout secs and exit if eval()
+ expression is True.
+ """
+ ret = fun()
+ assert eval(expr)
+ return ret
+
+
+# ===================================================================
+# --- fs
+# ===================================================================
+
+
+def safe_rmpath(path):
+ """Convenience function for removing temporary test files or dirs."""
+ def retry_fun(fun):
+ # On Windows it could happen that the file or directory has
+        # open handles or references preventing the delete operation
+        # from succeeding immediately, so we retry for a while. See:
+ # https://bugs.python.org/issue33240
+ stop_at = time.time() + GLOBAL_TIMEOUT
+ while time.time() < stop_at:
+ try:
+ return fun()
+ except FileNotFoundError:
+ pass
+ except WindowsError as _:
+ err = _
+ warn("ignoring %s" % (str(err)))
+ time.sleep(0.01)
+ raise err
+
+ try:
+ st = os.stat(path)
+ if stat.S_ISDIR(st.st_mode):
+ fun = functools.partial(shutil.rmtree, path)
+ else:
+ fun = functools.partial(os.remove, path)
+ if POSIX:
+ fun()
+ else:
+ retry_fun(fun)
+ except FileNotFoundError:
+ pass
+
+
+def safe_mkdir(dir):
+ """Convenience function for creating a directory."""
+ try:
+ os.mkdir(dir)
+ except FileExistsError:
+ pass
+
+
+@contextlib.contextmanager
+def chdir(dirname):
+ """Context manager which temporarily changes the current directory."""
+ curdir = os.getcwd()
+ try:
+ os.chdir(dirname)
+ yield
+ finally:
+ os.chdir(curdir)
+
+
+def create_exe(outpath, c_code=None):
+ """Creates an executable file in the given location."""
+ assert not os.path.exists(outpath), outpath
+ if c_code:
+ if not which("gcc"):
+ raise ValueError("gcc is not installed")
+ if isinstance(c_code, bool): # c_code is True
+ c_code = textwrap.dedent(
+ """
+ #include <unistd.h>
+ int main() {
+ pause();
+ return 1;
+ }
+ """)
+ assert isinstance(c_code, str), c_code
+ with open(get_testfn(suffix='.c'), 'wt') as f:
+ f.write(c_code)
+ try:
+ subprocess.check_call(["gcc", f.name, "-o", outpath])
+ finally:
+ safe_rmpath(f.name)
+ else:
+ # copy python executable
+ shutil.copyfile(PYTHON_EXE, outpath)
+ if POSIX:
+ st = os.stat(outpath)
+ os.chmod(outpath, st.st_mode | stat.S_IEXEC)
+
+
+def get_testfn(suffix="", dir=None):
+ """Return an absolute pathname of a file or dir that did not
+ exist at the time this call is made. Also schedule it for safe
+    deletion at interpreter exit. It's technically racy, but collisions
+    are unlikely in practice.
+ """
+ while True:
+ name = tempfile.mktemp(prefix=TESTFN_PREFIX, suffix=suffix, dir=dir)
+ if not os.path.exists(name): # also include dirs
+ return os.path.realpath(name) # needed for OSX
+
+
+# ===================================================================
+# --- testing
+# ===================================================================
+
+
+class TestCase(unittest.TestCase):
+
+ # Print a full path representation of the single unit tests
+ # being run.
+ def __str__(self):
+ fqmod = self.__class__.__module__
+ if not fqmod.startswith('psutil.'):
+ fqmod = 'psutil.tests.' + fqmod
+ return "%s.%s.%s" % (
+ fqmod, self.__class__.__name__, self._testMethodName)
+
+ # assertRaisesRegexp renamed to assertRaisesRegex in 3.3;
+ # add support for the new name.
+ if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
+ assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+
+ # ...otherwise multiprocessing.Pool complains
+ if not PY3:
+ def runTest(self):
+ pass
+
+ @contextlib.contextmanager
+ def subTest(self, *args, **kw):
+ # fake it for python 2.7
+ yield
+
+
+# monkey patch default unittest.TestCase
+unittest.TestCase = TestCase
+
+
+class PsutilTestCase(TestCase):
+ """Test class providing auto-cleanup wrappers on top of process
+ test utilities.
+ """
+
+ def get_testfn(self, suffix="", dir=None):
+ fname = get_testfn(suffix=suffix, dir=dir)
+ self.addCleanup(safe_rmpath, fname)
+ return fname
+
+ def spawn_testproc(self, *args, **kwds):
+ sproc = spawn_testproc(*args, **kwds)
+ self.addCleanup(terminate, sproc)
+ return sproc
+
+ def spawn_children_pair(self):
+ child1, child2 = spawn_children_pair()
+ self.addCleanup(terminate, child2)
+ self.addCleanup(terminate, child1) # executed first
+ return (child1, child2)
+
+ def spawn_zombie(self):
+ parent, zombie = spawn_zombie()
+ self.addCleanup(terminate, zombie)
+ self.addCleanup(terminate, parent) # executed first
+ return (parent, zombie)
+
+ def pyrun(self, *args, **kwds):
+ sproc, srcfile = pyrun(*args, **kwds)
+ self.addCleanup(safe_rmpath, srcfile)
+ self.addCleanup(terminate, sproc) # executed first
+ return sproc
+
+ def assertProcessGone(self, proc):
+ self.assertRaises(psutil.NoSuchProcess, psutil.Process, proc.pid)
+ if isinstance(proc, (psutil.Process, psutil.Popen)):
+ assert not proc.is_running()
+ try:
+ status = proc.status()
+ except psutil.NoSuchProcess:
+ pass
+ else:
+ raise AssertionError("Process.status() didn't raise exception "
+ "(status=%s)" % status)
+ proc.wait(timeout=0) # assert not raise TimeoutExpired
+ assert not psutil.pid_exists(proc.pid), proc.pid
+ self.assertNotIn(proc.pid, psutil.pids())
+
+
+@unittest.skipIf(PYPY, "unreliable on PYPY")
+class TestMemoryLeak(PsutilTestCase):
+ """Test framework class for detecting function memory leaks,
+ typically functions implemented in C which forgot to free() memory
+ from the heap. It does so by checking whether the process memory
+ usage increased before and after calling the function many times.
+
+ Note that this is hard (probably impossible) to do reliably, due
+ to how the OS handles memory, the GC and so on (memory can even
+ decrease!). In order to avoid false positives, in case of failure
+ (mem > 0) we retry the test for up to 5 times, increasing call
+ repetitions each time. If the memory keeps increasing then it's a
+ failure.
+
+ If available (Linux, OSX, Windows), USS memory is used for comparison,
+ since it's supposed to be more precise, see:
+ https://gmpy.dev/blog/2016/real-process-memory-and-environ-in-python
+ If not, RSS memory is used. mallinfo() on Linux and _heapwalk() on
+ Windows may give even more precision, but at the moment are not
+ implemented.
+
+ PyPy appears to be completely unstable for this framework, probably
+ because of its JIT, so tests on PYPY are skipped.
+
+ Usage:
+
+ class TestLeaks(psutil.tests.TestMemoryLeak):
+
+ def test_fun(self):
+ self.execute(some_function)
+ """
+ # Configurable class attrs.
+ times = 200
+ warmup_times = 10
+ tolerance = 0 # memory
+ retries = 10 if CI_TESTING else 5
+ verbose = True
+ _thisproc = psutil.Process()
+ _psutil_debug_orig = bool(os.getenv('PSUTIL_DEBUG', 0))
+
+ @classmethod
+ def setUpClass(cls):
+ psutil._set_debug(False) # avoid spamming to stderr
+
+ @classmethod
+ def tearDownClass(cls):
+ psutil._set_debug(cls._psutil_debug_orig)
+
+ def _get_mem(self):
+ # USS is the closest thing we have to "real" memory usage and it
+ # should be less likely to produce false positives.
+ mem = self._thisproc.memory_full_info()
+ return getattr(mem, "uss", mem.rss)
+
+ def _get_num_fds(self):
+ if POSIX:
+ return self._thisproc.num_fds()
+ else:
+ return self._thisproc.num_handles()
+
+ def _log(self, msg):
+ if self.verbose:
+ print_color(msg, color="yellow", file=sys.stderr)
+
+ def _check_fds(self, fun):
+ """Makes sure num_fds() (POSIX) or num_handles() (Windows) does
+ not increase after calling a function. Used to discover forgotten
+ close(2) and CloseHandle syscalls.
+ """
+ before = self._get_num_fds()
+ self.call(fun)
+ after = self._get_num_fds()
+ diff = after - before
+ if diff < 0:
+ raise self.fail("negative diff %r (gc probably collected a "
+ "resource from a previous test)" % diff)
+ if diff > 0:
+ type_ = "fd" if POSIX else "handle"
+ if diff > 1:
+ type_ += "s"
+ msg = "%s unclosed %s after calling %r" % (diff, type_, fun)
+ raise self.fail(msg)
+
+ def _call_ntimes(self, fun, times):
+ """Get 2 distinct memory samples, before and after having
+ called fun repeatedly, and return the memory difference.
+ """
+ gc.collect(generation=1)
+ mem1 = self._get_mem()
+ for x in range(times):
+ ret = self.call(fun)
+ del x, ret
+ gc.collect(generation=1)
+ mem2 = self._get_mem()
+ self.assertEqual(gc.garbage, [])
+ diff = mem2 - mem1 # can also be negative
+ return diff
+
+ def _check_mem(self, fun, times, warmup_times, retries, tolerance):
+ messages = []
+ prev_mem = 0
+ increase = times
+ for idx in range(1, retries + 1):
+ mem = self._call_ntimes(fun, times)
+ msg = "Run #%s: extra-mem=%s, per-call=%s, calls=%s" % (
+ idx, bytes2human(mem), bytes2human(mem / times), times)
+ messages.append(msg)
+ success = mem <= tolerance or mem <= prev_mem
+ if success:
+ if idx > 1:
+ self._log(msg)
+ return
+ else:
+ if idx == 1:
+ print() # NOQA
+ self._log(msg)
+ times += increase
+ prev_mem = mem
+ raise self.fail(". ".join(messages))
+
+ # ---
+
+ def call(self, fun):
+ return fun()
+
+ def execute(self, fun, times=None, warmup_times=None, retries=None,
+ tolerance=None):
+ """Test a callable."""
+ times = times if times is not None else self.times
+ warmup_times = warmup_times if warmup_times is not None \
+ else self.warmup_times
+ retries = retries if retries is not None else self.retries
+ tolerance = tolerance if tolerance is not None else self.tolerance
+ try:
+ assert times >= 1, "times must be >= 1"
+ assert warmup_times >= 0, "warmup_times must be >= 0"
+ assert retries >= 0, "retries must be >= 0"
+ assert tolerance >= 0, "tolerance must be >= 0"
+ except AssertionError as err:
+ raise ValueError(str(err))
+
+ self._call_ntimes(fun, warmup_times) # warm up
+ self._check_fds(fun)
+ self._check_mem(fun, times=times, warmup_times=warmup_times,
+ retries=retries, tolerance=tolerance)
+
+ def execute_w_exc(self, exc, fun, **kwargs):
+ """Convenience method to test a callable while making sure it
+ raises an exception on every call.
+ """
+ def call():
+ self.assertRaises(exc, fun)
+
+ self.execute(call, **kwargs)
+
+
+def print_sysinfo():
+ import collections
+ import datetime
+ import getpass
+ import locale
+ import platform
+ import pprint
+ try:
+ import pip
+ except ImportError:
+ pip = None
+ try:
+ import wheel
+ except ImportError:
+ wheel = None
+
+ info = collections.OrderedDict()
+
+ # OS
+ if psutil.LINUX and which('lsb_release'):
+ info['OS'] = sh('lsb_release -d -s')
+ elif psutil.OSX:
+ info['OS'] = 'Darwin %s' % platform.mac_ver()[0]
+ elif psutil.WINDOWS:
+ info['OS'] = "Windows " + ' '.join(
+ map(str, platform.win32_ver()))
+ if hasattr(platform, 'win32_edition'):
+ info['OS'] += ", " + platform.win32_edition()
+ else:
+ info['OS'] = "%s %s" % (platform.system(), platform.version())
+ info['arch'] = ', '.join(
+ list(platform.architecture()) + [platform.machine()])
+ if psutil.POSIX:
+ info['kernel'] = platform.uname()[2]
+
+ # python
+ info['python'] = ', '.join([
+ platform.python_implementation(),
+ platform.python_version(),
+ platform.python_compiler()])
+ info['pip'] = getattr(pip, '__version__', 'not installed')
+ if wheel is not None:
+ info['pip'] += " (wheel=%s)" % wheel.__version__
+
+ # UNIX
+ if psutil.POSIX:
+ if which('gcc'):
+ out = sh(['gcc', '--version'])
+ info['gcc'] = str(out).split('\n')[0]
+ else:
+ info['gcc'] = 'not installed'
+ s = platform.libc_ver()[1]
+ if s:
+ info['glibc'] = s
+
+ # system
+ info['fs-encoding'] = sys.getfilesystemencoding()
+ lang = locale.getlocale()
+ info['lang'] = '%s, %s' % (lang[0], lang[1])
+ info['boot-time'] = datetime.datetime.fromtimestamp(
+ psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
+ info['time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ info['user'] = getpass.getuser()
+ info['home'] = os.path.expanduser("~")
+ info['cwd'] = os.getcwd()
+ info['pyexe'] = PYTHON_EXE
+ info['hostname'] = platform.node()
+ info['PID'] = os.getpid()
+
+ # metrics
+ info['cpus'] = psutil.cpu_count()
+ info['loadavg'] = "%.1f%%, %.1f%%, %.1f%%" % (
+ tuple([x / psutil.cpu_count() * 100 for x in psutil.getloadavg()]))
+ mem = psutil.virtual_memory()
+ info['memory'] = "%s%%, used=%s, total=%s" % (
+ int(mem.percent), bytes2human(mem.used), bytes2human(mem.total))
+ swap = psutil.swap_memory()
+ info['swap'] = "%s%%, used=%s, total=%s" % (
+ int(swap.percent), bytes2human(swap.used), bytes2human(swap.total))
+ info['pids'] = len(psutil.pids())
+ pinfo = psutil.Process().as_dict()
+ pinfo.pop('memory_maps', None)
+ info['proc'] = pprint.pformat(pinfo)
+
+ print("=" * 70, file=sys.stderr) # NOQA
+ for k, v in info.items():
+ print("%-17s %s" % (k + ':', v), file=sys.stderr) # NOQA
+ print("=" * 70, file=sys.stderr) # NOQA
+ sys.stdout.flush()
+
+
+def _get_eligible_cpu():
+ p = psutil.Process()
+ if hasattr(p, "cpu_num"):
+ return p.cpu_num()
+ elif hasattr(p, "cpu_affinity"):
+ return random.choice(p.cpu_affinity())
+ return 0
+
+
+class process_namespace:
+ """A container that lists all Process class method names + some
+ reasonable parameters to be called with. Utility methods (parent(),
+ children(), ...) are excluded.
+
+ >>> ns = process_namespace(psutil.Process())
+ >>> for fun, name in ns.iter(ns.getters):
+ ... fun()
+ """
+ utils = [
+ ('cpu_percent', (), {}),
+ ('memory_percent', (), {}),
+ ]
+
+ ignored = [
+ ('as_dict', (), {}),
+ ('children', (), {'recursive': True}),
+ ('is_running', (), {}),
+ ('memory_info_ex', (), {}),
+ ('oneshot', (), {}),
+ ('parent', (), {}),
+ ('parents', (), {}),
+ ('pid', (), {}),
+ ('wait', (0, ), {}),
+ ]
+
+ getters = [
+ ('cmdline', (), {}),
+ ('connections', (), {'kind': 'all'}),
+ ('cpu_times', (), {}),
+ ('create_time', (), {}),
+ ('cwd', (), {}),
+ ('exe', (), {}),
+ ('memory_full_info', (), {}),
+ ('memory_info', (), {}),
+ ('name', (), {}),
+ ('nice', (), {}),
+ ('num_ctx_switches', (), {}),
+ ('num_threads', (), {}),
+ ('open_files', (), {}),
+ ('ppid', (), {}),
+ ('status', (), {}),
+ ('threads', (), {}),
+ ('username', (), {}),
+ ]
+ if POSIX:
+ getters += [('uids', (), {})]
+ getters += [('gids', (), {})]
+ getters += [('terminal', (), {})]
+ getters += [('num_fds', (), {})]
+ if HAS_PROC_IO_COUNTERS:
+ getters += [('io_counters', (), {})]
+ if HAS_IONICE:
+ getters += [('ionice', (), {})]
+ if HAS_RLIMIT:
+ getters += [('rlimit', (psutil.RLIMIT_NOFILE, ), {})]
+ if HAS_CPU_AFFINITY:
+ getters += [('cpu_affinity', (), {})]
+ if HAS_PROC_CPU_NUM:
+ getters += [('cpu_num', (), {})]
+ if HAS_ENVIRON:
+ getters += [('environ', (), {})]
+ if WINDOWS:
+ getters += [('num_handles', (), {})]
+ if HAS_MEMORY_MAPS:
+ getters += [('memory_maps', (), {'grouped': False})]
+
+ setters = []
+ if POSIX:
+ setters += [('nice', (0, ), {})]
+ else:
+ setters += [('nice', (psutil.NORMAL_PRIORITY_CLASS, ), {})]
+ if HAS_RLIMIT:
+ setters += [('rlimit', (psutil.RLIMIT_NOFILE, (1024, 4096)), {})]
+ if HAS_IONICE:
+ if LINUX:
+ setters += [('ionice', (psutil.IOPRIO_CLASS_NONE, 0), {})]
+ else:
+ setters += [('ionice', (psutil.IOPRIO_NORMAL, ), {})]
+ if HAS_CPU_AFFINITY:
+ setters += [('cpu_affinity', ([_get_eligible_cpu()], ), {})]
+
+ killers = [
+ ('send_signal', (signal.SIGTERM, ), {}),
+ ('suspend', (), {}),
+ ('resume', (), {}),
+ ('terminate', (), {}),
+ ('kill', (), {}),
+ ]
+ if WINDOWS:
+ killers += [('send_signal', (signal.CTRL_C_EVENT, ), {})]
+ killers += [('send_signal', (signal.CTRL_BREAK_EVENT, ), {})]
+
+ all = utils + getters + setters + killers
+
+ def __init__(self, proc):
+ self._proc = proc
+
+ def iter(self, ls, clear_cache=True):
+ """Given a list of tuples yields a set of (fun, fun_name) tuples
+ in random order.
+ """
+ ls = list(ls)
+ random.shuffle(ls)
+ for fun_name, args, kwds in ls:
+ if clear_cache:
+ self.clear_cache()
+ fun = getattr(self._proc, fun_name)
+ fun = functools.partial(fun, *args, **kwds)
+ yield (fun, fun_name)
+
+ def clear_cache(self):
+ """Clear the cache of a Process instance."""
+ self._proc._init(self._proc.pid, _ignore_nsp=True)
+
+ @classmethod
+ def test_class_coverage(cls, test_class, ls):
+ """Given a TestCase instance and a list of tuples checks that
+ the class defines the required test method names.
+ """
+ for fun_name, _, _ in ls:
+ meth_name = 'test_' + fun_name
+ if not hasattr(test_class, meth_name):
+ msg = "%r class should define a '%s' method" % (
+ test_class.__class__.__name__, meth_name)
+ raise AttributeError(msg)
+
+ @classmethod
+ def test(cls):
+ this = set([x[0] for x in cls.all])
+ ignored = set([x[0] for x in cls.ignored])
+ klass = set([x for x in dir(psutil.Process) if x[0] != '_'])
+ leftout = (this | ignored) ^ klass
+ if leftout:
+ raise ValueError("uncovered Process class names: %r" % leftout)
+
+
+class system_namespace:
+ """A container that lists all the module-level, system-related APIs.
+ Utilities such as cpu_percent() are excluded. Usage:
+
+ >>> ns = system_namespace
+ >>> for fun, name in ns.iter(ns.getters):
+ ... fun()
+ """
+ getters = [
+ ('boot_time', (), {}),
+ ('cpu_count', (), {'logical': False}),
+ ('cpu_count', (), {'logical': True}),
+ ('cpu_stats', (), {}),
+ ('cpu_times', (), {'percpu': False}),
+ ('cpu_times', (), {'percpu': True}),
+ ('disk_io_counters', (), {'perdisk': True}),
+ ('disk_partitions', (), {'all': True}),
+ ('disk_usage', (os.getcwd(), ), {}),
+ ('net_connections', (), {'kind': 'all'}),
+ ('net_if_addrs', (), {}),
+ ('net_if_stats', (), {}),
+ ('net_io_counters', (), {'pernic': True}),
+ ('pid_exists', (os.getpid(), ), {}),
+ ('pids', (), {}),
+ ('swap_memory', (), {}),
+ ('users', (), {}),
+ ('virtual_memory', (), {}),
+ ]
+ if HAS_CPU_FREQ:
+ getters += [('cpu_freq', (), {'percpu': True})]
+ if HAS_GETLOADAVG:
+ getters += [('getloadavg', (), {})]
+ if HAS_SENSORS_TEMPERATURES:
+ getters += [('sensors_temperatures', (), {})]
+ if HAS_SENSORS_FANS:
+ getters += [('sensors_fans', (), {})]
+ if HAS_SENSORS_BATTERY:
+ getters += [('sensors_battery', (), {})]
+ if WINDOWS:
+ getters += [('win_service_iter', (), {})]
+ getters += [('win_service_get', ('alg', ), {})]
+
+ ignored = [
+ ('process_iter', (), {}),
+ ('wait_procs', ([psutil.Process()], ), {}),
+ ('cpu_percent', (), {}),
+ ('cpu_times_percent', (), {}),
+ ]
+
+ all = getters
+
+ @staticmethod
+ def iter(ls):
+ """Given a list of tuples yields a set of (fun, fun_name) tuples
+ in random order.
+ """
+ ls = list(ls)
+ random.shuffle(ls)
+ for fun_name, args, kwds in ls:
+ fun = getattr(psutil, fun_name)
+ fun = functools.partial(fun, *args, **kwds)
+ yield (fun, fun_name)
+
+ test_class_coverage = process_namespace.test_class_coverage
+
+
+def serialrun(klass):
+ """A decorator to mark a TestCase class. When running parallel tests,
+ class' unit tests will be run serially (1 process).
+ """
+ # assert issubclass(klass, unittest.TestCase), klass
+ assert inspect.isclass(klass), klass
+ klass._serialrun = True
+ return klass
+
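+# Usage sketch (hypothetical test class name): force a test class to run in
+# the serial (non-parallel) part of the run.
+#
+#   @serialrun
+#   class TestSomethingRaceProne(PsutilTestCase):
+#       ...
+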
+
+def retry_on_failure(retries=NO_RETRIES):
+ """Decorator which runs a test function and retries N times before
+ actually failing.
+ """
+ def logfun(exc):
+ print("%r, retrying" % exc, file=sys.stderr) # NOQA
+
+ return retry(exception=AssertionError, timeout=None, retries=retries,
+ logfun=logfun)
+
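+# Usage sketch: retry a flaky assertion-based test a few times before
+# actually failing (example method name taken from the BSD tests below).
+#
+#   @retry_on_failure()
+#   def test_cpu_times(self):
+#       ...
+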
+
+def skip_on_access_denied(only_if=None):
+ """Decorator to Ignore AccessDenied exceptions."""
+ def decorator(fun):
+ @functools.wraps(fun)
+ def wrapper(*args, **kwargs):
+ try:
+ return fun(*args, **kwargs)
+ except psutil.AccessDenied:
+ if only_if is not None:
+ if not only_if:
+ raise
+ raise unittest.SkipTest("raises AccessDenied")
+ return wrapper
+ return decorator
+
+
+def skip_on_not_implemented(only_if=None):
+ """Decorator to Ignore NotImplementedError exceptions."""
+ def decorator(fun):
+ @functools.wraps(fun)
+ def wrapper(*args, **kwargs):
+ try:
+ return fun(*args, **kwargs)
+ except NotImplementedError:
+ if only_if is not None:
+ if not only_if:
+ raise
+ msg = "%r was skipped because it raised NotImplementedError" \
+ % fun.__name__
+ raise unittest.SkipTest(msg)
+ return wrapper
+ return decorator
+
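+# Usage sketch: turn AccessDenied into a skip, but only on macOS (as used by
+# test_connections.py).
+#
+#   @skip_on_access_denied(only_if=MACOS)
+#   def test_combos(self):
+#       ...
+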
+
+# ===================================================================
+# --- network
+# ===================================================================
+
+
+# XXX: no longer used
+def get_free_port(host='127.0.0.1'):
+ """Return an unused TCP port. Subject to race conditions."""
+ with contextlib.closing(socket.socket()) as sock:
+ sock.bind((host, 0))
+ return sock.getsockname()[1]
+
+
+def bind_socket(family=AF_INET, type=SOCK_STREAM, addr=None):
+ """Binds a generic socket."""
+ if addr is None and family in (AF_INET, AF_INET6):
+ addr = ("", 0)
+ sock = socket.socket(family, type)
+ try:
+ if os.name not in ('nt', 'cygwin'):
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(addr)
+ if type == socket.SOCK_STREAM:
+ sock.listen(5)
+ return sock
+ except Exception:
+ sock.close()
+ raise
+
+
+def bind_unix_socket(name, type=socket.SOCK_STREAM):
+ """Bind a UNIX socket."""
+ assert psutil.POSIX
+ assert not os.path.exists(name), name
+ sock = socket.socket(socket.AF_UNIX, type)
+ try:
+ sock.bind(name)
+ if type == socket.SOCK_STREAM:
+ sock.listen(5)
+ except Exception:
+ sock.close()
+ raise
+ return sock
+
+
+def tcp_socketpair(family, addr=("", 0)):
+ """Build a pair of TCP sockets connected to each other.
+ Return a (server, client) tuple.
+ """
+ with contextlib.closing(socket.socket(family, SOCK_STREAM)) as ll:
+ ll.bind(addr)
+ ll.listen(5)
+ addr = ll.getsockname()
+ c = socket.socket(family, SOCK_STREAM)
+ try:
+ c.connect(addr)
+ caddr = c.getsockname()
+ while True:
+ a, addr = ll.accept()
+ # check that we've got the correct client
+ if addr == caddr:
+ return (a, c)
+ a.close()
+ except OSError:
+ c.close()
+ raise
+
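+# Usage sketch (mirrors TestConnectedSocket.test_tcp in test_connections.py):
+#
+#   server, client = tcp_socketpair(AF_INET, addr=("127.0.0.1", 0))
+#   try:
+#       ...  # both ends show up as ESTABLISHED connections
+#   finally:
+#       server.close()
+#       client.close()
+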
+
+def unix_socketpair(name):
+ """Build a pair of UNIX sockets connected to each other through
+ the same UNIX file name.
+ Return a (server, client) tuple.
+ """
+ assert psutil.POSIX
+ server = client = None
+ try:
+ server = bind_unix_socket(name, type=socket.SOCK_STREAM)
+ server.setblocking(0)
+ client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ client.setblocking(0)
+ client.connect(name)
+ # new = server.accept()
+ except Exception:
+ if server is not None:
+ server.close()
+ if client is not None:
+ client.close()
+ raise
+ return (server, client)
+
+
+@contextlib.contextmanager
+def create_sockets():
+ """Open as many socket families / types as possible."""
+ socks = []
+ fname1 = fname2 = None
+ try:
+ socks.append(bind_socket(socket.AF_INET, socket.SOCK_STREAM))
+ socks.append(bind_socket(socket.AF_INET, socket.SOCK_DGRAM))
+ if supports_ipv6():
+ socks.append(bind_socket(socket.AF_INET6, socket.SOCK_STREAM))
+ socks.append(bind_socket(socket.AF_INET6, socket.SOCK_DGRAM))
+ if POSIX and HAS_CONNECTIONS_UNIX:
+ fname1 = get_testfn()
+ fname2 = get_testfn()
+ s1, s2 = unix_socketpair(fname1)
+ s3 = bind_unix_socket(fname2, type=socket.SOCK_DGRAM)
+ for s in (s1, s2, s3):
+ socks.append(s)
+ yield socks
+ finally:
+ for s in socks:
+ s.close()
+ for fname in (fname1, fname2):
+ if fname is not None:
+ safe_rmpath(fname)
+
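+# Usage sketch: open a bit of everything and validate what psutil reports.
+#
+#   with create_sockets():
+#       for conn in psutil.Process().connections(kind='all'):
+#           check_connection_ntuple(conn)
+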
+
+def check_net_address(addr, family):
+ """Check a net address validity. Supported families are IPv4,
+ IPv6 and MAC addresses.
+ """
+    import ipaddress  # stdlib since python 3.3; on python 2 requires "pip install ipaddress"
+ if enum and PY3 and not PYPY:
+ assert isinstance(family, enum.IntEnum), family
+ if family == socket.AF_INET:
+ octs = [int(x) for x in addr.split('.')]
+ assert len(octs) == 4, addr
+ for num in octs:
+ assert 0 <= num <= 255, addr
+ if not PY3:
+ addr = unicode(addr)
+ ipaddress.IPv4Address(addr)
+ elif family == socket.AF_INET6:
+ assert isinstance(addr, str), addr
+ if not PY3:
+ addr = unicode(addr)
+ ipaddress.IPv6Address(addr)
+ elif family == psutil.AF_LINK:
+ assert re.match(r'([a-fA-F0-9]{2}[:|\-]?){6}', addr) is not None, addr
+ else:
+ raise ValueError("unknown family %r", family)
+
+
+def check_connection_ntuple(conn):
+ """Check validity of a connection namedtuple."""
+ def check_ntuple(conn):
+ has_pid = len(conn) == 7
+ assert len(conn) in (6, 7), len(conn)
+ assert conn[0] == conn.fd, conn.fd
+ assert conn[1] == conn.family, conn.family
+ assert conn[2] == conn.type, conn.type
+ assert conn[3] == conn.laddr, conn.laddr
+ assert conn[4] == conn.raddr, conn.raddr
+ assert conn[5] == conn.status, conn.status
+ if has_pid:
+ assert conn[6] == conn.pid, conn.pid
+
+ def check_family(conn):
+ assert conn.family in (AF_INET, AF_INET6, AF_UNIX), conn.family
+ if enum is not None:
+ assert isinstance(conn.family, enum.IntEnum), conn
+ else:
+ assert isinstance(conn.family, int), conn
+ if conn.family == AF_INET:
+ # actually try to bind the local socket; ignore IPv6
+ # sockets as their address might be represented as
+ # an IPv4-mapped-address (e.g. "::127.0.0.1")
+ # and that's rejected by bind()
+ s = socket.socket(conn.family, conn.type)
+ with contextlib.closing(s):
+ try:
+ s.bind((conn.laddr[0], 0))
+ except socket.error as err:
+ if err.errno != errno.EADDRNOTAVAIL:
+ raise
+ elif conn.family == AF_UNIX:
+ assert conn.status == psutil.CONN_NONE, conn.status
+
+ def check_type(conn):
+ # SOCK_SEQPACKET may happen in case of AF_UNIX socks
+ SOCK_SEQPACKET = getattr(socket, "SOCK_SEQPACKET", object())
+ assert conn.type in (socket.SOCK_STREAM, socket.SOCK_DGRAM,
+ SOCK_SEQPACKET), conn.type
+ if enum is not None:
+ assert isinstance(conn.type, enum.IntEnum), conn
+ else:
+ assert isinstance(conn.type, int), conn
+ if conn.type == socket.SOCK_DGRAM:
+ assert conn.status == psutil.CONN_NONE, conn.status
+
+ def check_addrs(conn):
+ # check IP address and port sanity
+ for addr in (conn.laddr, conn.raddr):
+ if conn.family in (AF_INET, AF_INET6):
+ assert isinstance(addr, tuple), type(addr)
+ if not addr:
+ continue
+ assert isinstance(addr.port, int), type(addr.port)
+ assert 0 <= addr.port <= 65535, addr.port
+ check_net_address(addr.ip, conn.family)
+ elif conn.family == AF_UNIX:
+ assert isinstance(addr, str), type(addr)
+
+ def check_status(conn):
+ assert isinstance(conn.status, str), conn.status
+ valids = [getattr(psutil, x) for x in dir(psutil)
+ if x.startswith('CONN_')]
+ assert conn.status in valids, conn.status
+ if conn.family in (AF_INET, AF_INET6) and conn.type == SOCK_STREAM:
+ assert conn.status != psutil.CONN_NONE, conn.status
+ else:
+ assert conn.status == psutil.CONN_NONE, conn.status
+
+ check_ntuple(conn)
+ check_family(conn)
+ check_type(conn)
+ check_addrs(conn)
+ check_status(conn)
+
+
+# ===================================================================
+# --- compatibility
+# ===================================================================
+
+
+def reload_module(module):
+ """Backport of importlib.reload of Python 3.3+."""
+ try:
+ import importlib
+ if not hasattr(importlib, 'reload'): # python <=3.3
+ raise ImportError
+ except ImportError:
+ import imp
+ return imp.reload(module)
+ else:
+ return importlib.reload(module)
+
+
+def import_module_by_path(path):
+ name = os.path.splitext(os.path.basename(path))[0]
+ if sys.version_info[0] == 2:
+ import imp
+ return imp.load_source(name, path)
+ elif sys.version_info[:2] <= (3, 4):
+ from importlib.machinery import SourceFileLoader
+ return SourceFileLoader(name, path).load_module()
+ else:
+ import importlib.util
+ spec = importlib.util.spec_from_file_location(name, path)
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
+
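+# Usage sketch (hypothetical path): load a test module from its file path,
+# as done by psutil.tests.runner.TestLoader.
+#
+#   mod = import_module_by_path("/path/to/test_misc.py")
+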
+
+# ===================================================================
+# --- others
+# ===================================================================
+
+
+def warn(msg):
+ """Raise a warning msg."""
+ warnings.warn(msg, UserWarning)
+
+
+def is_namedtuple(x):
+ """Check if object is an instance of namedtuple."""
+ t = type(x)
+ b = t.__bases__
+ if len(b) != 1 or b[0] != tuple:
+ return False
+ f = getattr(t, '_fields', None)
+ if not isinstance(f, tuple):
+ return False
+ return all(type(n) == str for n in f)
+
+
+if POSIX:
+ @contextlib.contextmanager
+ def copyload_shared_lib(suffix=""):
+ """Ctx manager which picks up a random shared CO lib used
+ by this process, copies it in another location and loads it
+ in memory via ctypes. Return the new absolutized path.
+ """
+ exe = 'pypy' if PYPY else 'python'
+ ext = ".so"
+ dst = get_testfn(suffix=suffix + ext)
+ libs = [x.path for x in psutil.Process().memory_maps() if
+ os.path.splitext(x.path)[1] == ext and
+ exe in x.path.lower()]
+ src = random.choice(libs)
+ shutil.copyfile(src, dst)
+ try:
+ ctypes.CDLL(dst)
+ yield dst
+ finally:
+ safe_rmpath(dst)
+else:
+ @contextlib.contextmanager
+ def copyload_shared_lib(suffix=""):
+ """Ctx manager which picks up a random shared DLL lib used
+        by this process, copies it to another location and loads it
+ in memory via ctypes.
+ Return the new absolutized, normcased path.
+ """
+ from ctypes import WinError
+ from ctypes import wintypes
+ ext = ".dll"
+ dst = get_testfn(suffix=suffix + ext)
+ libs = [x.path for x in psutil.Process().memory_maps() if
+ x.path.lower().endswith(ext) and
+ 'python' in os.path.basename(x.path).lower() and
+ 'wow64' not in x.path.lower()]
+ if PYPY and not libs:
+ libs = [x.path for x in psutil.Process().memory_maps() if
+ 'pypy' in os.path.basename(x.path).lower()]
+ src = random.choice(libs)
+ shutil.copyfile(src, dst)
+ cfile = None
+ try:
+ cfile = ctypes.WinDLL(dst)
+ yield dst
+ finally:
+ # Work around OverflowError:
+ # - https://ci.appveyor.com/project/giampaolo/psutil/build/1207/
+ # job/o53330pbnri9bcw7
+ # - http://bugs.python.org/issue30286
+ # - http://stackoverflow.com/questions/23522055
+ if cfile is not None:
+ FreeLibrary = ctypes.windll.kernel32.FreeLibrary
+ FreeLibrary.argtypes = [wintypes.HMODULE]
+ ret = FreeLibrary(cfile._handle)
+ if ret == 0:
+                    raise WinError()
+ safe_rmpath(dst)
+
+
+# ===================================================================
+# --- Exit funs (first is executed last)
+# ===================================================================
+
+
+# registered first, hence executed last (atexit runs exit funs in LIFO order)
+@atexit.register
+def cleanup_test_procs():
+ reap_children(recursive=True)
+
+
+# The atexit module does not execute exit functions on SIGTERM, which is what
+# gets sent to test subprocesses; that's a problem if they import this
+# module. With the signal handler below it will. See:
+# https://gmpy.dev/blog/2016/how-to-always-execute-exit-functions-in-python
+if POSIX:
+ signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit(sig))
diff --git a/lib/psutil/tests/__main__.py b/lib/psutil/tests/__main__.py
new file mode 100644
index 0000000..e677352
--- /dev/null
+++ b/lib/psutil/tests/__main__.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Run unit tests. This is invoked by:
+$ python -m psutil.tests
+"""
+
+from .runner import main
+
+
+main()
diff --git a/lib/psutil/tests/runner.py b/lib/psutil/tests/runner.py
new file mode 100644
index 0000000..2e6f83e
--- /dev/null
+++ b/lib/psutil/tests/runner.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Unit test runner, providing new features on top of unittest module:
+- colourized output
+- parallel run (UNIX only)
+- print failures/tracebacks on CTRL+C
+- re-run failed tests only (make test-failed)
+
+Invocation examples:
+- make test
+- make test-failed
+
+Parallel:
+- make test-parallel
+- make test-process ARGS=--parallel
+"""
+
+from __future__ import print_function
+
+import atexit
+import optparse
+import os
+import sys
+import textwrap
+import time
+import unittest
+
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+try:
+ import concurrencytest # pip install concurrencytest
+except ImportError:
+ concurrencytest = None
+
+import psutil
+from psutil._common import hilite
+from psutil._common import print_color
+from psutil._common import term_supports_colors
+from psutil._compat import super
+from psutil.tests import CI_TESTING
+from psutil.tests import import_module_by_path
+from psutil.tests import print_sysinfo
+from psutil.tests import reap_children
+from psutil.tests import safe_rmpath
+
+
+VERBOSITY = 2
+FAILED_TESTS_FNAME = '.failed-tests.txt'
+NWORKERS = psutil.cpu_count() or 1
+USE_COLORS = not CI_TESTING and term_supports_colors()
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
+
+
+def cprint(msg, color, bold=False, file=None):
+ if file is None:
+ file = sys.stderr if color == 'red' else sys.stdout
+ if USE_COLORS:
+ print_color(msg, color, bold=bold, file=file)
+ else:
+ print(msg, file=file)
+
+
+class TestLoader:
+
+ testdir = HERE
+ skip_files = ['test_memleaks.py']
+ if "WHEELHOUSE_UPLOADER_USERNAME" in os.environ:
+ skip_files.extend(['test_osx.py', 'test_linux.py', 'test_posix.py'])
+
+ def _get_testmods(self):
+ return [os.path.join(self.testdir, x)
+ for x in os.listdir(self.testdir)
+ if x.startswith('test_') and x.endswith('.py') and
+ x not in self.skip_files]
+
+ def _iter_testmod_classes(self):
+ """Iterate over all test files in this directory and return
+ all TestCase classes in them.
+ """
+ for path in self._get_testmods():
+ mod = import_module_by_path(path)
+ for name in dir(mod):
+ obj = getattr(mod, name)
+ if isinstance(obj, type) and \
+ issubclass(obj, unittest.TestCase):
+ yield obj
+
+ def all(self):
+ suite = unittest.TestSuite()
+ for obj in self._iter_testmod_classes():
+ test = loadTestsFromTestCase(obj)
+ suite.addTest(test)
+ return suite
+
+ def last_failed(self):
+ # ...from previously failed test run
+ suite = unittest.TestSuite()
+ if not os.path.isfile(FAILED_TESTS_FNAME):
+ return suite
+ with open(FAILED_TESTS_FNAME, 'rt') as f:
+ names = f.read().split()
+ for n in names:
+ test = unittest.defaultTestLoader.loadTestsFromName(n)
+ suite.addTest(test)
+ return suite
+
+ def from_name(self, name):
+ if name.endswith('.py'):
+ name = os.path.splitext(os.path.basename(name))[0]
+ return unittest.defaultTestLoader.loadTestsFromName(name)
+
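+# Usage sketch: collect either the whole suite or a single module by name,
+# as done in main() below ("test_misc" is just an example name).
+#
+#   suite = TestLoader().all()
+#   suite = TestLoader().from_name("test_misc")
+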
+
+class ColouredResult(unittest.TextTestResult):
+
+ def addSuccess(self, test):
+ unittest.TestResult.addSuccess(self, test)
+ cprint("OK", "green")
+
+ def addError(self, test, err):
+ unittest.TestResult.addError(self, test, err)
+ cprint("ERROR", "red", bold=True)
+
+ def addFailure(self, test, err):
+ unittest.TestResult.addFailure(self, test, err)
+ cprint("FAIL", "red")
+
+ def addSkip(self, test, reason):
+ unittest.TestResult.addSkip(self, test, reason)
+ cprint("skipped: %s" % reason.strip(), "brown")
+
+ def printErrorList(self, flavour, errors):
+ flavour = hilite(flavour, "red", bold=flavour == 'ERROR')
+ super().printErrorList(flavour, errors)
+
+
+class ColouredTextRunner(unittest.TextTestRunner):
+ """
+ A coloured text runner which also prints failed tests on KeyboardInterrupt
+    and saves failed tests to a file so that they can be re-run.
+ """
+ resultclass = ColouredResult if USE_COLORS else unittest.TextTestResult
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.failed_tnames = set()
+
+ def _makeResult(self):
+ # Store result instance so that it can be accessed on
+ # KeyboardInterrupt.
+ self.result = super()._makeResult()
+ return self.result
+
+ def _write_last_failed(self):
+ if self.failed_tnames:
+ with open(FAILED_TESTS_FNAME, 'wt') as f:
+ for tname in self.failed_tnames:
+ f.write(tname + '\n')
+
+ def _save_result(self, result):
+ if not result.wasSuccessful():
+ for t in result.errors + result.failures:
+ tname = t[0].id()
+ self.failed_tnames.add(tname)
+
+ def _run(self, suite):
+ try:
+ result = super().run(suite)
+ except (KeyboardInterrupt, SystemExit):
+            result = self.result
+            result.printErrors()
+            sys.exit(1)
+ else:
+ self._save_result(result)
+ return result
+
+ def _exit(self, success):
+ if success:
+ cprint("SUCCESS", "green", bold=True)
+ safe_rmpath(FAILED_TESTS_FNAME)
+ sys.exit(0)
+ else:
+ cprint("FAILED", "red", bold=True)
+ self._write_last_failed()
+ sys.exit(1)
+
+ def run(self, suite):
+ result = self._run(suite)
+ self._exit(result.wasSuccessful())
+
+
+class ParallelRunner(ColouredTextRunner):
+
+ @staticmethod
+ def _parallelize(suite):
+ def fdopen(fd, mode, *kwds):
+ stream = orig_fdopen(fd, mode)
+ atexit.register(stream.close)
+ return stream
+
+ # Monkey patch concurrencytest lib bug (fdopen() stream not closed).
+ # https://github.com/cgoldberg/concurrencytest/issues/11
+ orig_fdopen = os.fdopen
+ concurrencytest.os.fdopen = fdopen
+ forker = concurrencytest.fork_for_tests(NWORKERS)
+ return concurrencytest.ConcurrentTestSuite(suite, forker)
+
+ @staticmethod
+ def _split_suite(suite):
+ serial = unittest.TestSuite()
+ parallel = unittest.TestSuite()
+ for test in suite:
+ if test.countTestCases() == 0:
+ continue
+ elif isinstance(test, unittest.TestSuite):
+ test_class = test._tests[0].__class__
+ elif isinstance(test, unittest.TestCase):
+ test_class = test
+ else:
+ raise TypeError("can't recognize type %r" % test)
+
+ if getattr(test_class, '_serialrun', False):
+ serial.addTest(test)
+ else:
+ parallel.addTest(test)
+ return (serial, parallel)
+
+ def run(self, suite):
+ ser_suite, par_suite = self._split_suite(suite)
+ par_suite = self._parallelize(par_suite)
+
+ # run parallel
+ cprint("starting parallel tests using %s workers" % NWORKERS,
+ "green", bold=True)
+ t = time.time()
+ par = self._run(par_suite)
+ par_elapsed = time.time() - t
+
+ # At this point we should have N zombies (the workers), which
+ # will disappear with wait().
+ orphans = psutil.Process().children()
+ gone, alive = psutil.wait_procs(orphans, timeout=1)
+ if alive:
+ cprint("alive processes %s" % alive, "red")
+ reap_children()
+
+ # run serial
+ t = time.time()
+ ser = self._run(ser_suite)
+ ser_elapsed = time.time() - t
+
+ # print
+ if not par.wasSuccessful() and ser_suite.countTestCases() > 0:
+ par.printErrors() # print them again at the bottom
+ par_fails, par_errs, par_skips = map(len, (par.failures,
+ par.errors,
+ par.skipped))
+ ser_fails, ser_errs, ser_skips = map(len, (ser.failures,
+ ser.errors,
+ ser.skipped))
+ print(textwrap.dedent("""
+ +----------+----------+----------+----------+----------+----------+
+ | | total | failures | errors | skipped | time |
+ +----------+----------+----------+----------+----------+----------+
+ | parallel | %3s | %3s | %3s | %3s | %.2fs |
+ +----------+----------+----------+----------+----------+----------+
+ | serial | %3s | %3s | %3s | %3s | %.2fs |
+ +----------+----------+----------+----------+----------+----------+
+ """ % (par.testsRun, par_fails, par_errs, par_skips, par_elapsed,
+ ser.testsRun, ser_fails, ser_errs, ser_skips, ser_elapsed)))
+ print("Ran %s tests in %.3fs using %s workers" % (
+ par.testsRun + ser.testsRun, par_elapsed + ser_elapsed, NWORKERS))
+ ok = par.wasSuccessful() and ser.wasSuccessful()
+ self._exit(ok)
+
+
+def get_runner(parallel=False):
+ def warn(msg):
+ cprint(msg + " Running serial tests instead.", "red")
+ if parallel:
+ if psutil.WINDOWS:
+ warn("Can't run parallel tests on Windows.")
+ elif concurrencytest is None:
+ warn("concurrencytest module is not installed.")
+ elif NWORKERS == 1:
+ warn("Only 1 CPU available.")
+ else:
+ return ParallelRunner(verbosity=VERBOSITY)
+ return ColouredTextRunner(verbosity=VERBOSITY)
+
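+# Usage sketch: build a runner and feed it a suite (same flow as main()).
+#
+#   runner = get_runner(parallel=True)
+#   runner.run(TestLoader().all())
+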
+
+# Used by test_*.py modules.
+def run_from_name(name):
+ if CI_TESTING:
+ print_sysinfo()
+ suite = TestLoader().from_name(name)
+ runner = get_runner()
+ runner.run(suite)
+
+
+def setup():
+ psutil._set_debug(True)
+
+
+def main():
+ setup()
+ usage = "python3 -m psutil.tests [opts] [test-name]"
+ parser = optparse.OptionParser(usage=usage, description="run unit tests")
+ parser.add_option("--last-failed",
+ action="store_true", default=False,
+ help="only run last failed tests")
+ parser.add_option("--parallel",
+ action="store_true", default=False,
+ help="run tests in parallel")
+ opts, args = parser.parse_args()
+
+ if not opts.last_failed:
+ safe_rmpath(FAILED_TESTS_FNAME)
+
+ # loader
+ loader = TestLoader()
+ if args:
+ if len(args) > 1:
+ parser.print_usage()
+            sys.exit(1)
+ else:
+ suite = loader.from_name(args[0])
+ elif opts.last_failed:
+ suite = loader.last_failed()
+ else:
+ suite = loader.all()
+
+ if CI_TESTING:
+ print_sysinfo()
+ runner = get_runner(opts.parallel)
+ runner.run(suite)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/psutil/tests/test_aix.py b/lib/psutil/tests/test_aix.py
new file mode 100644
index 0000000..4a23b77
--- /dev/null
+++ b/lib/psutil/tests/test_aix.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'
+# Copyright (c) 2017, Arnon Yaari
+# All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""AIX specific tests."""
+
+import re
+import unittest
+
+import psutil
+from psutil import AIX
+from psutil.tests import PsutilTestCase
+from psutil.tests import sh
+
+
+@unittest.skipIf(not AIX, "AIX only")
+class AIXSpecificTestCase(PsutilTestCase):
+
+ def test_virtual_memory(self):
+ out = sh('/usr/bin/svmon -O unit=KB')
+ re_pattern = r"memory\s*"
+ for field in ("size inuse free pin virtual available mmode").split():
+ re_pattern += r"(?P<%s>\S+)\s+" % (field,)
+ matchobj = re.search(re_pattern, out)
+
+ self.assertIsNotNone(
+ matchobj, "svmon command returned unexpected output")
+
+ KB = 1024
+ total = int(matchobj.group("size")) * KB
+ available = int(matchobj.group("available")) * KB
+ used = int(matchobj.group("inuse")) * KB
+ free = int(matchobj.group("free")) * KB
+
+ psutil_result = psutil.virtual_memory()
+
+ # TOLERANCE_SYS_MEM from psutil.tests is not enough. For some reason
+ # we're seeing differences of ~1.2 MB. 2 MB is still a good tolerance
+ # when compared to GBs.
+ TOLERANCE_SYS_MEM = 2 * KB * KB # 2 MB
+ self.assertEqual(psutil_result.total, total)
+ self.assertAlmostEqual(
+ psutil_result.used, used, delta=TOLERANCE_SYS_MEM)
+ self.assertAlmostEqual(
+ psutil_result.available, available, delta=TOLERANCE_SYS_MEM)
+ self.assertAlmostEqual(
+ psutil_result.free, free, delta=TOLERANCE_SYS_MEM)
+
+ def test_swap_memory(self):
+ out = sh('/usr/sbin/lsps -a')
+ # From the man page, "The size is given in megabytes" so we assume
+ # we'll always have 'MB' in the result
+ # TODO maybe try to use "swap -l" to check "used" too, but its units
+ # are not guaranteed to be "MB" so parsing may not be consistent
+ matchobj = re.search(r"(?P<space>\S+)\s+"
+ r"(?P<vol>\S+)\s+"
+ r"(?P<vg>\S+)\s+"
+ r"(?P<size>\d+)MB", out)
+
+ self.assertIsNotNone(
+ matchobj, "lsps command returned unexpected output")
+
+ total_mb = int(matchobj.group("size"))
+ MB = 1024 ** 2
+ psutil_result = psutil.swap_memory()
+ # we divide our result by MB instead of multiplying the lsps value by
+ # MB because lsps may round down, so we round down too
+ self.assertEqual(int(psutil_result.total / MB), total_mb)
+
+ def test_cpu_stats(self):
+ out = sh('/usr/bin/mpstat -a')
+
+ re_pattern = r"ALL\s*"
+ for field in ("min maj mpcs mpcr dev soft dec ph cs ics bound rq "
+ "push S3pull S3grd S0rd S1rd S2rd S3rd S4rd S5rd "
+ "sysc").split():
+ re_pattern += r"(?P<%s>\S+)\s+" % (field,)
+ matchobj = re.search(re_pattern, out)
+
+ self.assertIsNotNone(
+ matchobj, "mpstat command returned unexpected output")
+
+ # numbers are usually in the millions so 1000 is ok for tolerance
+ CPU_STATS_TOLERANCE = 1000
+ psutil_result = psutil.cpu_stats()
+ self.assertAlmostEqual(
+ psutil_result.ctx_switches,
+ int(matchobj.group("cs")),
+ delta=CPU_STATS_TOLERANCE)
+ self.assertAlmostEqual(
+ psutil_result.syscalls,
+ int(matchobj.group("sysc")),
+ delta=CPU_STATS_TOLERANCE)
+ self.assertAlmostEqual(
+ psutil_result.interrupts,
+ int(matchobj.group("dev")),
+ delta=CPU_STATS_TOLERANCE)
+ self.assertAlmostEqual(
+ psutil_result.soft_interrupts,
+ int(matchobj.group("soft")),
+ delta=CPU_STATS_TOLERANCE)
+
+ def test_cpu_count_logical(self):
+ out = sh('/usr/bin/mpstat -a')
+ mpstat_lcpu = int(re.search(r"lcpu=(\d+)", out).group(1))
+ psutil_lcpu = psutil.cpu_count(logical=True)
+ self.assertEqual(mpstat_lcpu, psutil_lcpu)
+
+ def test_net_if_addrs_names(self):
+ out = sh('/etc/ifconfig -l')
+ ifconfig_names = set(out.split())
+ psutil_names = set(psutil.net_if_addrs().keys())
+ self.assertSetEqual(ifconfig_names, psutil_names)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_bsd.py b/lib/psutil/tests/test_bsd.py
new file mode 100644
index 0000000..e541547
--- /dev/null
+++ b/lib/psutil/tests/test_bsd.py
@@ -0,0 +1,568 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO: (FreeBSD) add test for comparing connections with 'sockstat' cmd.
+
+
+"""Tests specific to all BSD platforms."""
+
+
+import datetime
+import os
+import re
+import time
+import unittest
+
+import psutil
+from psutil import BSD
+from psutil import FREEBSD
+from psutil import NETBSD
+from psutil import OPENBSD
+from psutil.tests import HAS_BATTERY
+from psutil.tests import TOLERANCE_SYS_MEM
+from psutil.tests import PsutilTestCase
+from psutil.tests import retry_on_failure
+from psutil.tests import sh
+from psutil.tests import spawn_testproc
+from psutil.tests import terminate
+from psutil.tests import which
+
+
+if BSD:
+ from psutil._psutil_posix import getpagesize
+
+ PAGESIZE = getpagesize()
+ # muse requires root privileges
+ MUSE_AVAILABLE = True if os.getuid() == 0 and which('muse') else False
+else:
+ PAGESIZE = None
+ MUSE_AVAILABLE = False
+
+
+def sysctl(cmdline):
+ """Expects a sysctl command with an argument and parse the result
+ returning only the value of interest.
+ """
+ result = sh("sysctl " + cmdline)
+ if FREEBSD:
+ result = result[result.find(": ") + 2:]
+ elif OPENBSD or NETBSD:
+ result = result[result.find("=") + 1:]
+ try:
+ return int(result)
+ except ValueError:
+ return result
+
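+# Usage sketch (hypothetical values): sysctl("hw.ncpu") returns an int such
+# as 8, while non-numeric results (e.g. for "dev.cpu.0.freq_levels") are
+# returned as the raw string.
+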
+
+def muse(field):
+ """Thin wrapper around 'muse' cmdline utility."""
+ out = sh('muse')
+ for line in out.split('\n'):
+ if line.startswith(field):
+ break
+ else:
+ raise ValueError("line not found")
+ return int(line.split()[1])
+
+
+# =====================================================================
+# --- All BSD*
+# =====================================================================
+
+
+@unittest.skipIf(not BSD, "BSD only")
+class BSDTestCase(PsutilTestCase):
+ """Generic tests common to all BSD variants."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc().pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ @unittest.skipIf(NETBSD, "-o lstart doesn't work on NETBSD")
+ def test_process_create_time(self):
+ output = sh("ps -o lstart -p %s" % self.pid)
+ start_ps = output.replace('STARTED', '').strip()
+ start_psutil = psutil.Process(self.pid).create_time()
+ start_psutil = time.strftime("%a %b %e %H:%M:%S %Y",
+ time.localtime(start_psutil))
+ self.assertEqual(start_ps, start_psutil)
+
+ def test_disks(self):
+ # test psutil.disk_usage() and psutil.disk_partitions()
+ # against "df -a"
+ def df(path):
+ out = sh('df -k "%s"' % path).strip()
+ lines = out.split('\n')
+ lines.pop(0)
+ line = lines.pop(0)
+ dev, total, used, free = line.split()[:4]
+ if dev == 'none':
+ dev = ''
+ total = int(total) * 1024
+ used = int(used) * 1024
+ free = int(free) * 1024
+ return dev, total, used, free
+
+ for part in psutil.disk_partitions(all=False):
+ usage = psutil.disk_usage(part.mountpoint)
+ dev, total, used, free = df(part.mountpoint)
+ self.assertEqual(part.device, dev)
+ self.assertEqual(usage.total, total)
+ # 10 MB tolerance
+            if abs(usage.free - free) > 10 * 1024 * 1024:
+                self.fail("psutil=%s, df=%s" % (usage.free, free))
+            if abs(usage.used - used) > 10 * 1024 * 1024:
+                self.fail("psutil=%s, df=%s" % (usage.used, used))
+
+ @unittest.skipIf(not which('sysctl'), "sysctl cmd not available")
+ def test_cpu_count_logical(self):
+ syst = sysctl("hw.ncpu")
+ self.assertEqual(psutil.cpu_count(logical=True), syst)
+
+ @unittest.skipIf(not which('sysctl'), "sysctl cmd not available")
+ def test_virtual_memory_total(self):
+ num = sysctl('hw.physmem')
+ self.assertEqual(num, psutil.virtual_memory().total)
+
+ def test_net_if_stats(self):
+ for name, stats in psutil.net_if_stats().items():
+ try:
+ out = sh("ifconfig %s" % name)
+ except RuntimeError:
+ pass
+ else:
+ self.assertEqual(stats.isup, 'RUNNING' in out, msg=out)
+ if "mtu" in out:
+ self.assertEqual(stats.mtu,
+ int(re.findall(r'mtu (\d+)', out)[0]))
+
+
+# =====================================================================
+# --- FreeBSD
+# =====================================================================
+
+
+@unittest.skipIf(not FREEBSD, "FREEBSD only")
+class FreeBSDPsutilTestCase(PsutilTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc().pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ @retry_on_failure()
+ def test_memory_maps(self):
+ out = sh('procstat -v %s' % self.pid)
+ maps = psutil.Process(self.pid).memory_maps(grouped=False)
+ lines = out.split('\n')[1:]
+ while lines:
+ line = lines.pop()
+ fields = line.split()
+ _, start, stop, perms, res = fields[:5]
+ map = maps.pop()
+ self.assertEqual("%s-%s" % (start, stop), map.addr)
+ self.assertEqual(int(res), map.rss)
+ if not map.path.startswith('['):
+ self.assertEqual(fields[10], map.path)
+
+ def test_exe(self):
+ out = sh('procstat -b %s' % self.pid)
+ self.assertEqual(psutil.Process(self.pid).exe(),
+ out.split('\n')[1].split()[-1])
+
+ def test_cmdline(self):
+ out = sh('procstat -c %s' % self.pid)
+ self.assertEqual(' '.join(psutil.Process(self.pid).cmdline()),
+ ' '.join(out.split('\n')[1].split()[2:]))
+
+ def test_uids_gids(self):
+ out = sh('procstat -s %s' % self.pid)
+ euid, ruid, suid, egid, rgid, sgid = out.split('\n')[1].split()[2:8]
+ p = psutil.Process(self.pid)
+ uids = p.uids()
+ gids = p.gids()
+ self.assertEqual(uids.real, int(ruid))
+ self.assertEqual(uids.effective, int(euid))
+ self.assertEqual(uids.saved, int(suid))
+ self.assertEqual(gids.real, int(rgid))
+ self.assertEqual(gids.effective, int(egid))
+ self.assertEqual(gids.saved, int(sgid))
+
+ @retry_on_failure()
+ def test_ctx_switches(self):
+ tested = []
+ out = sh('procstat -r %s' % self.pid)
+ p = psutil.Process(self.pid)
+ for line in out.split('\n'):
+ line = line.lower().strip()
+ if ' voluntary context' in line:
+ pstat_value = int(line.split()[-1])
+ psutil_value = p.num_ctx_switches().voluntary
+ self.assertEqual(pstat_value, psutil_value)
+ tested.append(None)
+ elif ' involuntary context' in line:
+ pstat_value = int(line.split()[-1])
+ psutil_value = p.num_ctx_switches().involuntary
+ self.assertEqual(pstat_value, psutil_value)
+ tested.append(None)
+ if len(tested) != 2:
+ raise RuntimeError("couldn't find lines match in procstat out")
+
+ @retry_on_failure()
+ def test_cpu_times(self):
+ tested = []
+ out = sh('procstat -r %s' % self.pid)
+ p = psutil.Process(self.pid)
+ for line in out.split('\n'):
+ line = line.lower().strip()
+ if 'user time' in line:
+ pstat_value = float('0.' + line.split()[-1].split('.')[-1])
+ psutil_value = p.cpu_times().user
+ self.assertEqual(pstat_value, psutil_value)
+ tested.append(None)
+ elif 'system time' in line:
+ pstat_value = float('0.' + line.split()[-1].split('.')[-1])
+ psutil_value = p.cpu_times().system
+ self.assertEqual(pstat_value, psutil_value)
+ tested.append(None)
+ if len(tested) != 2:
+ raise RuntimeError("couldn't find lines match in procstat out")
+
+
+@unittest.skipIf(not FREEBSD, "FREEBSD only")
+class FreeBSDSystemTestCase(PsutilTestCase):
+
+ @staticmethod
+ def parse_swapinfo():
+ # the last line is always the total
+ output = sh("swapinfo -k").splitlines()[-1]
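+        # Hypothetical sample of that last (totals) line:
+        #   "Total  2097152  0  2097152  0%"
+        # re.split() below then gives parts[1:4] == ['2097152', '0', '2097152'].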
+ parts = re.split(r'\s+', output)
+
+ if not parts:
+ raise ValueError("Can't parse swapinfo: %s" % output)
+
+ # the size is in 1k units, so multiply by 1024
+ total, used, free = (int(p) * 1024 for p in parts[1:4])
+ return total, used, free
+
+ def test_cpu_frequency_against_sysctl(self):
+        # Currently only cpu 0's frequency is supported in FreeBSD.
+        # All other cores use the same frequency.
+ sensor = "dev.cpu.0.freq"
+ try:
+ sysctl_result = int(sysctl(sensor))
+ except RuntimeError:
+ self.skipTest("frequencies not supported by kernel")
+ self.assertEqual(psutil.cpu_freq().current, sysctl_result)
+
+ sensor = "dev.cpu.0.freq_levels"
+ sysctl_result = sysctl(sensor)
+ # sysctl returns a string of the format:
+ # <freq_level_1>/<voltage_level_1> <freq_level_2>/<voltage_level_2>...
+ # Ordered highest available to lowest available.
+ max_freq = int(sysctl_result.split()[0].split("/")[0])
+ min_freq = int(sysctl_result.split()[-1].split("/")[0])
+ self.assertEqual(psutil.cpu_freq().max, max_freq)
+ self.assertEqual(psutil.cpu_freq().min, min_freq)
+
+ # --- virtual_memory(); tests against sysctl
+
+ @retry_on_failure()
+ def test_vmem_active(self):
+ syst = sysctl("vm.stats.vm.v_active_count") * PAGESIZE
+ self.assertAlmostEqual(psutil.virtual_memory().active, syst,
+ delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_inactive(self):
+ syst = sysctl("vm.stats.vm.v_inactive_count") * PAGESIZE
+ self.assertAlmostEqual(psutil.virtual_memory().inactive, syst,
+ delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_wired(self):
+ syst = sysctl("vm.stats.vm.v_wire_count") * PAGESIZE
+ self.assertAlmostEqual(psutil.virtual_memory().wired, syst,
+ delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_cached(self):
+ syst = sysctl("vm.stats.vm.v_cache_count") * PAGESIZE
+ self.assertAlmostEqual(psutil.virtual_memory().cached, syst,
+ delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_free(self):
+ syst = sysctl("vm.stats.vm.v_free_count") * PAGESIZE
+ self.assertAlmostEqual(psutil.virtual_memory().free, syst,
+ delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_buffers(self):
+ syst = sysctl("vfs.bufspace")
+ self.assertAlmostEqual(psutil.virtual_memory().buffers, syst,
+ delta=TOLERANCE_SYS_MEM)
+
+ # --- virtual_memory(); tests against muse
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ def test_muse_vmem_total(self):
+ num = muse('Total')
+ self.assertEqual(psutil.virtual_memory().total, num)
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ @retry_on_failure()
+ def test_muse_vmem_active(self):
+ num = muse('Active')
+ self.assertAlmostEqual(psutil.virtual_memory().active, num,
+ delta=TOLERANCE_SYS_MEM)
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ @retry_on_failure()
+ def test_muse_vmem_inactive(self):
+ num = muse('Inactive')
+ self.assertAlmostEqual(psutil.virtual_memory().inactive, num,
+ delta=TOLERANCE_SYS_MEM)
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ @retry_on_failure()
+ def test_muse_vmem_wired(self):
+ num = muse('Wired')
+ self.assertAlmostEqual(psutil.virtual_memory().wired, num,
+ delta=TOLERANCE_SYS_MEM)
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ @retry_on_failure()
+ def test_muse_vmem_cached(self):
+ num = muse('Cache')
+ self.assertAlmostEqual(psutil.virtual_memory().cached, num,
+ delta=TOLERANCE_SYS_MEM)
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ @retry_on_failure()
+ def test_muse_vmem_free(self):
+ num = muse('Free')
+ self.assertAlmostEqual(psutil.virtual_memory().free, num,
+ delta=TOLERANCE_SYS_MEM)
+
+ @unittest.skipIf(not MUSE_AVAILABLE, "muse not installed")
+ @retry_on_failure()
+ def test_muse_vmem_buffers(self):
+ num = muse('Buffer')
+ self.assertAlmostEqual(psutil.virtual_memory().buffers, num,
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_cpu_stats_ctx_switches(self):
+ self.assertAlmostEqual(psutil.cpu_stats().ctx_switches,
+ sysctl('vm.stats.sys.v_swtch'), delta=1000)
+
+ def test_cpu_stats_interrupts(self):
+ self.assertAlmostEqual(psutil.cpu_stats().interrupts,
+ sysctl('vm.stats.sys.v_intr'), delta=1000)
+
+ def test_cpu_stats_soft_interrupts(self):
+ self.assertAlmostEqual(psutil.cpu_stats().soft_interrupts,
+ sysctl('vm.stats.sys.v_soft'), delta=1000)
+
+ @retry_on_failure()
+ def test_cpu_stats_syscalls(self):
+ # pretty high tolerance but it looks like it's OK.
+ self.assertAlmostEqual(psutil.cpu_stats().syscalls,
+ sysctl('vm.stats.sys.v_syscall'), delta=200000)
+
+ # def test_cpu_stats_traps(self):
+ # self.assertAlmostEqual(psutil.cpu_stats().traps,
+ # sysctl('vm.stats.sys.v_trap'), delta=1000)
+
+ # --- swap memory
+
+ def test_swapmem_free(self):
+ total, used, free = self.parse_swapinfo()
+ self.assertAlmostEqual(
+ psutil.swap_memory().free, free, delta=TOLERANCE_SYS_MEM)
+
+ def test_swapmem_used(self):
+ total, used, free = self.parse_swapinfo()
+ self.assertAlmostEqual(
+ psutil.swap_memory().used, used, delta=TOLERANCE_SYS_MEM)
+
+ def test_swapmem_total(self):
+ total, used, free = self.parse_swapinfo()
+ self.assertAlmostEqual(
+ psutil.swap_memory().total, total, delta=TOLERANCE_SYS_MEM)
+
+ # --- others
+
+ def test_boot_time(self):
+        s = sysctl('kern.boottime')
+ s = s[s.find(" sec = ") + 7:]
+ s = s[:s.find(',')]
+ btime = int(s)
+ self.assertEqual(btime, psutil.boot_time())
+
+ # --- sensors_battery
+
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_sensors_battery(self):
+ def secs2hours(secs):
+ m, s = divmod(secs, 60)
+ h, m = divmod(m, 60)
+ return "%d:%02d" % (h, m)
+
+ out = sh("acpiconf -i 0")
+ fields = dict([(x.split('\t')[0], x.split('\t')[-1])
+ for x in out.split("\n")])
+ metrics = psutil.sensors_battery()
+ percent = int(fields['Remaining capacity:'].replace('%', ''))
+ remaining_time = fields['Remaining time:']
+ self.assertEqual(metrics.percent, percent)
+ if remaining_time == 'unknown':
+ self.assertEqual(metrics.secsleft, psutil.POWER_TIME_UNLIMITED)
+ else:
+ self.assertEqual(secs2hours(metrics.secsleft), remaining_time)
+
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_sensors_battery_against_sysctl(self):
+ self.assertEqual(psutil.sensors_battery().percent,
+ sysctl("hw.acpi.battery.life"))
+ self.assertEqual(psutil.sensors_battery().power_plugged,
+ sysctl("hw.acpi.acline") == 1)
+ secsleft = psutil.sensors_battery().secsleft
+ if secsleft < 0:
+ self.assertEqual(sysctl("hw.acpi.battery.time"), -1)
+ else:
+ self.assertEqual(secsleft, sysctl("hw.acpi.battery.time") * 60)
+
+ @unittest.skipIf(HAS_BATTERY, "has battery")
+ def test_sensors_battery_no_battery(self):
+ # If no battery is present one of these calls is supposed
+ # to fail, see:
+ # https://github.com/giampaolo/psutil/issues/1074
+ with self.assertRaises(RuntimeError):
+ sysctl("hw.acpi.battery.life")
+ sysctl("hw.acpi.battery.time")
+ sysctl("hw.acpi.acline")
+ self.assertIsNone(psutil.sensors_battery())
+
+ # --- sensors_temperatures
+
+ def test_sensors_temperatures_against_sysctl(self):
+ num_cpus = psutil.cpu_count(True)
+ for cpu in range(num_cpus):
+ sensor = "dev.cpu.%s.temperature" % cpu
+ # sysctl returns a string in the format 46.0C
+ try:
+ sysctl_result = int(float(sysctl(sensor)[:-1]))
+ except RuntimeError:
+ self.skipTest("temperatures not supported by kernel")
+ self.assertAlmostEqual(
+ psutil.sensors_temperatures()["coretemp"][cpu].current,
+ sysctl_result, delta=10)
+
+ sensor = "dev.cpu.%s.coretemp.tjmax" % cpu
+ sysctl_result = int(float(sysctl(sensor)[:-1]))
+ self.assertEqual(
+ psutil.sensors_temperatures()["coretemp"][cpu].high,
+ sysctl_result)
+
+
+# =====================================================================
+# --- OpenBSD
+# =====================================================================
+
+
+@unittest.skipIf(not OPENBSD, "OPENBSD only")
+class OpenBSDTestCase(PsutilTestCase):
+
+ def test_boot_time(self):
+ s = sysctl('kern.boottime')
+ sys_bt = datetime.datetime.strptime(s, "%a %b %d %H:%M:%S %Y")
+ psutil_bt = datetime.datetime.fromtimestamp(psutil.boot_time())
+ self.assertEqual(sys_bt, psutil_bt)
+
+
+# =====================================================================
+# --- NetBSD
+# =====================================================================
+
+
+@unittest.skipIf(not NETBSD, "NETBSD only")
+class NetBSDTestCase(PsutilTestCase):
+
+ @staticmethod
+ def parse_meminfo(look_for):
+ with open('/proc/meminfo', 'rt') as f:
+ for line in f:
+ if line.startswith(look_for):
+ return int(line.split()[1]) * 1024
+ raise ValueError("can't find %s" % look_for)
+
+ def test_vmem_total(self):
+ self.assertEqual(
+ psutil.virtual_memory().total, self.parse_meminfo("MemTotal:"))
+
+ def test_vmem_free(self):
+ self.assertAlmostEqual(
+ psutil.virtual_memory().free, self.parse_meminfo("MemFree:"),
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_vmem_buffers(self):
+ self.assertAlmostEqual(
+ psutil.virtual_memory().buffers, self.parse_meminfo("Buffers:"),
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_vmem_shared(self):
+ self.assertAlmostEqual(
+ psutil.virtual_memory().shared, self.parse_meminfo("MemShared:"),
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_swapmem_total(self):
+ self.assertAlmostEqual(
+ psutil.swap_memory().total, self.parse_meminfo("SwapTotal:"),
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_swapmem_free(self):
+ self.assertAlmostEqual(
+ psutil.swap_memory().free, self.parse_meminfo("SwapFree:"),
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_swapmem_used(self):
+ smem = psutil.swap_memory()
+ self.assertEqual(smem.used, smem.total - smem.free)
+
+ def test_cpu_stats_interrupts(self):
+ with open('/proc/stat', 'rb') as f:
+ for line in f:
+ if line.startswith(b'intr'):
+ interrupts = int(line.split()[1])
+ break
+ else:
+ raise ValueError("couldn't find line")
+ self.assertAlmostEqual(
+ psutil.cpu_stats().interrupts, interrupts, delta=1000)
+
+ def test_cpu_stats_ctx_switches(self):
+ with open('/proc/stat', 'rb') as f:
+ for line in f:
+ if line.startswith(b'ctxt'):
+ ctx_switches = int(line.split()[1])
+ break
+ else:
+ raise ValueError("couldn't find line")
+ self.assertAlmostEqual(
+ psutil.cpu_stats().ctx_switches, ctx_switches, delta=1000)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_connections.py b/lib/psutil/tests/test_connections.py
new file mode 100644
index 0000000..f3b1f83
--- /dev/null
+++ b/lib/psutil/tests/test_connections.py
@@ -0,0 +1,554 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for net_connections() and Process.connections() APIs."""
+
+import os
+import socket
+import textwrap
+import unittest
+from contextlib import closing
+from socket import AF_INET
+from socket import AF_INET6
+from socket import SOCK_DGRAM
+from socket import SOCK_STREAM
+
+import psutil
+from psutil import FREEBSD
+from psutil import LINUX
+from psutil import MACOS
+from psutil import NETBSD
+from psutil import OPENBSD
+from psutil import POSIX
+from psutil import SUNOS
+from psutil import WINDOWS
+from psutil._common import supports_ipv6
+from psutil._compat import PY3
+from psutil.tests import AF_UNIX
+from psutil.tests import HAS_CONNECTIONS_UNIX
+from psutil.tests import SKIP_SYSCONS
+from psutil.tests import PsutilTestCase
+from psutil.tests import bind_socket
+from psutil.tests import bind_unix_socket
+from psutil.tests import check_connection_ntuple
+from psutil.tests import create_sockets
+from psutil.tests import reap_children
+from psutil.tests import retry_on_failure
+from psutil.tests import serialrun
+from psutil.tests import skip_on_access_denied
+from psutil.tests import tcp_socketpair
+from psutil.tests import unix_socketpair
+from psutil.tests import wait_for_file
+
+
+thisproc = psutil.Process()
+SOCK_SEQPACKET = getattr(socket, "SOCK_SEQPACKET", object())
+
+
+@serialrun
+class ConnectionTestCase(PsutilTestCase):
+
+ def setUp(self):
+ if not (NETBSD or FREEBSD):
+ # process opens a UNIX socket to /var/log/run.
+ cons = thisproc.connections(kind='all')
+ assert not cons, cons
+
+ def tearDown(self):
+ if not (FREEBSD or NETBSD):
+ # Make sure we closed all resources.
+            # NetBSD opens a UNIX socket to /var/run/log.
+ cons = thisproc.connections(kind='all')
+ assert not cons, cons
+
+ def compare_procsys_connections(self, pid, proc_cons, kind='all'):
+ """Given a process PID and its list of connections compare
+ those against system-wide connections retrieved via
+ psutil.net_connections.
+ """
+ try:
+ sys_cons = psutil.net_connections(kind=kind)
+ except psutil.AccessDenied:
+ # On MACOS, system-wide connections are retrieved by iterating
+ # over all processes
+ if MACOS:
+ return
+ else:
+ raise
+        # Filter for this proc PID and exclude PIDs from the tuple.
+ sys_cons = [c[:-1] for c in sys_cons if c.pid == pid]
+ sys_cons.sort()
+ proc_cons.sort()
+ self.assertEqual(proc_cons, sys_cons)
+
+
+class TestBasicOperations(ConnectionTestCase):
+
+ @unittest.skipIf(SKIP_SYSCONS, "requires root")
+ def test_system(self):
+ with create_sockets():
+ for conn in psutil.net_connections(kind='all'):
+ check_connection_ntuple(conn)
+
+ def test_process(self):
+ with create_sockets():
+ for conn in psutil.Process().connections(kind='all'):
+ check_connection_ntuple(conn)
+
+ def test_invalid_kind(self):
+ self.assertRaises(ValueError, thisproc.connections, kind='???')
+ self.assertRaises(ValueError, psutil.net_connections, kind='???')
+
+
+@serialrun
+class TestUnconnectedSockets(ConnectionTestCase):
+ """Tests sockets which are open but not connected to anything."""
+
+ def get_conn_from_sock(self, sock):
+ cons = thisproc.connections(kind='all')
+ smap = dict([(c.fd, c) for c in cons])
+ if NETBSD or FREEBSD:
+            # NetBSD opens a UNIX socket to /var/run/log
+ # so there may be more connections.
+ return smap[sock.fileno()]
+ else:
+ self.assertEqual(len(cons), 1)
+ if cons[0].fd != -1:
+ self.assertEqual(smap[sock.fileno()].fd, sock.fileno())
+ return cons[0]
+
+ def check_socket(self, sock):
+ """Given a socket, makes sure it matches the one obtained
+ via psutil. It assumes this process created one connection
+ only (the one supposed to be checked).
+ """
+ conn = self.get_conn_from_sock(sock)
+ check_connection_ntuple(conn)
+
+ # fd, family, type
+ if conn.fd != -1:
+ self.assertEqual(conn.fd, sock.fileno())
+ self.assertEqual(conn.family, sock.family)
+ # see: http://bugs.python.org/issue30204
+ self.assertEqual(
+ conn.type, sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE))
+
+ # local address
+ laddr = sock.getsockname()
+ if not laddr and PY3 and isinstance(laddr, bytes):
+ # See: http://bugs.python.org/issue30205
+ laddr = laddr.decode()
+ if sock.family == AF_INET6:
+ laddr = laddr[:2]
+ if sock.family == AF_UNIX and OPENBSD:
+ # No addresses are set for UNIX sockets on OpenBSD.
+ pass
+ else:
+ self.assertEqual(conn.laddr, laddr)
+
+ # XXX Solaris can't retrieve system-wide UNIX sockets
+ if sock.family == AF_UNIX and HAS_CONNECTIONS_UNIX:
+ cons = thisproc.connections(kind='all')
+ self.compare_procsys_connections(os.getpid(), cons, kind='all')
+ return conn
+
+ def test_tcp_v4(self):
+ addr = ("127.0.0.1", 0)
+ with closing(bind_socket(AF_INET, SOCK_STREAM, addr=addr)) as sock:
+ conn = self.check_socket(sock)
+ assert not conn.raddr
+ self.assertEqual(conn.status, psutil.CONN_LISTEN)
+
+ @unittest.skipIf(not supports_ipv6(), "IPv6 not supported")
+ def test_tcp_v6(self):
+ addr = ("::1", 0)
+ with closing(bind_socket(AF_INET6, SOCK_STREAM, addr=addr)) as sock:
+ conn = self.check_socket(sock)
+ assert not conn.raddr
+ self.assertEqual(conn.status, psutil.CONN_LISTEN)
+
+ def test_udp_v4(self):
+ addr = ("127.0.0.1", 0)
+ with closing(bind_socket(AF_INET, SOCK_DGRAM, addr=addr)) as sock:
+ conn = self.check_socket(sock)
+ assert not conn.raddr
+ self.assertEqual(conn.status, psutil.CONN_NONE)
+
+ @unittest.skipIf(not supports_ipv6(), "IPv6 not supported")
+ def test_udp_v6(self):
+ addr = ("::1", 0)
+ with closing(bind_socket(AF_INET6, SOCK_DGRAM, addr=addr)) as sock:
+ conn = self.check_socket(sock)
+ assert not conn.raddr
+ self.assertEqual(conn.status, psutil.CONN_NONE)
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_unix_tcp(self):
+ testfn = self.get_testfn()
+ with closing(bind_unix_socket(testfn, type=SOCK_STREAM)) as sock:
+ conn = self.check_socket(sock)
+ assert not conn.raddr
+ self.assertEqual(conn.status, psutil.CONN_NONE)
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_unix_udp(self):
+ testfn = self.get_testfn()
+        with closing(bind_unix_socket(testfn, type=SOCK_DGRAM)) as sock:
+ conn = self.check_socket(sock)
+ assert not conn.raddr
+ self.assertEqual(conn.status, psutil.CONN_NONE)
+
+
+@serialrun
+class TestConnectedSocket(ConnectionTestCase):
+ """Test socket pairs which are are actually connected to
+ each other.
+ """
+
+ # On SunOS, even after we close() it, the server socket stays around
+ # in TIME_WAIT state.
+ @unittest.skipIf(SUNOS, "unreliable on SUONS")
+ def test_tcp(self):
+ addr = ("127.0.0.1", 0)
+ assert not thisproc.connections(kind='tcp4')
+ server, client = tcp_socketpair(AF_INET, addr=addr)
+ try:
+ cons = thisproc.connections(kind='tcp4')
+ self.assertEqual(len(cons), 2)
+ self.assertEqual(cons[0].status, psutil.CONN_ESTABLISHED)
+ self.assertEqual(cons[1].status, psutil.CONN_ESTABLISHED)
+ # May not be fast enough to change state so it stays
+            # commented out.
+ # client.close()
+ # cons = thisproc.connections(kind='all')
+ # self.assertEqual(len(cons), 1)
+ # self.assertEqual(cons[0].status, psutil.CONN_CLOSE_WAIT)
+ finally:
+ server.close()
+ client.close()
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_unix(self):
+ testfn = self.get_testfn()
+ server, client = unix_socketpair(testfn)
+ try:
+ cons = thisproc.connections(kind='unix')
+ assert not (cons[0].laddr and cons[0].raddr)
+ assert not (cons[1].laddr and cons[1].raddr)
+ if NETBSD or FREEBSD:
+ # On NetBSD creating a UNIX socket will cause
+ # a UNIX connection to /var/run/log.
+ cons = [c for c in cons if c.raddr != '/var/run/log']
+ self.assertEqual(len(cons), 2, msg=cons)
+ if LINUX or FREEBSD or SUNOS:
+ # remote path is never set
+ self.assertEqual(cons[0].raddr, "")
+ self.assertEqual(cons[1].raddr, "")
+                # one local address should be set though
+ self.assertEqual(testfn, cons[0].laddr or cons[1].laddr)
+ elif OPENBSD:
+ # No addresses whatsoever here.
+ for addr in (cons[0].laddr, cons[0].raddr,
+ cons[1].laddr, cons[1].raddr):
+ self.assertEqual(addr, "")
+ else:
+ # On other systems either the laddr or raddr
+ # of both peers are set.
+ self.assertEqual(cons[0].laddr or cons[1].laddr, testfn)
+ self.assertEqual(cons[0].raddr or cons[1].raddr, testfn)
+ finally:
+ server.close()
+ client.close()
+
+
+class TestFilters(ConnectionTestCase):
+
+ def test_filters(self):
+ def check(kind, families, types):
+ for conn in thisproc.connections(kind=kind):
+ self.assertIn(conn.family, families)
+ self.assertIn(conn.type, types)
+ if not SKIP_SYSCONS:
+ for conn in psutil.net_connections(kind=kind):
+ self.assertIn(conn.family, families)
+ self.assertIn(conn.type, types)
+
+ with create_sockets():
+ check('all',
+ [AF_INET, AF_INET6, AF_UNIX],
+ [SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET])
+ check('inet',
+ [AF_INET, AF_INET6],
+ [SOCK_STREAM, SOCK_DGRAM])
+ check('inet4',
+ [AF_INET],
+ [SOCK_STREAM, SOCK_DGRAM])
+ check('tcp',
+ [AF_INET, AF_INET6],
+ [SOCK_STREAM])
+ check('tcp4',
+ [AF_INET],
+ [SOCK_STREAM])
+ check('tcp6',
+ [AF_INET6],
+ [SOCK_STREAM])
+ check('udp',
+ [AF_INET, AF_INET6],
+ [SOCK_DGRAM])
+ check('udp4',
+ [AF_INET],
+ [SOCK_DGRAM])
+ check('udp6',
+ [AF_INET6],
+ [SOCK_DGRAM])
+ if HAS_CONNECTIONS_UNIX:
+ check('unix',
+ [AF_UNIX],
+ [SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET])
+
+ @skip_on_access_denied(only_if=MACOS)
+ def test_combos(self):
+ reap_children()
+
+ def check_conn(proc, conn, family, type, laddr, raddr, status, kinds):
+ all_kinds = ("all", "inet", "inet4", "inet6", "tcp", "tcp4",
+ "tcp6", "udp", "udp4", "udp6")
+ check_connection_ntuple(conn)
+ self.assertEqual(conn.family, family)
+ self.assertEqual(conn.type, type)
+ self.assertEqual(conn.laddr, laddr)
+ self.assertEqual(conn.raddr, raddr)
+ self.assertEqual(conn.status, status)
+ for kind in all_kinds:
+ cons = proc.connections(kind=kind)
+ if kind in kinds:
+ assert cons
+ else:
+ assert not cons, cons
+ # compare against system-wide connections
+ # XXX Solaris can't retrieve system-wide UNIX
+ # sockets.
+ if HAS_CONNECTIONS_UNIX:
+ self.compare_procsys_connections(proc.pid, [conn])
+
+ tcp_template = textwrap.dedent("""
+ import socket, time
+ s = socket.socket({family}, socket.SOCK_STREAM)
+ s.bind(('{addr}', 0))
+ s.listen(5)
+ with open('{testfn}', 'w') as f:
+ f.write(str(s.getsockname()[:2]))
+ time.sleep(60)
+ """)
+
+ udp_template = textwrap.dedent("""
+ import socket, time
+ s = socket.socket({family}, socket.SOCK_DGRAM)
+ s.bind(('{addr}', 0))
+ with open('{testfn}', 'w') as f:
+ f.write(str(s.getsockname()[:2]))
+ time.sleep(60)
+ """)
+
+ # must be relative on Windows
+ testfile = os.path.basename(self.get_testfn(dir=os.getcwd()))
+ tcp4_template = tcp_template.format(
+ family=int(AF_INET), addr="127.0.0.1", testfn=testfile)
+ udp4_template = udp_template.format(
+ family=int(AF_INET), addr="127.0.0.1", testfn=testfile)
+ tcp6_template = tcp_template.format(
+ family=int(AF_INET6), addr="::1", testfn=testfile)
+ udp6_template = udp_template.format(
+ family=int(AF_INET6), addr="::1", testfn=testfile)
+
+ # launch various subprocess instantiating a socket of various
+ # families and types to enrich psutil results
+ tcp4_proc = self.pyrun(tcp4_template)
+ tcp4_addr = eval(wait_for_file(testfile, delete=True))
+ udp4_proc = self.pyrun(udp4_template)
+ udp4_addr = eval(wait_for_file(testfile, delete=True))
+ if supports_ipv6():
+ tcp6_proc = self.pyrun(tcp6_template)
+ tcp6_addr = eval(wait_for_file(testfile, delete=True))
+ udp6_proc = self.pyrun(udp6_template)
+ udp6_addr = eval(wait_for_file(testfile, delete=True))
+ else:
+ tcp6_proc = None
+ udp6_proc = None
+ tcp6_addr = None
+ udp6_addr = None
+
+ for p in thisproc.children():
+ cons = p.connections()
+ self.assertEqual(len(cons), 1)
+ for conn in cons:
+ # TCP v4
+ if p.pid == tcp4_proc.pid:
+ check_conn(p, conn, AF_INET, SOCK_STREAM, tcp4_addr, (),
+ psutil.CONN_LISTEN,
+ ("all", "inet", "inet4", "tcp", "tcp4"))
+ # UDP v4
+ elif p.pid == udp4_proc.pid:
+ check_conn(p, conn, AF_INET, SOCK_DGRAM, udp4_addr, (),
+ psutil.CONN_NONE,
+ ("all", "inet", "inet4", "udp", "udp4"))
+ # TCP v6
+ elif p.pid == getattr(tcp6_proc, "pid", None):
+ check_conn(p, conn, AF_INET6, SOCK_STREAM, tcp6_addr, (),
+ psutil.CONN_LISTEN,
+ ("all", "inet", "inet6", "tcp", "tcp6"))
+ # UDP v6
+ elif p.pid == getattr(udp6_proc, "pid", None):
+ check_conn(p, conn, AF_INET6, SOCK_DGRAM, udp6_addr, (),
+ psutil.CONN_NONE,
+ ("all", "inet", "inet6", "udp", "udp6"))
+
+ def test_count(self):
+ with create_sockets():
+ # tcp
+ cons = thisproc.connections(kind='tcp')
+ self.assertEqual(len(cons), 2 if supports_ipv6() else 1)
+ for conn in cons:
+ self.assertIn(conn.family, (AF_INET, AF_INET6))
+ self.assertEqual(conn.type, SOCK_STREAM)
+ # tcp4
+ cons = thisproc.connections(kind='tcp4')
+ self.assertEqual(len(cons), 1)
+ self.assertEqual(cons[0].family, AF_INET)
+ self.assertEqual(cons[0].type, SOCK_STREAM)
+ # tcp6
+ if supports_ipv6():
+ cons = thisproc.connections(kind='tcp6')
+ self.assertEqual(len(cons), 1)
+ self.assertEqual(cons[0].family, AF_INET6)
+ self.assertEqual(cons[0].type, SOCK_STREAM)
+ # udp
+ cons = thisproc.connections(kind='udp')
+ self.assertEqual(len(cons), 2 if supports_ipv6() else 1)
+ for conn in cons:
+ self.assertIn(conn.family, (AF_INET, AF_INET6))
+ self.assertEqual(conn.type, SOCK_DGRAM)
+ # udp4
+ cons = thisproc.connections(kind='udp4')
+ self.assertEqual(len(cons), 1)
+ self.assertEqual(cons[0].family, AF_INET)
+ self.assertEqual(cons[0].type, SOCK_DGRAM)
+ # udp6
+ if supports_ipv6():
+ cons = thisproc.connections(kind='udp6')
+ self.assertEqual(len(cons), 1)
+ self.assertEqual(cons[0].family, AF_INET6)
+ self.assertEqual(cons[0].type, SOCK_DGRAM)
+ # inet
+ cons = thisproc.connections(kind='inet')
+ self.assertEqual(len(cons), 4 if supports_ipv6() else 2)
+ for conn in cons:
+ self.assertIn(conn.family, (AF_INET, AF_INET6))
+ self.assertIn(conn.type, (SOCK_STREAM, SOCK_DGRAM))
+ # inet6
+ if supports_ipv6():
+ cons = thisproc.connections(kind='inet6')
+ self.assertEqual(len(cons), 2)
+ for conn in cons:
+ self.assertEqual(conn.family, AF_INET6)
+ self.assertIn(conn.type, (SOCK_STREAM, SOCK_DGRAM))
+            # Skipped on BSD because by default the Python process
+            # creates a UNIX socket to '/var/run/log'.
+ if HAS_CONNECTIONS_UNIX and not (FREEBSD or NETBSD):
+ cons = thisproc.connections(kind='unix')
+ self.assertEqual(len(cons), 3)
+ for conn in cons:
+ self.assertEqual(conn.family, AF_UNIX)
+ self.assertIn(conn.type, (SOCK_STREAM, SOCK_DGRAM))
+
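The `kind` strings accepted by Process.connections() and psutil.net_connections() each select a (families, types) pair, which is exactly what test_filters() asserts above. A quick illustrative sketch, reusing the private psutil._common.conn_tmap mapping that the system-wide test below also imports:

    import psutil
    from psutil._common import conn_tmap  # private mapping, shown for illustration

    # Show which address families and socket types each `kind` selects.
    for kind, (families, types) in sorted(conn_tmap.items()):
        print("%-6s families=%s types=%s" % (kind, sorted(families), sorted(types)))

    # Example: only IPv4 TCP connections of the current process.
    for conn in psutil.Process().connections(kind='tcp4'):
        print(conn.laddr, conn.raddr, conn.status)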
+
+@unittest.skipIf(SKIP_SYSCONS, "requires root")
+class TestSystemWideConnections(ConnectionTestCase):
+ """Tests for net_connections()."""
+
+ def test_it(self):
+ def check(cons, families, types_):
+ for conn in cons:
+ self.assertIn(conn.family, families, msg=conn)
+ if conn.family != AF_UNIX:
+ self.assertIn(conn.type, types_, msg=conn)
+ check_connection_ntuple(conn)
+
+ with create_sockets():
+ from psutil._common import conn_tmap
+ for kind, groups in conn_tmap.items():
+ # XXX: SunOS does not retrieve UNIX sockets.
+ if kind == 'unix' and not HAS_CONNECTIONS_UNIX:
+ continue
+ families, types_ = groups
+ cons = psutil.net_connections(kind)
+ self.assertEqual(len(cons), len(set(cons)))
+ check(cons, families, types_)
+
+ @retry_on_failure()
+ def test_multi_sockets_procs(self):
+        # Creates multiple subprocesses, each creating different
+ # sockets. For each process check that proc.connections()
+ # and net_connections() return the same results.
+ # This is done mainly to check whether net_connections()'s
+ # pid is properly set, see:
+ # https://github.com/giampaolo/psutil/issues/1013
+ with create_sockets() as socks:
+ expected = len(socks)
+ pids = []
+ times = 10
+ fnames = []
+ for i in range(times):
+ fname = self.get_testfn()
+ fnames.append(fname)
+ src = textwrap.dedent("""\
+ import time, os
+ from psutil.tests import create_sockets
+ with create_sockets():
+ with open(r'%s', 'w') as f:
+ f.write("hello")
+ time.sleep(60)
+ """ % fname)
+ sproc = self.pyrun(src)
+ pids.append(sproc.pid)
+
+ # sync
+ for fname in fnames:
+ wait_for_file(fname)
+
+ syscons = [x for x in psutil.net_connections(kind='all') if x.pid
+ in pids]
+ for pid in pids:
+ self.assertEqual(len([x for x in syscons if x.pid == pid]),
+ expected)
+ p = psutil.Process(pid)
+ self.assertEqual(len(p.connections('all')), expected)
+
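Outside the test harness, the same cross-check can be sketched roughly as follows: count system-wide connections per PID and compare with each process' own view. This is only an illustration; as with the test above it generally requires root, and counts can legitimately drift while processes open or close sockets.

    import collections

    import psutil

    # PID -> number of inet connections, as seen system-wide.
    by_pid = collections.Counter(
        c.pid for c in psutil.net_connections(kind='inet') if c.pid)

    # Compare with what each process reports about itself.
    for pid, count in by_pid.items():
        try:
            own = len(psutil.Process(pid).connections(kind='inet'))
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
        if own != count:
            print("pid %s: system-wide=%s, per-process=%s" % (pid, count, own))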
+
+class TestMisc(PsutilTestCase):
+
+ def test_connection_constants(self):
+ ints = []
+ strs = []
+ for name in dir(psutil):
+ if name.startswith('CONN_'):
+ num = getattr(psutil, name)
+ str_ = str(num)
+ assert str_.isupper(), str_
+                self.assertNotIn(str_, strs)
+ self.assertNotIn(num, ints)
+ ints.append(num)
+ strs.append(str_)
+ if SUNOS:
+ psutil.CONN_IDLE
+ psutil.CONN_BOUND
+ if WINDOWS:
+ psutil.CONN_DELETE_TCB
+
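The constants check above can be reproduced interactively with a short sketch; it simply mirrors what the test asserts (upper-case string values, no duplicates):

    import psutil

    conn_constants = {name: getattr(psutil, name)
                      for name in dir(psutil) if name.startswith('CONN_')}
    values = [str(v) for v in conn_constants.values()]
    assert all(v.isupper() for v in values)
    assert len(values) == len(set(values))
    print(sorted(conn_constants))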
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_contracts.py b/lib/psutil/tests/test_contracts.py
new file mode 100644
index 0000000..3b806ee
--- /dev/null
+++ b/lib/psutil/tests/test_contracts.py
@@ -0,0 +1,751 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Contracts tests. These tests mainly check API sanity in terms of
+returned types and API availability.
+Some of these are duplicates of tests in test_system.py and test_process.py.
+"""
+
+import errno
+import multiprocessing
+import os
+import platform
+import signal
+import stat
+import sys
+import time
+import traceback
+import unittest
+
+import psutil
+from psutil import AIX
+from psutil import BSD
+from psutil import FREEBSD
+from psutil import LINUX
+from psutil import MACOS
+from psutil import NETBSD
+from psutil import OPENBSD
+from psutil import OSX
+from psutil import POSIX
+from psutil import SUNOS
+from psutil import WINDOWS
+from psutil._compat import FileNotFoundError
+from psutil._compat import long
+from psutil._compat import range
+from psutil._compat import unicode
+from psutil.tests import APPVEYOR
+from psutil.tests import CI_TESTING
+from psutil.tests import GITHUB_ACTIONS
+from psutil.tests import HAS_CPU_FREQ
+from psutil.tests import HAS_NET_IO_COUNTERS
+from psutil.tests import HAS_SENSORS_FANS
+from psutil.tests import HAS_SENSORS_TEMPERATURES
+from psutil.tests import PYPY
+from psutil.tests import SKIP_SYSCONS
+from psutil.tests import VALID_PROC_STATUSES
+from psutil.tests import PsutilTestCase
+from psutil.tests import check_connection_ntuple
+from psutil.tests import create_sockets
+from psutil.tests import enum
+from psutil.tests import is_namedtuple
+from psutil.tests import kernel_version
+from psutil.tests import process_namespace
+from psutil.tests import serialrun
+
+
+# ===================================================================
+# --- APIs availability
+# ===================================================================
+
+# Make sure the code reflects what the doc promises in terms of API
+# availability.
+
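From a caller's perspective the same contract means that optional APIs are best feature-tested with hasattr() rather than hard-coded platform checks. A small hedged sketch (the attribute names are real psutil APIs whose availability these tests pin down):

    import psutil

    if hasattr(psutil, "sensors_battery"):
        print("battery:", psutil.sensors_battery())
    else:
        print("sensors_battery() not available on this platform")

    if hasattr(psutil.Process, "rlimit"):  # Linux / FreeBSD only
        print(psutil.Process().rlimit(psutil.RLIMIT_NOFILE))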
+class TestAvailConstantsAPIs(PsutilTestCase):
+
+ def test_PROCFS_PATH(self):
+ self.assertEqual(hasattr(psutil, "PROCFS_PATH"),
+ LINUX or SUNOS or AIX)
+
+ def test_win_priority(self):
+ ae = self.assertEqual
+ ae(hasattr(psutil, "ABOVE_NORMAL_PRIORITY_CLASS"), WINDOWS)
+ ae(hasattr(psutil, "BELOW_NORMAL_PRIORITY_CLASS"), WINDOWS)
+ ae(hasattr(psutil, "HIGH_PRIORITY_CLASS"), WINDOWS)
+ ae(hasattr(psutil, "IDLE_PRIORITY_CLASS"), WINDOWS)
+ ae(hasattr(psutil, "NORMAL_PRIORITY_CLASS"), WINDOWS)
+ ae(hasattr(psutil, "REALTIME_PRIORITY_CLASS"), WINDOWS)
+
+ def test_linux_ioprio_linux(self):
+ ae = self.assertEqual
+ ae(hasattr(psutil, "IOPRIO_CLASS_NONE"), LINUX)
+ ae(hasattr(psutil, "IOPRIO_CLASS_RT"), LINUX)
+ ae(hasattr(psutil, "IOPRIO_CLASS_BE"), LINUX)
+ ae(hasattr(psutil, "IOPRIO_CLASS_IDLE"), LINUX)
+
+ def test_linux_ioprio_windows(self):
+ ae = self.assertEqual
+ ae(hasattr(psutil, "IOPRIO_HIGH"), WINDOWS)
+ ae(hasattr(psutil, "IOPRIO_NORMAL"), WINDOWS)
+ ae(hasattr(psutil, "IOPRIO_LOW"), WINDOWS)
+ ae(hasattr(psutil, "IOPRIO_VERYLOW"), WINDOWS)
+
+ @unittest.skipIf(GITHUB_ACTIONS and LINUX,
+ "unsupported on GITHUB_ACTIONS + LINUX")
+ def test_rlimit(self):
+ ae = self.assertEqual
+ ae(hasattr(psutil, "RLIM_INFINITY"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_AS"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_CORE"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_CPU"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_DATA"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_FSIZE"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_MEMLOCK"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_NOFILE"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_NPROC"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_RSS"), LINUX or FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_STACK"), LINUX or FREEBSD)
+
+ ae(hasattr(psutil, "RLIMIT_LOCKS"), LINUX)
+ if POSIX:
+ if kernel_version() >= (2, 6, 8):
+ ae(hasattr(psutil, "RLIMIT_MSGQUEUE"), LINUX)
+ if kernel_version() >= (2, 6, 12):
+ ae(hasattr(psutil, "RLIMIT_NICE"), LINUX)
+ if kernel_version() >= (2, 6, 12):
+ ae(hasattr(psutil, "RLIMIT_RTPRIO"), LINUX)
+ if kernel_version() >= (2, 6, 25):
+ ae(hasattr(psutil, "RLIMIT_RTTIME"), LINUX)
+ if kernel_version() >= (2, 6, 8):
+ ae(hasattr(psutil, "RLIMIT_SIGPENDING"), LINUX)
+
+ ae(hasattr(psutil, "RLIMIT_SWAP"), FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_SBSIZE"), FREEBSD)
+ ae(hasattr(psutil, "RLIMIT_NPTS"), FREEBSD)
+
+
+class TestAvailSystemAPIs(PsutilTestCase):
+
+ def test_win_service_iter(self):
+ self.assertEqual(hasattr(psutil, "win_service_iter"), WINDOWS)
+
+ def test_win_service_get(self):
+ self.assertEqual(hasattr(psutil, "win_service_get"), WINDOWS)
+
+ def test_cpu_freq(self):
+ self.assertEqual(hasattr(psutil, "cpu_freq"),
+ LINUX or MACOS or WINDOWS or FREEBSD or OPENBSD)
+
+ def test_sensors_temperatures(self):
+ self.assertEqual(
+ hasattr(psutil, "sensors_temperatures"), LINUX or FREEBSD)
+
+ def test_sensors_fans(self):
+ self.assertEqual(hasattr(psutil, "sensors_fans"), LINUX)
+
+ def test_battery(self):
+ self.assertEqual(hasattr(psutil, "sensors_battery"),
+ LINUX or WINDOWS or FREEBSD or MACOS)
+
+
+class TestAvailProcessAPIs(PsutilTestCase):
+
+ def test_environ(self):
+ self.assertEqual(hasattr(psutil.Process, "environ"),
+ LINUX or MACOS or WINDOWS or AIX or SUNOS or
+ FREEBSD or OPENBSD or NETBSD)
+
+ def test_uids(self):
+ self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
+
+ def test_gids(self):
+ self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
+
+ def test_terminal(self):
+ self.assertEqual(hasattr(psutil.Process, "terminal"), POSIX)
+
+ def test_ionice(self):
+ self.assertEqual(hasattr(psutil.Process, "ionice"), LINUX or WINDOWS)
+
+ @unittest.skipIf(GITHUB_ACTIONS and LINUX,
+ "unsupported on GITHUB_ACTIONS + LINUX")
+ def test_rlimit(self):
+ self.assertEqual(hasattr(psutil.Process, "rlimit"), LINUX or FREEBSD)
+
+ def test_io_counters(self):
+ hasit = hasattr(psutil.Process, "io_counters")
+ self.assertEqual(hasit, False if MACOS or SUNOS else True)
+
+ def test_num_fds(self):
+ self.assertEqual(hasattr(psutil.Process, "num_fds"), POSIX)
+
+ def test_num_handles(self):
+ self.assertEqual(hasattr(psutil.Process, "num_handles"), WINDOWS)
+
+ def test_cpu_affinity(self):
+ self.assertEqual(hasattr(psutil.Process, "cpu_affinity"),
+ LINUX or WINDOWS or FREEBSD)
+
+ def test_cpu_num(self):
+ self.assertEqual(hasattr(psutil.Process, "cpu_num"),
+ LINUX or FREEBSD or SUNOS)
+
+ def test_memory_maps(self):
+ hasit = hasattr(psutil.Process, "memory_maps")
+ self.assertEqual(
+ hasit, False if OPENBSD or NETBSD or AIX or MACOS else True)
+
+
+# ===================================================================
+# --- API types
+# ===================================================================
+
+
+class TestSystemAPITypes(PsutilTestCase):
+ """Check the return types of system related APIs.
+ Mainly we want to test we never return unicode on Python 2, see:
+ https://github.com/giampaolo/psutil/issues/1039
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.proc = psutil.Process()
+
+ def assert_ntuple_of_nums(self, nt, type_=float, gezero=True):
+ assert is_namedtuple(nt)
+ for n in nt:
+ self.assertIsInstance(n, type_)
+ if gezero:
+ self.assertGreaterEqual(n, 0)
+
+ def test_cpu_times(self):
+ self.assert_ntuple_of_nums(psutil.cpu_times())
+ for nt in psutil.cpu_times(percpu=True):
+ self.assert_ntuple_of_nums(nt)
+
+ def test_cpu_percent(self):
+ self.assertIsInstance(psutil.cpu_percent(interval=None), float)
+ self.assertIsInstance(psutil.cpu_percent(interval=0.00001), float)
+
+ def test_cpu_times_percent(self):
+ self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=None))
+ self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=0.0001))
+
+ def test_cpu_count(self):
+ self.assertIsInstance(psutil.cpu_count(), int)
+
+ # TODO: remove this once 1892 is fixed
+ @unittest.skipIf(MACOS and platform.machine() == 'arm64',
+ "skipped due to #1892")
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_cpu_freq(self):
+ if psutil.cpu_freq() is None:
+ raise self.skipTest("cpu_freq() returns None")
+ self.assert_ntuple_of_nums(psutil.cpu_freq(), type_=(float, int, long))
+
+ def test_disk_io_counters(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for k, v in psutil.disk_io_counters(perdisk=True).items():
+ self.assertIsInstance(k, str)
+ self.assert_ntuple_of_nums(v, type_=(int, long))
+
+ def test_disk_partitions(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for disk in psutil.disk_partitions():
+ self.assertIsInstance(disk.device, str)
+ self.assertIsInstance(disk.mountpoint, str)
+ self.assertIsInstance(disk.fstype, str)
+ self.assertIsInstance(disk.opts, str)
+ self.assertIsInstance(disk.maxfile, int)
+ self.assertIsInstance(disk.maxpath, int)
+
+ @unittest.skipIf(SKIP_SYSCONS, "requires root")
+ def test_net_connections(self):
+ with create_sockets():
+ ret = psutil.net_connections('all')
+ self.assertEqual(len(ret), len(set(ret)))
+ for conn in ret:
+ assert is_namedtuple(conn)
+
+ def test_net_if_addrs(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for ifname, addrs in psutil.net_if_addrs().items():
+ self.assertIsInstance(ifname, str)
+ for addr in addrs:
+ if enum is not None and not PYPY:
+ self.assertIsInstance(addr.family, enum.IntEnum)
+ else:
+ self.assertIsInstance(addr.family, int)
+ self.assertIsInstance(addr.address, str)
+ self.assertIsInstance(addr.netmask, (str, type(None)))
+ self.assertIsInstance(addr.broadcast, (str, type(None)))
+
+ def test_net_if_stats(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for ifname, info in psutil.net_if_stats().items():
+ self.assertIsInstance(ifname, str)
+ self.assertIsInstance(info.isup, bool)
+ if enum is not None:
+ self.assertIsInstance(info.duplex, enum.IntEnum)
+ else:
+ self.assertIsInstance(info.duplex, int)
+ self.assertIsInstance(info.speed, int)
+ self.assertIsInstance(info.mtu, int)
+
+ @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
+ def test_net_io_counters(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for ifname, _ in psutil.net_io_counters(pernic=True).items():
+ self.assertIsInstance(ifname, str)
+
+ @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
+ def test_sensors_fans(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for name, units in psutil.sensors_fans().items():
+ self.assertIsInstance(name, str)
+ for unit in units:
+ self.assertIsInstance(unit.label, str)
+ self.assertIsInstance(unit.current, (float, int, type(None)))
+
+ @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
+ def test_sensors_temperatures(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for name, units in psutil.sensors_temperatures().items():
+ self.assertIsInstance(name, str)
+ for unit in units:
+ self.assertIsInstance(unit.label, str)
+ self.assertIsInstance(unit.current, (float, int, type(None)))
+ self.assertIsInstance(unit.high, (float, int, type(None)))
+ self.assertIsInstance(unit.critical, (float, int, type(None)))
+
+ def test_boot_time(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ self.assertIsInstance(psutil.boot_time(), float)
+
+ def test_users(self):
+ # Duplicate of test_system.py. Keep it anyway.
+ for user in psutil.users():
+ self.assertIsInstance(user.name, str)
+ self.assertIsInstance(user.terminal, (str, type(None)))
+ self.assertIsInstance(user.host, (str, type(None)))
+ self.assertIsInstance(user.pid, (int, type(None)))
+
+
+class TestProcessWaitType(PsutilTestCase):
+
+ @unittest.skipIf(not POSIX, "not POSIX")
+ def test_negative_signal(self):
+ p = psutil.Process(self.spawn_testproc().pid)
+ p.terminate()
+ code = p.wait()
+ self.assertEqual(code, -signal.SIGTERM)
+ if enum is not None:
+ self.assertIsInstance(code, enum.IntEnum)
+ else:
+ self.assertIsInstance(code, int)
+
+
+# ===================================================================
+# --- Fetch all processes test
+# ===================================================================
+
+
+def proc_info(pid):
+ tcase = PsutilTestCase()
+
+ def check_exception(exc, proc, name, ppid):
+ tcase.assertEqual(exc.pid, pid)
+ tcase.assertEqual(exc.name, name)
+ if isinstance(exc, psutil.ZombieProcess):
+ if exc.ppid is not None:
+ tcase.assertGreaterEqual(exc.ppid, 0)
+ tcase.assertEqual(exc.ppid, ppid)
+ elif isinstance(exc, psutil.NoSuchProcess):
+ tcase.assertProcessGone(proc)
+ str(exc)
+
+ def do_wait():
+ if pid != 0:
+ try:
+ proc.wait(0)
+ except psutil.Error as exc:
+ check_exception(exc, proc, name, ppid)
+
+ try:
+ proc = psutil.Process(pid)
+ d = proc.as_dict(['ppid', 'name'])
+ except psutil.NoSuchProcess:
+ return {}
+
+ name, ppid = d['name'], d['ppid']
+ info = {'pid': proc.pid}
+ ns = process_namespace(proc)
+    # We don't use oneshot() in order not to fool check_exception()
+    # in case of NSP.
+ for fun, fun_name in ns.iter(ns.getters, clear_cache=False):
+ try:
+ info[fun_name] = fun()
+ except psutil.Error as exc:
+ check_exception(exc, proc, name, ppid)
+ continue
+ do_wait()
+ return info
+
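For context, proc_info() above deliberately avoids Process.oneshot() so that every failure can be attributed to the exact getter that raised. A simplified version of the same "collect what you can" idea, using only the public API (as_dict() maps inaccessible fields to None instead of raising):

    import psutil

    def safe_snapshot(pid):
        """Return whatever is retrievable for `pid`, or {} if it is gone."""
        try:
            return psutil.Process(pid).as_dict(
                attrs=['name', 'ppid', 'status', 'cpu_times'])
        except psutil.NoSuchProcess:
            return {}

    snapshots = {pid: safe_snapshot(pid) for pid in psutil.pids()}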
+
+@serialrun
+class TestFetchAllProcesses(PsutilTestCase):
+ """Test which iterates over all running processes and performs
+ some sanity checks against Process API's returned values.
+ Uses a process pool to get info about all processes.
+ """
+
+ def setUp(self):
+ # Using a pool in a CI env may result in deadlock, see:
+ # https://github.com/giampaolo/psutil/issues/2104
+ if not CI_TESTING:
+ self.pool = multiprocessing.Pool()
+
+ def tearDown(self):
+ if not CI_TESTING:
+ self.pool.terminate()
+ self.pool.join()
+
+ def iter_proc_info(self):
+ # Fixes "can't pickle <function proc_info>: it's not the
+ # same object as test_contracts.proc_info".
+ from psutil.tests.test_contracts import proc_info
+
+ if not CI_TESTING:
+ return self.pool.imap_unordered(proc_info, psutil.pids())
+ else:
+ ls = []
+ for pid in psutil.pids():
+ ls.append(proc_info(pid))
+ return ls
+
+ def test_all(self):
+ failures = []
+ for info in self.iter_proc_info():
+ for name, value in info.items():
+ meth = getattr(self, name)
+ try:
+ meth(value, info)
+ except AssertionError:
+ s = '\n' + '=' * 70 + '\n'
+ s += "FAIL: test_%s pid=%s, ret=%s\n" % (
+ name, info['pid'], repr(value))
+ s += '-' * 70
+ s += "\n%s" % traceback.format_exc()
+ s = "\n".join((" " * 4) + i for i in s.splitlines())
+ s += '\n'
+ failures.append(s)
+ else:
+ if value not in (0, 0.0, [], None, '', {}):
+ assert value, value
+ if failures:
+ raise self.fail(''.join(failures))
+
+ def cmdline(self, ret, info):
+ self.assertIsInstance(ret, list)
+ for part in ret:
+ self.assertIsInstance(part, str)
+
+ def exe(self, ret, info):
+ self.assertIsInstance(ret, (str, unicode, type(None)))
+ if not ret:
+ self.assertEqual(ret, '')
+ else:
+ if WINDOWS and not ret.endswith('.exe'):
+ return # May be "Registry", "MemCompression", ...
+ assert os.path.isabs(ret), ret
+            # Note: os.path.isfile() may return False even if the file
+            # is there, hence we skip the test, see:
+            # http://stackoverflow.com/questions/3112546/os-path-exists-lies
+ if POSIX and os.path.isfile(ret):
+ if hasattr(os, 'access') and hasattr(os, "X_OK"):
+ # XXX: may fail on MACOS
+ try:
+ assert os.access(ret, os.X_OK)
+ except AssertionError:
+ if os.path.exists(ret) and not CI_TESTING:
+ raise
+
+ def pid(self, ret, info):
+ self.assertIsInstance(ret, int)
+ self.assertGreaterEqual(ret, 0)
+
+ def ppid(self, ret, info):
+ self.assertIsInstance(ret, (int, long))
+ self.assertGreaterEqual(ret, 0)
+
+ def name(self, ret, info):
+ self.assertIsInstance(ret, (str, unicode))
+ if APPVEYOR and not ret and info['status'] == 'stopped':
+ return
+ # on AIX, "<exiting>" processes don't have names
+ if not AIX:
+ assert ret
+
+ def create_time(self, ret, info):
+ self.assertIsInstance(ret, float)
+ try:
+ self.assertGreaterEqual(ret, 0)
+ except AssertionError:
+ # XXX
+ if OPENBSD and info['status'] == psutil.STATUS_ZOMBIE:
+ pass
+ else:
+ raise
+ # this can't be taken for granted on all platforms
+ # self.assertGreaterEqual(ret, psutil.boot_time())
+ # make sure returned value can be pretty printed
+ # with strftime
+ time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
+
+ def uids(self, ret, info):
+ assert is_namedtuple(ret)
+ for uid in ret:
+ self.assertIsInstance(uid, int)
+ self.assertGreaterEqual(uid, 0)
+
+ def gids(self, ret, info):
+ assert is_namedtuple(ret)
+        # note: testing all gids as above seems not to be reliable for
+        # gid == 30 (nobody); not sure why.
+ for gid in ret:
+ self.assertIsInstance(gid, int)
+ if not MACOS and not NETBSD:
+ self.assertGreaterEqual(gid, 0)
+
+ def username(self, ret, info):
+ self.assertIsInstance(ret, str)
+ assert ret
+
+ def status(self, ret, info):
+ self.assertIsInstance(ret, str)
+ assert ret
+ self.assertNotEqual(ret, '?') # XXX
+ self.assertIn(ret, VALID_PROC_STATUSES)
+
+ def io_counters(self, ret, info):
+ assert is_namedtuple(ret)
+ for field in ret:
+ self.assertIsInstance(field, (int, long))
+ if field != -1:
+ self.assertGreaterEqual(field, 0)
+
+ def ionice(self, ret, info):
+ if LINUX:
+ self.assertIsInstance(ret.ioclass, int)
+ self.assertIsInstance(ret.value, int)
+ self.assertGreaterEqual(ret.ioclass, 0)
+ self.assertGreaterEqual(ret.value, 0)
+ else: # Windows, Cygwin
+ choices = [
+ psutil.IOPRIO_VERYLOW,
+ psutil.IOPRIO_LOW,
+ psutil.IOPRIO_NORMAL,
+ psutil.IOPRIO_HIGH]
+ self.assertIsInstance(ret, int)
+ self.assertGreaterEqual(ret, 0)
+ self.assertIn(ret, choices)
+
+ def num_threads(self, ret, info):
+ self.assertIsInstance(ret, int)
+ if APPVEYOR and not ret and info['status'] == 'stopped':
+ return
+ self.assertGreaterEqual(ret, 1)
+
+ def threads(self, ret, info):
+ self.assertIsInstance(ret, list)
+ for t in ret:
+ assert is_namedtuple(t)
+ self.assertGreaterEqual(t.id, 0)
+ self.assertGreaterEqual(t.user_time, 0)
+ self.assertGreaterEqual(t.system_time, 0)
+ for field in t:
+ self.assertIsInstance(field, (int, float))
+
+ def cpu_times(self, ret, info):
+ assert is_namedtuple(ret)
+ for n in ret:
+ self.assertIsInstance(n, float)
+ self.assertGreaterEqual(n, 0)
+ # TODO: check ntuple fields
+
+ def cpu_percent(self, ret, info):
+ self.assertIsInstance(ret, float)
+ assert 0.0 <= ret <= 100.0, ret
+
+ def cpu_num(self, ret, info):
+ self.assertIsInstance(ret, int)
+ if FREEBSD and ret == -1:
+ return
+ self.assertGreaterEqual(ret, 0)
+ if psutil.cpu_count() == 1:
+ self.assertEqual(ret, 0)
+ self.assertIn(ret, list(range(psutil.cpu_count())))
+
+ def memory_info(self, ret, info):
+ assert is_namedtuple(ret)
+ for value in ret:
+ self.assertIsInstance(value, (int, long))
+ self.assertGreaterEqual(value, 0)
+ if WINDOWS:
+ self.assertGreaterEqual(ret.peak_wset, ret.wset)
+ self.assertGreaterEqual(ret.peak_paged_pool, ret.paged_pool)
+ self.assertGreaterEqual(ret.peak_nonpaged_pool, ret.nonpaged_pool)
+ self.assertGreaterEqual(ret.peak_pagefile, ret.pagefile)
+
+ def memory_full_info(self, ret, info):
+ assert is_namedtuple(ret)
+ total = psutil.virtual_memory().total
+ for name in ret._fields:
+ value = getattr(ret, name)
+ self.assertIsInstance(value, (int, long))
+ self.assertGreaterEqual(value, 0, msg=(name, value))
+            if LINUX or (OSX and name in ('vms', 'data')):
+ # On Linux there are processes (e.g. 'goa-daemon') whose
+ # VMS is incredibly high for some reason.
+ continue
+ self.assertLessEqual(value, total, msg=(name, value, total))
+
+ if LINUX:
+ self.assertGreaterEqual(ret.pss, ret.uss)
+
+ def open_files(self, ret, info):
+ self.assertIsInstance(ret, list)
+ for f in ret:
+ self.assertIsInstance(f.fd, int)
+ self.assertIsInstance(f.path, str)
+ if WINDOWS:
+ self.assertEqual(f.fd, -1)
+ elif LINUX:
+ self.assertIsInstance(f.position, int)
+ self.assertIsInstance(f.mode, str)
+ self.assertIsInstance(f.flags, int)
+ self.assertGreaterEqual(f.position, 0)
+ self.assertIn(f.mode, ('r', 'w', 'a', 'r+', 'a+'))
+ self.assertGreater(f.flags, 0)
+ elif BSD and not f.path:
+ # XXX see: https://github.com/giampaolo/psutil/issues/595
+ continue
+ assert os.path.isabs(f.path), f
+ try:
+ st = os.stat(f.path)
+ except FileNotFoundError:
+ pass
+ else:
+ assert stat.S_ISREG(st.st_mode), f
+
+ def num_fds(self, ret, info):
+ self.assertIsInstance(ret, int)
+ self.assertGreaterEqual(ret, 0)
+
+ def connections(self, ret, info):
+ with create_sockets():
+ self.assertEqual(len(ret), len(set(ret)))
+ for conn in ret:
+ assert is_namedtuple(conn)
+ check_connection_ntuple(conn)
+
+ def cwd(self, ret, info):
+ if ret: # 'ret' can be None or empty
+ self.assertIsInstance(ret, str)
+ assert os.path.isabs(ret), ret
+ try:
+ st = os.stat(ret)
+ except OSError as err:
+ if WINDOWS and err.errno in \
+ psutil._psplatform.ACCESS_DENIED_SET:
+ pass
+                # directory has been removed in the meantime
+ elif err.errno != errno.ENOENT:
+ raise
+ else:
+ assert stat.S_ISDIR(st.st_mode)
+
+ def memory_percent(self, ret, info):
+ self.assertIsInstance(ret, float)
+ assert 0 <= ret <= 100, ret
+
+ def is_running(self, ret, info):
+ self.assertIsInstance(ret, bool)
+
+ def cpu_affinity(self, ret, info):
+ self.assertIsInstance(ret, list)
+ assert ret != [], ret
+ cpus = list(range(psutil.cpu_count()))
+ for n in ret:
+ self.assertIsInstance(n, int)
+ self.assertIn(n, cpus)
+
+ def terminal(self, ret, info):
+ self.assertIsInstance(ret, (str, type(None)))
+ if ret is not None:
+ assert os.path.isabs(ret), ret
+ assert os.path.exists(ret), ret
+
+ def memory_maps(self, ret, info):
+ for nt in ret:
+ self.assertIsInstance(nt.addr, str)
+ self.assertIsInstance(nt.perms, str)
+ self.assertIsInstance(nt.path, str)
+ for fname in nt._fields:
+ value = getattr(nt, fname)
+ if fname == 'path':
+ if not value.startswith(("[", "anon_inode:")):
+ assert os.path.isabs(nt.path), nt.path
+ # commented as on Linux we might get
+ # '/foo/bar (deleted)'
+ # assert os.path.exists(nt.path), nt.path
+ elif fname == 'addr':
+ assert value, repr(value)
+ elif fname == 'perms':
+ if not WINDOWS:
+ assert value, repr(value)
+ else:
+ self.assertIsInstance(value, (int, long))
+ self.assertGreaterEqual(value, 0)
+
+ def num_handles(self, ret, info):
+ self.assertIsInstance(ret, int)
+ self.assertGreaterEqual(ret, 0)
+
+ def nice(self, ret, info):
+ self.assertIsInstance(ret, int)
+ if POSIX:
+ assert -20 <= ret <= 20, ret
+ else:
+ priorities = [getattr(psutil, x) for x in dir(psutil)
+ if x.endswith('_PRIORITY_CLASS')]
+ self.assertIn(ret, priorities)
+ if sys.version_info > (3, 4):
+ self.assertIsInstance(ret, enum.IntEnum)
+ else:
+ self.assertIsInstance(ret, int)
+
+ def num_ctx_switches(self, ret, info):
+ assert is_namedtuple(ret)
+ for value in ret:
+ self.assertIsInstance(value, (int, long))
+ self.assertGreaterEqual(value, 0)
+
+ def rlimit(self, ret, info):
+ self.assertIsInstance(ret, tuple)
+ self.assertEqual(len(ret), 2)
+ self.assertGreaterEqual(ret[0], -1)
+ self.assertGreaterEqual(ret[1], -1)
+
+ def environ(self, ret, info):
+ self.assertIsInstance(ret, dict)
+ for k, v in ret.items():
+ self.assertIsInstance(k, str)
+ self.assertIsInstance(v, str)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_linux.py b/lib/psutil/tests/test_linux.py
new file mode 100644
index 0000000..3e1afc4
--- /dev/null
+++ b/lib/psutil/tests/test_linux.py
@@ -0,0 +1,2286 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Linux specific tests."""
+
+from __future__ import division
+
+import collections
+import contextlib
+import errno
+import glob
+import io
+import os
+import re
+import shutil
+import socket
+import struct
+import textwrap
+import time
+import unittest
+import warnings
+
+import psutil
+from psutil import LINUX
+from psutil._compat import PY3
+from psutil._compat import FileNotFoundError
+from psutil._compat import basestring
+from psutil._compat import u
+from psutil.tests import GITHUB_ACTIONS
+from psutil.tests import GLOBAL_TIMEOUT
+from psutil.tests import HAS_BATTERY
+from psutil.tests import HAS_CPU_FREQ
+from psutil.tests import HAS_GETLOADAVG
+from psutil.tests import HAS_RLIMIT
+from psutil.tests import PYPY
+from psutil.tests import TOLERANCE_DISK_USAGE
+from psutil.tests import TOLERANCE_SYS_MEM
+from psutil.tests import PsutilTestCase
+from psutil.tests import ThreadTask
+from psutil.tests import call_until
+from psutil.tests import mock
+from psutil.tests import reload_module
+from psutil.tests import retry_on_failure
+from psutil.tests import safe_rmpath
+from psutil.tests import sh
+from psutil.tests import skip_on_not_implemented
+from psutil.tests import which
+
+
+if LINUX:
+ from psutil._pslinux import CLOCK_TICKS
+ from psutil._pslinux import RootFsDeviceFinder
+ from psutil._pslinux import calculate_avail_vmem
+ from psutil._pslinux import open_binary
+
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+SIOCGIFADDR = 0x8915
+SIOCGIFCONF = 0x8912
+SIOCGIFHWADDR = 0x8927
+SIOCGIFNETMASK = 0x891b
+SIOCGIFBRDADDR = 0x8919
+if LINUX:
+ SECTOR_SIZE = 512
+EMPTY_TEMPERATURES = not glob.glob('/sys/class/hwmon/hwmon*')
+
+
+# =====================================================================
+# --- utils
+# =====================================================================
+
+
+def get_ipv4_address(ifname):
+ import fcntl
+ ifname = ifname[:15]
+ if PY3:
+ ifname = bytes(ifname, 'ascii')
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ with contextlib.closing(s):
+ return socket.inet_ntoa(
+ fcntl.ioctl(s.fileno(),
+ SIOCGIFADDR,
+ struct.pack('256s', ifname))[20:24])
+
+
+def get_ipv4_netmask(ifname):
+ import fcntl
+ ifname = ifname[:15]
+ if PY3:
+ ifname = bytes(ifname, 'ascii')
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ with contextlib.closing(s):
+ return socket.inet_ntoa(
+ fcntl.ioctl(s.fileno(),
+ SIOCGIFNETMASK,
+ struct.pack('256s', ifname))[20:24])
+
+
+def get_ipv4_broadcast(ifname):
+ import fcntl
+ ifname = ifname[:15]
+ if PY3:
+ ifname = bytes(ifname, 'ascii')
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ with contextlib.closing(s):
+ return socket.inet_ntoa(
+ fcntl.ioctl(s.fileno(),
+ SIOCGIFBRDADDR,
+ struct.pack('256s', ifname))[20:24])
+
+
+def get_ipv6_addresses(ifname):
+ with open("/proc/net/if_inet6", 'rt') as f:
+ all_fields = []
+ for line in f.readlines():
+ fields = line.split()
+ if fields[-1] == ifname:
+ all_fields.append(fields)
+
+ if len(all_fields) == 0:
+ raise ValueError("could not find interface %r" % ifname)
+
+ for i in range(0, len(all_fields)):
+ unformatted = all_fields[i][0]
+ groups = []
+ for j in range(0, len(unformatted), 4):
+ groups.append(unformatted[j:j + 4])
+ formatted = ":".join(groups)
+ packed = socket.inet_pton(socket.AF_INET6, formatted)
+ all_fields[i] = socket.inet_ntop(socket.AF_INET6, packed)
+ return all_fields
+
+
+def get_mac_address(ifname):
+ import fcntl
+ ifname = ifname[:15]
+ if PY3:
+ ifname = bytes(ifname, 'ascii')
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ with contextlib.closing(s):
+ info = fcntl.ioctl(
+ s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname))
+ if PY3:
+ def ord(x):
+ return x
+ else:
+ import __builtin__
+ ord = __builtin__.ord
+ return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
+
+
+def free_swap():
+ """Parse 'free' cmd and return swap memory's s total, used and free
+ values.
+ """
+ out = sh(["free", "-b"], env={"LANG": "C.UTF-8"})
+ lines = out.split('\n')
+ for line in lines:
+ if line.startswith('Swap'):
+ _, total, used, free = line.split()
+ nt = collections.namedtuple('free', 'total used free')
+ return nt(int(total), int(used), int(free))
+ raise ValueError(
+ "can't find 'Swap' in 'free' output:\n%s" % '\n'.join(lines))
+
+
+def free_physmem():
+ """Parse 'free' cmd and return physical memory's total, used
+ and free values.
+ """
+    # Note: free can have 2 different output formats; 'shared' and
+    # 'cached' memory may appear at different positions, so we do
+    # not return them.
+ # https://github.com/giampaolo/psutil/issues/538#issuecomment-57059946
+ out = sh(["free", "-b"], env={"LANG": "C.UTF-8"})
+ lines = out.split('\n')
+ for line in lines:
+ if line.startswith('Mem'):
+ total, used, free, shared = \
+ [int(x) for x in line.split()[1:5]]
+ nt = collections.namedtuple(
+ 'free', 'total used free shared output')
+ return nt(total, used, free, shared, out)
+ raise ValueError(
+ "can't find 'Mem' in 'free' output:\n%s" % '\n'.join(lines))
+
+
+def vmstat(stat):
+ out = sh(["vmstat", "-s"], env={"LANG": "C.UTF-8"})
+ for line in out.split("\n"):
+ line = line.strip()
+ if stat in line:
+ return int(line.split(' ')[0])
+ raise ValueError("can't find %r in 'vmstat' output" % stat)
+
+
+def get_free_version_info():
+ out = sh(["free", "-V"]).strip()
+ if 'UNKNOWN' in out:
+ raise unittest.SkipTest("can't determine free version")
+ return tuple(map(int, out.split()[-1].split('.')))
+
+
+@contextlib.contextmanager
+def mock_open_content(for_path, content):
+ """Mock open() builtin and forces it to return a certain `content`
+ on read() if the path being opened matches `for_path`.
+ """
+ def open_mock(name, *args, **kwargs):
+ if name == for_path:
+ if PY3:
+ if isinstance(content, basestring):
+ return io.StringIO(content)
+ else:
+ return io.BytesIO(content)
+ else:
+ return io.BytesIO(content)
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
+ yield m
+
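A minimal usage sketch of the helper above, as if run inside this module; the path and content are made up for illustration:

    # Within the block, open('/proc/loadavg') sees the fake content.
    with mock_open_content('/proc/loadavg', b"0.42 0.30 0.25 1/100 12345\n") as m:
        with open('/proc/loadavg', 'rb') as f:
            assert f.read() == b"0.42 0.30 0.25 1/100 12345\n"
        assert m.called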
+
+@contextlib.contextmanager
+def mock_open_exception(for_path, exc):
+ """Mock open() builtin and raises `exc` if the path being opened
+ matches `for_path`.
+ """
+ def open_mock(name, *args, **kwargs):
+ if name == for_path:
+ raise exc
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
+ yield m
+
+
+# =====================================================================
+# --- system virtual memory
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemVirtualMemory(PsutilTestCase):
+
+ def test_total(self):
+ # free_value = free_physmem().total
+ # psutil_value = psutil.virtual_memory().total
+ # self.assertEqual(free_value, psutil_value)
+ vmstat_value = vmstat('total memory') * 1024
+ psutil_value = psutil.virtual_memory().total
+ self.assertAlmostEqual(
+ vmstat_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_used(self):
+ # Older versions of procps used slab memory to calculate used memory.
+ # This got changed in:
+ # https://gitlab.com/procps-ng/procps/commit/
+ # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
+ if get_free_version_info() < (3, 3, 12):
+ raise self.skipTest("old free version")
+ free = free_physmem()
+ free_value = free.used
+ psutil_value = psutil.virtual_memory().used
+ self.assertAlmostEqual(
+ free_value, psutil_value, delta=TOLERANCE_SYS_MEM,
+ msg='%s %s \n%s' % (free_value, psutil_value, free.output))
+
+ @retry_on_failure()
+ def test_free(self):
+ vmstat_value = vmstat('free memory') * 1024
+ psutil_value = psutil.virtual_memory().free
+ self.assertAlmostEqual(
+ vmstat_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_buffers(self):
+ vmstat_value = vmstat('buffer memory') * 1024
+ psutil_value = psutil.virtual_memory().buffers
+ self.assertAlmostEqual(
+ vmstat_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_active(self):
+ vmstat_value = vmstat('active memory') * 1024
+ psutil_value = psutil.virtual_memory().active
+ self.assertAlmostEqual(
+ vmstat_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_inactive(self):
+ vmstat_value = vmstat('inactive memory') * 1024
+ psutil_value = psutil.virtual_memory().inactive
+ self.assertAlmostEqual(
+ vmstat_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_shared(self):
+ free = free_physmem()
+ free_value = free.shared
+ if free_value == 0:
+ raise unittest.SkipTest("free does not support 'shared' column")
+ psutil_value = psutil.virtual_memory().shared
+ self.assertAlmostEqual(
+ free_value, psutil_value, delta=TOLERANCE_SYS_MEM,
+ msg='%s %s \n%s' % (free_value, psutil_value, free.output))
+
+ @retry_on_failure()
+ def test_available(self):
+ # "free" output format has changed at some point:
+ # https://github.com/giampaolo/psutil/issues/538#issuecomment-147192098
+ out = sh(["free", "-b"])
+ lines = out.split('\n')
+ if 'available' not in lines[0]:
+ raise unittest.SkipTest("free does not support 'available' column")
+ else:
+ free_value = int(lines[1].split()[-1])
+ psutil_value = psutil.virtual_memory().available
+ self.assertAlmostEqual(
+ free_value, psutil_value, delta=TOLERANCE_SYS_MEM,
+ msg='%s %s \n%s' % (free_value, psutil_value, out))
+
+ def test_warnings_on_misses(self):
+        # Emulate a case where /proc/meminfo provides only a few fields.
+ # psutil is supposed to set the missing fields to 0 and
+ # raise a warning.
+ with mock_open_content(
+ '/proc/meminfo',
+ textwrap.dedent("""\
+ Active(anon): 6145416 kB
+ Active(file): 2950064 kB
+ Inactive(anon): 574764 kB
+ Inactive(file): 1567648 kB
+ MemAvailable: -1 kB
+ MemFree: 2057400 kB
+ MemTotal: 16325648 kB
+ SReclaimable: 346648 kB
+ """).encode()) as m:
+ with warnings.catch_warnings(record=True) as ws:
+ warnings.simplefilter("always")
+ ret = psutil.virtual_memory()
+ assert m.called
+ self.assertEqual(len(ws), 1)
+ w = ws[0]
+ assert w.filename.endswith('psutil/_pslinux.py')
+ self.assertIn(
+ "memory stats couldn't be determined", str(w.message))
+ self.assertIn("cached", str(w.message))
+ self.assertIn("shared", str(w.message))
+ self.assertIn("active", str(w.message))
+ self.assertIn("inactive", str(w.message))
+ self.assertIn("buffers", str(w.message))
+ self.assertIn("available", str(w.message))
+ self.assertEqual(ret.cached, 0)
+ self.assertEqual(ret.active, 0)
+ self.assertEqual(ret.inactive, 0)
+ self.assertEqual(ret.shared, 0)
+ self.assertEqual(ret.buffers, 0)
+ self.assertEqual(ret.available, 0)
+ self.assertEqual(ret.slab, 0)
+
+ @retry_on_failure()
+ def test_avail_old_percent(self):
+ # Make sure that our calculation of avail mem for old kernels
+ # is off by max 15%.
+ mems = {}
+ with open_binary('/proc/meminfo') as f:
+ for line in f:
+ fields = line.split()
+ mems[fields[0]] = int(fields[1]) * 1024
+
+ a = calculate_avail_vmem(mems)
+ if b'MemAvailable:' in mems:
+ b = mems[b'MemAvailable:']
+ diff_percent = abs(a - b) / a * 100
+ self.assertLess(diff_percent, 15)
+
+ def test_avail_old_comes_from_kernel(self):
+ # Make sure "MemAvailable:" coluimn is used instead of relying
+ # on our internal algorithm to calculate avail mem.
+ with mock_open_content(
+ '/proc/meminfo',
+ textwrap.dedent("""\
+ Active: 9444728 kB
+ Active(anon): 6145416 kB
+ Active(file): 2950064 kB
+ Buffers: 287952 kB
+ Cached: 4818144 kB
+ Inactive(file): 1578132 kB
+ Inactive(anon): 574764 kB
+ Inactive(file): 1567648 kB
+ MemAvailable: 6574984 kB
+ MemFree: 2057400 kB
+ MemTotal: 16325648 kB
+ Shmem: 577588 kB
+ SReclaimable: 346648 kB
+ """).encode()) as m:
+ with warnings.catch_warnings(record=True) as ws:
+ ret = psutil.virtual_memory()
+ assert m.called
+ self.assertEqual(ret.available, 6574984 * 1024)
+ w = ws[0]
+ self.assertIn(
+ "inactive memory stats couldn't be determined", str(w.message))
+
+ def test_avail_old_missing_fields(self):
+ # Remove Active(file), Inactive(file) and SReclaimable
+ # from /proc/meminfo and make sure the fallback is used
+        # (free + cached).
+ with mock_open_content(
+ "/proc/meminfo",
+ textwrap.dedent("""\
+ Active: 9444728 kB
+ Active(anon): 6145416 kB
+ Buffers: 287952 kB
+ Cached: 4818144 kB
+ Inactive(file): 1578132 kB
+ Inactive(anon): 574764 kB
+ MemFree: 2057400 kB
+ MemTotal: 16325648 kB
+ Shmem: 577588 kB
+ """).encode()) as m:
+ with warnings.catch_warnings(record=True) as ws:
+ ret = psutil.virtual_memory()
+ assert m.called
+ self.assertEqual(ret.available, 2057400 * 1024 + 4818144 * 1024)
+ w = ws[0]
+ self.assertIn(
+ "inactive memory stats couldn't be determined", str(w.message))
+
+ def test_avail_old_missing_zoneinfo(self):
+ # Remove /proc/zoneinfo file. Make sure fallback is used
+ # (free + cached).
+ with mock_open_content(
+ "/proc/meminfo",
+ textwrap.dedent("""\
+ Active: 9444728 kB
+ Active(anon): 6145416 kB
+ Active(file): 2950064 kB
+ Buffers: 287952 kB
+ Cached: 4818144 kB
+ Inactive(file): 1578132 kB
+ Inactive(anon): 574764 kB
+ Inactive(file): 1567648 kB
+ MemFree: 2057400 kB
+ MemTotal: 16325648 kB
+ Shmem: 577588 kB
+ SReclaimable: 346648 kB
+ """).encode()):
+ with mock_open_exception(
+ "/proc/zoneinfo",
+ IOError(errno.ENOENT, 'no such file or directory')):
+ with warnings.catch_warnings(record=True) as ws:
+ ret = psutil.virtual_memory()
+ self.assertEqual(
+ ret.available, 2057400 * 1024 + 4818144 * 1024)
+ w = ws[0]
+ self.assertIn(
+ "inactive memory stats couldn't be determined",
+ str(w.message))
+
+ def test_virtual_memory_mocked(self):
+ # Emulate /proc/meminfo because neither vmstat nor free return slab.
+ def open_mock(name, *args, **kwargs):
+ if name == '/proc/meminfo':
+ return io.BytesIO(textwrap.dedent("""\
+ MemTotal: 100 kB
+ MemFree: 2 kB
+ MemAvailable: 3 kB
+ Buffers: 4 kB
+ Cached: 5 kB
+ SwapCached: 6 kB
+ Active: 7 kB
+ Inactive: 8 kB
+ Active(anon): 9 kB
+ Inactive(anon): 10 kB
+ Active(file): 11 kB
+ Inactive(file): 12 kB
+ Unevictable: 13 kB
+ Mlocked: 14 kB
+ SwapTotal: 15 kB
+ SwapFree: 16 kB
+ Dirty: 17 kB
+ Writeback: 18 kB
+ AnonPages: 19 kB
+ Mapped: 20 kB
+ Shmem: 21 kB
+ Slab: 22 kB
+ SReclaimable: 23 kB
+ SUnreclaim: 24 kB
+ KernelStack: 25 kB
+ PageTables: 26 kB
+ NFS_Unstable: 27 kB
+ Bounce: 28 kB
+ WritebackTmp: 29 kB
+ CommitLimit: 30 kB
+ Committed_AS: 31 kB
+ VmallocTotal: 32 kB
+ VmallocUsed: 33 kB
+ VmallocChunk: 34 kB
+ HardwareCorrupted: 35 kB
+ AnonHugePages: 36 kB
+ ShmemHugePages: 37 kB
+ ShmemPmdMapped: 38 kB
+ CmaTotal: 39 kB
+ CmaFree: 40 kB
+ HugePages_Total: 41 kB
+ HugePages_Free: 42 kB
+ HugePages_Rsvd: 43 kB
+ HugePages_Surp: 44 kB
+ Hugepagesize: 45 kB
+ DirectMap46k: 46 kB
+ DirectMap47M: 47 kB
+ DirectMap48G: 48 kB
+ """).encode())
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
+ mem = psutil.virtual_memory()
+ assert m.called
+ self.assertEqual(mem.total, 100 * 1024)
+ self.assertEqual(mem.free, 2 * 1024)
+ self.assertEqual(mem.buffers, 4 * 1024)
+ # cached mem also includes reclaimable memory
+ self.assertEqual(mem.cached, (5 + 23) * 1024)
+ self.assertEqual(mem.shared, 21 * 1024)
+ self.assertEqual(mem.active, 7 * 1024)
+ self.assertEqual(mem.inactive, 8 * 1024)
+ self.assertEqual(mem.slab, 22 * 1024)
+ self.assertEqual(mem.available, 3 * 1024)
+
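The "cached also includes reclaimable memory" detail asserted above can be checked against a live /proc/meminfo. A rough sketch (values may drift slightly between the file read and the psutil call, since memory is in constant flux):

    import psutil

    fields = {}
    with open('/proc/meminfo', 'rb') as f:
        for line in f:
            parts = line.split()
            fields[parts[0].rstrip(b':')] = int(parts[1]) * 1024

    expected_cached = fields[b'Cached'] + fields.get(b'SReclaimable', 0)
    print(expected_cached, psutil.virtual_memory().cached)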
+
+# =====================================================================
+# --- system swap memory
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemSwapMemory(PsutilTestCase):
+
+ @staticmethod
+ def meminfo_has_swap_info():
+ """Return True if /proc/meminfo provides swap metrics."""
+ with open("/proc/meminfo") as f:
+ data = f.read()
+ return 'SwapTotal:' in data and 'SwapFree:' in data
+
+ def test_total(self):
+ free_value = free_swap().total
+ psutil_value = psutil.swap_memory().total
+ return self.assertAlmostEqual(
+ free_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_used(self):
+ free_value = free_swap().used
+ psutil_value = psutil.swap_memory().used
+ return self.assertAlmostEqual(
+ free_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_free(self):
+ free_value = free_swap().free
+ psutil_value = psutil.swap_memory().free
+ return self.assertAlmostEqual(
+ free_value, psutil_value, delta=TOLERANCE_SYS_MEM)
+
+ def test_missing_sin_sout(self):
+ with mock.patch('psutil._common.open', create=True) as m:
+ with warnings.catch_warnings(record=True) as ws:
+ warnings.simplefilter("always")
+ ret = psutil.swap_memory()
+ assert m.called
+ self.assertEqual(len(ws), 1)
+ w = ws[0]
+ assert w.filename.endswith('psutil/_pslinux.py')
+ self.assertIn(
+ "'sin' and 'sout' swap memory stats couldn't "
+ "be determined", str(w.message))
+ self.assertEqual(ret.sin, 0)
+ self.assertEqual(ret.sout, 0)
+
+ def test_no_vmstat_mocked(self):
+ # see https://github.com/giampaolo/psutil/issues/722
+ with mock_open_exception(
+ "/proc/vmstat",
+ IOError(errno.ENOENT, 'no such file or directory')) as m:
+ with warnings.catch_warnings(record=True) as ws:
+ warnings.simplefilter("always")
+ ret = psutil.swap_memory()
+ assert m.called
+ self.assertEqual(len(ws), 1)
+ w = ws[0]
+ assert w.filename.endswith('psutil/_pslinux.py')
+ self.assertIn(
+ "'sin' and 'sout' swap memory stats couldn't "
+ "be determined and were set to 0",
+ str(w.message))
+ self.assertEqual(ret.sin, 0)
+ self.assertEqual(ret.sout, 0)
+
+ def test_meminfo_against_sysinfo(self):
+ # Make sure the content of /proc/meminfo about swap memory
+ # matches sysinfo() syscall, see:
+ # https://github.com/giampaolo/psutil/issues/1015
+ if not self.meminfo_has_swap_info():
+            raise self.skipTest("/proc/meminfo has no swap metrics")
+ with mock.patch('psutil._pslinux.cext.linux_sysinfo') as m:
+ swap = psutil.swap_memory()
+ assert not m.called
+ import psutil._psutil_linux as cext
+ _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
+ total *= unit_multiplier
+ free *= unit_multiplier
+ self.assertEqual(swap.total, total)
+ self.assertAlmostEqual(swap.free, free, delta=TOLERANCE_SYS_MEM)
+
+ def test_emulate_meminfo_has_no_metrics(self):
+ # Emulate a case where /proc/meminfo provides no swap metrics
+ # in which case sysinfo() syscall is supposed to be used
+ # as a fallback.
+ with mock_open_content("/proc/meminfo", b"") as m:
+ psutil.swap_memory()
+ assert m.called
+
+
+# =====================================================================
+# --- system CPU
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemCPUTimes(PsutilTestCase):
+
+ def test_fields(self):
+ fields = psutil.cpu_times()._fields
+ kernel_ver = re.findall(r'\d+\.\d+\.\d+', os.uname()[2])[0]
+ kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
+ if kernel_ver_info >= (2, 6, 11):
+ self.assertIn('steal', fields)
+ else:
+ self.assertNotIn('steal', fields)
+ if kernel_ver_info >= (2, 6, 24):
+ self.assertIn('guest', fields)
+ else:
+ self.assertNotIn('guest', fields)
+ if kernel_ver_info >= (3, 2, 0):
+ self.assertIn('guest_nice', fields)
+ else:
+ self.assertNotIn('guest_nice', fields)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemCPUCountLogical(PsutilTestCase):
+
+ @unittest.skipIf(not os.path.exists("/sys/devices/system/cpu/online"),
+ "/sys/devices/system/cpu/online does not exist")
+ def test_against_sysdev_cpu_online(self):
+ with open("/sys/devices/system/cpu/online") as f:
+ value = f.read().strip()
+ if "-" in str(value):
+ value = int(value.split('-')[1]) + 1
+ self.assertEqual(psutil.cpu_count(), value)
+
+ @unittest.skipIf(not os.path.exists("/sys/devices/system/cpu"),
+ "/sys/devices/system/cpu does not exist")
+ def test_against_sysdev_cpu_num(self):
+ ls = os.listdir("/sys/devices/system/cpu")
+ count = len([x for x in ls if re.search(r"cpu\d+$", x) is not None])
+ self.assertEqual(psutil.cpu_count(), count)
+
+ @unittest.skipIf(not which("nproc"), "nproc utility not available")
+ def test_against_nproc(self):
+ num = int(sh("nproc --all"))
+ self.assertEqual(psutil.cpu_count(logical=True), num)
+
+ @unittest.skipIf(not which("lscpu"), "lscpu utility not available")
+ def test_against_lscpu(self):
+ out = sh("lscpu -p")
+ num = len([x for x in out.split('\n') if not x.startswith('#')])
+ self.assertEqual(psutil.cpu_count(logical=True), num)
+
+ def test_emulate_fallbacks(self):
+ import psutil._pslinux
+ original = psutil._pslinux.cpu_count_logical()
+ # Here we want to mock os.sysconf("SC_NPROCESSORS_ONLN") in
+ # order to cause the parsing of /proc/cpuinfo and /proc/stat.
+ with mock.patch(
+ 'psutil._pslinux.os.sysconf', side_effect=ValueError) as m:
+ self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
+ assert m.called
+
+ # Let's have open() return empty data and make sure None is
+ # returned ('cause we mimic os.cpu_count()).
+ with mock.patch('psutil._common.open', create=True) as m:
+ self.assertIsNone(psutil._pslinux.cpu_count_logical())
+ self.assertEqual(m.call_count, 2)
+ # /proc/stat should be the last one
+ self.assertEqual(m.call_args[0][0], '/proc/stat')
+
+ # Let's push this a bit further and make sure /proc/cpuinfo
+ # parsing works as expected.
+ with open('/proc/cpuinfo', 'rb') as f:
+ cpuinfo_data = f.read()
+ fake_file = io.BytesIO(cpuinfo_data)
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m:
+ self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
+
+        # Finally, let's make /proc/cpuinfo return meaningless data;
+        # this way we'll fall back on /proc/stat.
+        with mock_open_content('/proc/cpuinfo', b"") as m:
+            self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
+            assert m.called
+
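The fallback chain exercised by test_emulate_fallbacks() above (sysconf, then /proc/cpuinfo, then /proc/stat) can be summarized with a simplified sketch; this is an illustration of the idea, not psutil's actual implementation:

    import os

    def logical_cpu_count_fallbacks():
        # 1) POSIX sysconf(), the common fast path.
        try:
            num = os.sysconf("SC_NPROCESSORS_ONLN")
            if num > 0:
                return num
        except (AttributeError, ValueError, OSError):
            pass
        # 2) Count "processor" entries in /proc/cpuinfo.
        try:
            with open('/proc/cpuinfo', 'rb') as f:
                num = sum(1 for line in f if line.startswith(b'processor'))
            if num > 0:
                return num
        except IOError:
            pass
        # 3) Count per-CPU lines ("cpu0", "cpu1", ...) in /proc/stat.
        try:
            with open('/proc/stat', 'rb') as f:
                num = len([x for x in f if x.startswith(b'cpu')]) - 1
            return num if num > 0 else None
        except IOError:
            return None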
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemCPUCountCores(PsutilTestCase):
+
+ @unittest.skipIf(not which("lscpu"), "lscpu utility not available")
+ def test_against_lscpu(self):
+ out = sh("lscpu -p")
+ core_ids = set()
+ for line in out.split('\n'):
+ if not line.startswith('#'):
+ fields = line.split(',')
+ core_ids.add(fields[1])
+ self.assertEqual(psutil.cpu_count(logical=False), len(core_ids))
+
+ def test_method_2(self):
+ meth_1 = psutil._pslinux.cpu_count_cores()
+ with mock.patch('glob.glob', return_value=[]) as m:
+ meth_2 = psutil._pslinux.cpu_count_cores()
+ assert m.called
+ if meth_1 is not None:
+ self.assertEqual(meth_1, meth_2)
+
+ def test_emulate_none(self):
+ with mock.patch('glob.glob', return_value=[]) as m1:
+ with mock.patch('psutil._common.open', create=True) as m2:
+ self.assertIsNone(psutil._pslinux.cpu_count_cores())
+ assert m1.called
+ assert m2.called
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemCPUFrequency(PsutilTestCase):
+
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_emulate_use_second_file(self):
+ # https://github.com/giampaolo/psutil/issues/981
+ def path_exists_mock(path):
+ if path.startswith("/sys/devices/system/cpu/cpufreq/policy"):
+ return False
+ else:
+ return orig_exists(path)
+
+ orig_exists = os.path.exists
+ with mock.patch("os.path.exists", side_effect=path_exists_mock,
+ create=True):
+ assert psutil.cpu_freq()
+
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_emulate_use_cpuinfo(self):
+ # Emulate a case where /sys/devices/system/cpu/cpufreq* does not
+ # exist and /proc/cpuinfo is used instead.
+ def path_exists_mock(path):
+ if path.startswith('/sys/devices/system/cpu/'):
+ return False
+ else:
+ return os_path_exists(path)
+
+ os_path_exists = os.path.exists
+ try:
+ with mock.patch("os.path.exists", side_effect=path_exists_mock):
+ reload_module(psutil._pslinux)
+ ret = psutil.cpu_freq()
+ assert ret
+ self.assertEqual(ret.max, 0.0)
+ self.assertEqual(ret.min, 0.0)
+                for freq in psutil.cpu_freq(percpu=True):
+                    self.assertEqual(freq.max, 0.0)
+                    self.assertEqual(freq.min, 0.0)
+ finally:
+ reload_module(psutil._pslinux)
+ reload_module(psutil)
+
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_emulate_data(self):
+ def open_mock(name, *args, **kwargs):
+ if (name.endswith('/scaling_cur_freq') and
+ name.startswith("/sys/devices/system/cpu/cpufreq/policy")):
+ return io.BytesIO(b"500000")
+ elif (name.endswith('/scaling_min_freq') and
+ name.startswith("/sys/devices/system/cpu/cpufreq/policy")):
+ return io.BytesIO(b"600000")
+ elif (name.endswith('/scaling_max_freq') and
+ name.startswith("/sys/devices/system/cpu/cpufreq/policy")):
+ return io.BytesIO(b"700000")
+ elif name == '/proc/cpuinfo':
+ return io.BytesIO(b"cpu MHz : 500")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ with mock.patch(
+ 'os.path.exists', return_value=True):
+ freq = psutil.cpu_freq()
+ self.assertEqual(freq.current, 500.0)
+ # when /proc/cpuinfo is used min and max frequencies are not
+ # available and are set to 0.
+ if freq.min != 0.0:
+ self.assertEqual(freq.min, 600.0)
+ if freq.max != 0.0:
+ self.assertEqual(freq.max, 700.0)
+
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_emulate_multi_cpu(self):
+ def open_mock(name, *args, **kwargs):
+ n = name
+ if (n.endswith('/scaling_cur_freq') and
+ n.startswith("/sys/devices/system/cpu/cpufreq/policy0")):
+ return io.BytesIO(b"100000")
+ elif (n.endswith('/scaling_min_freq') and
+ n.startswith("/sys/devices/system/cpu/cpufreq/policy0")):
+ return io.BytesIO(b"200000")
+ elif (n.endswith('/scaling_max_freq') and
+ n.startswith("/sys/devices/system/cpu/cpufreq/policy0")):
+ return io.BytesIO(b"300000")
+ elif (n.endswith('/scaling_cur_freq') and
+ n.startswith("/sys/devices/system/cpu/cpufreq/policy1")):
+ return io.BytesIO(b"400000")
+ elif (n.endswith('/scaling_min_freq') and
+ n.startswith("/sys/devices/system/cpu/cpufreq/policy1")):
+ return io.BytesIO(b"500000")
+ elif (n.endswith('/scaling_max_freq') and
+ n.startswith("/sys/devices/system/cpu/cpufreq/policy1")):
+ return io.BytesIO(b"600000")
+ elif name == '/proc/cpuinfo':
+ return io.BytesIO(b"cpu MHz : 100\n"
+ b"cpu MHz : 400")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ with mock.patch('os.path.exists', return_value=True):
+ with mock.patch('psutil._pslinux.cpu_count_logical',
+ return_value=2):
+ freq = psutil.cpu_freq(percpu=True)
+ self.assertEqual(freq[0].current, 100.0)
+ if freq[0].min != 0.0:
+ self.assertEqual(freq[0].min, 200.0)
+ if freq[0].max != 0.0:
+ self.assertEqual(freq[0].max, 300.0)
+ self.assertEqual(freq[1].current, 400.0)
+ if freq[1].min != 0.0:
+ self.assertEqual(freq[1].min, 500.0)
+ if freq[1].max != 0.0:
+ self.assertEqual(freq[1].max, 600.0)
+
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_emulate_no_scaling_cur_freq_file(self):
+ # See: https://github.com/giampaolo/psutil/issues/1071
+ def open_mock(name, *args, **kwargs):
+ if name.endswith('/scaling_cur_freq'):
+ raise IOError(errno.ENOENT, "")
+ elif name.endswith('/cpuinfo_cur_freq'):
+ return io.BytesIO(b"200000")
+ elif name == '/proc/cpuinfo':
+ return io.BytesIO(b"cpu MHz : 200")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ with mock.patch('os.path.exists', return_value=True):
+ with mock.patch('psutil._pslinux.cpu_count_logical',
+ return_value=1):
+ freq = psutil.cpu_freq()
+ self.assertEqual(freq.current, 200)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemCPUStats(PsutilTestCase):
+
+ def test_ctx_switches(self):
+ vmstat_value = vmstat("context switches")
+ psutil_value = psutil.cpu_stats().ctx_switches
+ self.assertAlmostEqual(vmstat_value, psutil_value, delta=500)
+
+ def test_interrupts(self):
+ vmstat_value = vmstat("interrupts")
+ psutil_value = psutil.cpu_stats().interrupts
+ self.assertAlmostEqual(vmstat_value, psutil_value, delta=500)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestLoadAvg(PsutilTestCase):
+
+ @unittest.skipIf(not HAS_GETLOADAVG, "not supported")
+ def test_getloadavg(self):
+ psutil_value = psutil.getloadavg()
+ with open("/proc/loadavg", "r") as f:
+ proc_value = f.read().split()
+
+ self.assertAlmostEqual(float(proc_value[0]), psutil_value[0], delta=1)
+ self.assertAlmostEqual(float(proc_value[1]), psutil_value[1], delta=1)
+ self.assertAlmostEqual(float(proc_value[2]), psutil_value[2], delta=1)
+
+
+# =====================================================================
+# --- system network
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemNetIfAddrs(PsutilTestCase):
+
+ def test_ips(self):
+ for name, addrs in psutil.net_if_addrs().items():
+ for addr in addrs:
+ if addr.family == psutil.AF_LINK:
+ self.assertEqual(addr.address, get_mac_address(name))
+ elif addr.family == socket.AF_INET:
+ self.assertEqual(addr.address, get_ipv4_address(name))
+ self.assertEqual(addr.netmask, get_ipv4_netmask(name))
+ if addr.broadcast is not None:
+ self.assertEqual(addr.broadcast,
+ get_ipv4_broadcast(name))
+ else:
+ self.assertEqual(get_ipv4_broadcast(name), '0.0.0.0')
+ elif addr.family == socket.AF_INET6:
+ # IPv6 addresses can have a percent symbol at the end.
+ # E.g. these 2 are equivalent:
+ # "fe80::1ff:fe23:4567:890a"
+ # "fe80::1ff:fe23:4567:890a%eth0"
+ # That is the "zone id" portion, which usually is the name
+ # of the network interface.
+ address = addr.address.split('%')[0]
+ self.assertIn(address, get_ipv6_addresses(name))
+
+ # XXX - not reliable when having virtual NICs installed by Docker.
+ # @unittest.skipIf(not which('ip'), "'ip' utility not available")
+ # def test_net_if_names(self):
+ # out = sh("ip addr").strip()
+ # nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x]
+ # found = 0
+ # for line in out.split('\n'):
+ # line = line.strip()
+ # if re.search(r"^\d+:", line):
+ # found += 1
+ # name = line.split(':')[1].strip()
+ # self.assertIn(name, nics)
+ # self.assertEqual(len(nics), found, msg="%s\n---\n%s" % (
+ # pprint.pformat(nics), out))
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemNetIfStats(PsutilTestCase):
+
+ @unittest.skipIf(not which("ifconfig"), "ifconfig utility not available")
+ def test_against_ifconfig(self):
+ for name, stats in psutil.net_if_stats().items():
+ try:
+ out = sh("ifconfig %s" % name)
+ except RuntimeError:
+ pass
+ else:
+ self.assertEqual(stats.isup, 'RUNNING' in out, msg=out)
+ self.assertEqual(stats.mtu,
+ int(re.findall(r'(?i)MTU[: ](\d+)', out)[0]))
+
+ def test_mtu(self):
+ for name, stats in psutil.net_if_stats().items():
+ with open("/sys/class/net/%s/mtu" % name, "rt") as f:
+ self.assertEqual(stats.mtu, int(f.read().strip()))
+
+ @unittest.skipIf(not which("ifconfig"), "ifconfig utility not available")
+ def test_flags(self):
+ # first line looks like this:
+ # "eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500"
+ matches_found = 0
+ for name, stats in psutil.net_if_stats().items():
+ try:
+ out = sh("ifconfig %s" % name)
+ except RuntimeError:
+ pass
+ else:
+ match = re.search(r"flags=(\d+)?<(.*?)>", out)
+ if match and len(match.groups()) >= 2:
+ matches_found += 1
+ ifconfig_flags = set(match.group(2).lower().split(","))
+ psutil_flags = set(stats.flags.split(","))
+ self.assertEqual(ifconfig_flags, psutil_flags)
+ else:
+ # ifconfig has a different output on CentOS 6
+ # let's try that
+ match = re.search(r"(.*) MTU:(\d+) Metric:(\d+)", out)
+ if match and len(match.groups()) >= 3:
+ matches_found += 1
+ ifconfig_flags = set(match.group(1).lower().split())
+ psutil_flags = set(stats.flags.split(","))
+ self.assertEqual(ifconfig_flags, psutil_flags)
+
+ if not matches_found:
+            self.fail("no matches were found")
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemNetIOCounters(PsutilTestCase):
+
+ @unittest.skipIf(not which("ifconfig"), "ifconfig utility not available")
+ @retry_on_failure()
+ def test_against_ifconfig(self):
+ def ifconfig(nic):
+ ret = {}
+            out = sh("ifconfig %s" % nic)
+ ret['packets_recv'] = int(
+ re.findall(r'RX packets[: ](\d+)', out)[0])
+ ret['packets_sent'] = int(
+ re.findall(r'TX packets[: ](\d+)', out)[0])
+ ret['errin'] = int(re.findall(r'errors[: ](\d+)', out)[0])
+ ret['errout'] = int(re.findall(r'errors[: ](\d+)', out)[1])
+ ret['dropin'] = int(re.findall(r'dropped[: ](\d+)', out)[0])
+ ret['dropout'] = int(re.findall(r'dropped[: ](\d+)', out)[1])
+ ret['bytes_recv'] = int(
+ re.findall(r'RX (?:packets \d+ +)?bytes[: ](\d+)', out)[0])
+ ret['bytes_sent'] = int(
+ re.findall(r'TX (?:packets \d+ +)?bytes[: ](\d+)', out)[0])
+ return ret
+
+ nio = psutil.net_io_counters(pernic=True, nowrap=False)
+ for name, stats in nio.items():
+ try:
+ ifconfig_ret = ifconfig(name)
+ except RuntimeError:
+ continue
+ self.assertAlmostEqual(
+ stats.bytes_recv, ifconfig_ret['bytes_recv'], delta=1024 * 5)
+ self.assertAlmostEqual(
+ stats.bytes_sent, ifconfig_ret['bytes_sent'], delta=1024 * 5)
+ self.assertAlmostEqual(
+ stats.packets_recv, ifconfig_ret['packets_recv'], delta=1024)
+ self.assertAlmostEqual(
+ stats.packets_sent, ifconfig_ret['packets_sent'], delta=1024)
+ self.assertAlmostEqual(
+ stats.errin, ifconfig_ret['errin'], delta=10)
+ self.assertAlmostEqual(
+ stats.errout, ifconfig_ret['errout'], delta=10)
+ self.assertAlmostEqual(
+ stats.dropin, ifconfig_ret['dropin'], delta=10)
+ self.assertAlmostEqual(
+ stats.dropout, ifconfig_ret['dropout'], delta=10)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemNetConnections(PsutilTestCase):
+
+ @mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError)
+ @mock.patch('psutil._pslinux.supports_ipv6', return_value=False)
+ def test_emulate_ipv6_unsupported(self, supports_ipv6, inet_ntop):
+ # see: https://github.com/giampaolo/psutil/issues/623
+ try:
+ s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ self.addCleanup(s.close)
+ s.bind(("::1", 0))
+ except socket.error:
+ pass
+ psutil.net_connections(kind='inet6')
+
+ def test_emulate_unix(self):
+ with mock_open_content(
+ '/proc/net/unix',
+ textwrap.dedent("""\
+ 0: 00000003 000 000 0001 03 462170 @/tmp/dbus-Qw2hMPIU3n
+ 0: 00000003 000 000 0001 03 35010 @/tmp/dbus-tB2X8h69BQ
+ 0: 00000003 000 000 0001 03 34424 @/tmp/dbus-cHy80Y8O
+ 000000000000000000000000000000000000000000000000000000
+ """)) as m:
+ psutil.net_connections(kind='unix')
+ assert m.called
+
+
+# =====================================================================
+# --- system disks
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemDiskPartitions(PsutilTestCase):
+
+ @unittest.skipIf(not hasattr(os, 'statvfs'), "os.statvfs() not available")
+ @skip_on_not_implemented()
+ def test_against_df(self):
+ # test psutil.disk_usage() and psutil.disk_partitions()
+ # against "df -a"
+ def df(path):
+ out = sh('df -P -B 1 "%s"' % path).strip()
+ lines = out.split('\n')
+ lines.pop(0)
+ line = lines.pop(0)
+ dev, total, used, free = line.split()[:4]
+ if dev == 'none':
+ dev = ''
+ total, used, free = int(total), int(used), int(free)
+ return dev, total, used, free
+
+ for part in psutil.disk_partitions(all=False):
+ usage = psutil.disk_usage(part.mountpoint)
+ dev, total, used, free = df(part.mountpoint)
+ self.assertEqual(usage.total, total)
+ self.assertAlmostEqual(usage.free, free,
+ delta=TOLERANCE_DISK_USAGE)
+ self.assertAlmostEqual(usage.used, used,
+ delta=TOLERANCE_DISK_USAGE)
+
+ def test_zfs_fs(self):
+ # Test that ZFS partitions are returned.
+ with open("/proc/filesystems", "r") as f:
+ data = f.read()
+ if 'zfs' in data:
+ for part in psutil.disk_partitions():
+ if part.fstype == 'zfs':
+ break
+ else:
+                self.fail("couldn't find any ZFS partition")
+ else:
+ # No ZFS partitions on this system. Let's fake one.
+ fake_file = io.StringIO(u("nodev\tzfs\n"))
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m1:
+ with mock.patch(
+ 'psutil._pslinux.cext.disk_partitions',
+ return_value=[('/dev/sdb3', '/', 'zfs', 'rw')]) as m2:
+ ret = psutil.disk_partitions()
+ assert m1.called
+ assert m2.called
+ assert ret
+ self.assertEqual(ret[0].fstype, 'zfs')
+
+ def test_emulate_realpath_fail(self):
+ # See: https://github.com/giampaolo/psutil/issues/1307
+ try:
+ with mock.patch('os.path.realpath',
+ return_value='/non/existent') as m:
+ with self.assertRaises(FileNotFoundError):
+ psutil.disk_partitions()
+ assert m.called
+ finally:
+ psutil.PROCFS_PATH = "/proc"
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSystemDiskIoCounters(PsutilTestCase):
+
+ def test_emulate_kernel_2_4(self):
+ # Tests /proc/diskstats parsing format for 2.4 kernels, see:
+ # https://github.com/giampaolo/psutil/issues/767
+ with mock_open_content(
+ '/proc/diskstats',
+ " 3 0 1 hda 2 3 4 5 6 7 8 9 10 11 12"):
+ with mock.patch('psutil._pslinux.is_storage_device',
+ return_value=True):
+ ret = psutil.disk_io_counters(nowrap=False)
+ self.assertEqual(ret.read_count, 1)
+ self.assertEqual(ret.read_merged_count, 2)
+ self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE)
+ self.assertEqual(ret.read_time, 4)
+ self.assertEqual(ret.write_count, 5)
+ self.assertEqual(ret.write_merged_count, 6)
+ self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE)
+ self.assertEqual(ret.write_time, 8)
+ self.assertEqual(ret.busy_time, 10)
+
+ def test_emulate_kernel_2_6_full(self):
+ # Tests /proc/diskstats parsing format for 2.6 kernels,
+ # lines reporting all metrics:
+ # https://github.com/giampaolo/psutil/issues/767
+ with mock_open_content(
+ '/proc/diskstats',
+ " 3 0 hda 1 2 3 4 5 6 7 8 9 10 11"):
+ with mock.patch('psutil._pslinux.is_storage_device',
+ return_value=True):
+ ret = psutil.disk_io_counters(nowrap=False)
+ self.assertEqual(ret.read_count, 1)
+ self.assertEqual(ret.read_merged_count, 2)
+ self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE)
+ self.assertEqual(ret.read_time, 4)
+ self.assertEqual(ret.write_count, 5)
+ self.assertEqual(ret.write_merged_count, 6)
+ self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE)
+ self.assertEqual(ret.write_time, 8)
+ self.assertEqual(ret.busy_time, 10)
+
+ def test_emulate_kernel_2_6_limited(self):
+ # Tests /proc/diskstats parsing format for 2.6 kernels,
+ # where one line of /proc/partitions return a limited
+ # amount of metrics when it bumps into a partition
+ # (instead of a disk). See:
+ # https://github.com/giampaolo/psutil/issues/767
+ with mock_open_content(
+ '/proc/diskstats',
+ " 3 1 hda 1 2 3 4"):
+ with mock.patch('psutil._pslinux.is_storage_device',
+ return_value=True):
+ ret = psutil.disk_io_counters(nowrap=False)
+ self.assertEqual(ret.read_count, 1)
+ self.assertEqual(ret.read_bytes, 2 * SECTOR_SIZE)
+ self.assertEqual(ret.write_count, 3)
+ self.assertEqual(ret.write_bytes, 4 * SECTOR_SIZE)
+
+ self.assertEqual(ret.read_merged_count, 0)
+ self.assertEqual(ret.read_time, 0)
+ self.assertEqual(ret.write_merged_count, 0)
+ self.assertEqual(ret.write_time, 0)
+ self.assertEqual(ret.busy_time, 0)
+
+ def test_emulate_include_partitions(self):
+ # Make sure that when perdisk=True disk partitions are returned,
+ # see:
+ # https://github.com/giampaolo/psutil/pull/1313#issuecomment-408626842
+ with mock_open_content(
+ '/proc/diskstats',
+ textwrap.dedent("""\
+ 3 0 nvme0n1 1 2 3 4 5 6 7 8 9 10 11
+ 3 0 nvme0n1p1 1 2 3 4 5 6 7 8 9 10 11
+ """)):
+ with mock.patch('psutil._pslinux.is_storage_device',
+ return_value=False):
+ ret = psutil.disk_io_counters(perdisk=True, nowrap=False)
+ self.assertEqual(len(ret), 2)
+ self.assertEqual(ret['nvme0n1'].read_count, 1)
+ self.assertEqual(ret['nvme0n1p1'].read_count, 1)
+ self.assertEqual(ret['nvme0n1'].write_count, 5)
+ self.assertEqual(ret['nvme0n1p1'].write_count, 5)
+
+ def test_emulate_exclude_partitions(self):
+ # Make sure that when perdisk=False partitions (e.g. 'sda1',
+ # 'nvme0n1p1') are skipped and not included in the total count.
+ # https://github.com/giampaolo/psutil/pull/1313#issuecomment-408626842
+ with mock_open_content(
+ '/proc/diskstats',
+ textwrap.dedent("""\
+ 3 0 nvme0n1 1 2 3 4 5 6 7 8 9 10 11
+ 3 0 nvme0n1p1 1 2 3 4 5 6 7 8 9 10 11
+ """)):
+ with mock.patch('psutil._pslinux.is_storage_device',
+ return_value=False):
+ ret = psutil.disk_io_counters(perdisk=False, nowrap=False)
+ self.assertIsNone(ret)
+
+        # Same data, but now pretend only nvme0n1 is a storage device:
+        # the partition's stats must not be summed into the total.
+ def is_storage_device(name):
+ return name == 'nvme0n1'
+
+ with mock_open_content(
+ '/proc/diskstats',
+ textwrap.dedent("""\
+ 3 0 nvme0n1 1 2 3 4 5 6 7 8 9 10 11
+ 3 0 nvme0n1p1 1 2 3 4 5 6 7 8 9 10 11
+ """)):
+ with mock.patch('psutil._pslinux.is_storage_device',
+ create=True, side_effect=is_storage_device):
+ ret = psutil.disk_io_counters(perdisk=False, nowrap=False)
+ self.assertEqual(ret.read_count, 1)
+ self.assertEqual(ret.write_count, 5)
+
+ def test_emulate_use_sysfs(self):
+ def exists(path):
+ if path == '/proc/diskstats':
+ return False
+ return True
+
+ wprocfs = psutil.disk_io_counters(perdisk=True)
+ with mock.patch('psutil._pslinux.os.path.exists',
+ create=True, side_effect=exists):
+ wsysfs = psutil.disk_io_counters(perdisk=True)
+ self.assertEqual(len(wprocfs), len(wsysfs))
+
+ def test_emulate_not_impl(self):
+ def exists(path):
+ return False
+
+ with mock.patch('psutil._pslinux.os.path.exists',
+ create=True, side_effect=exists):
+ self.assertRaises(NotImplementedError, psutil.disk_io_counters)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestRootFsDeviceFinder(PsutilTestCase):
+
+ def setUp(self):
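+        # Compute major/minor of the block device mounted on "/": for
+        # e.g. 8:1 the kernel exposes /sys/dev/block/8:1/uevent, whose
+        # DEVNAME entry names the real device; this is what lets
+        # RootFsDeviceFinder resolve the "/dev/root" alias.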
+ dev = os.stat("/").st_dev
+ self.major = os.major(dev)
+ self.minor = os.minor(dev)
+
+ def test_call_methods(self):
+ finder = RootFsDeviceFinder()
+ if os.path.exists("/proc/partitions"):
+ finder.ask_proc_partitions()
+ else:
+ self.assertRaises(FileNotFoundError, finder.ask_proc_partitions)
+ if os.path.exists("/sys/dev/block/%s:%s/uevent" % (
+ self.major, self.minor)):
+ finder.ask_sys_dev_block()
+ else:
+ self.assertRaises(FileNotFoundError, finder.ask_sys_dev_block)
+ finder.ask_sys_class_block()
+
+ @unittest.skipIf(GITHUB_ACTIONS, "unsupported on GITHUB_ACTIONS")
+ def test_comparisons(self):
+ finder = RootFsDeviceFinder()
+ self.assertIsNotNone(finder.find())
+
+ a = b = c = None
+ if os.path.exists("/proc/partitions"):
+ a = finder.ask_proc_partitions()
+ if os.path.exists("/sys/dev/block/%s:%s/uevent" % (
+ self.major, self.minor)):
+ b = finder.ask_sys_class_block()
+ c = finder.ask_sys_dev_block()
+
+ base = a or b or c
+ if base and a:
+ self.assertEqual(base, a)
+ if base and b:
+ self.assertEqual(base, b)
+ if base and c:
+ self.assertEqual(base, c)
+
+ @unittest.skipIf(not which("findmnt"), "findmnt utility not available")
+ @unittest.skipIf(GITHUB_ACTIONS, "unsupported on GITHUB_ACTIONS")
+ def test_against_findmnt(self):
+ psutil_value = RootFsDeviceFinder().find()
+ findmnt_value = sh("findmnt -o SOURCE -rn /")
+ self.assertEqual(psutil_value, findmnt_value)
+
+ def test_disk_partitions_mocked(self):
+ with mock.patch(
+ 'psutil._pslinux.cext.disk_partitions',
+ return_value=[('/dev/root', '/', 'ext4', 'rw')]) as m:
+ part = psutil.disk_partitions()[0]
+ assert m.called
+ if not GITHUB_ACTIONS:
+ self.assertNotEqual(part.device, "/dev/root")
+ self.assertEqual(part.device, RootFsDeviceFinder().find())
+ else:
+ self.assertEqual(part.device, "/dev/root")
+
+
+# =====================================================================
+# --- misc
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestMisc(PsutilTestCase):
+
+ def test_boot_time(self):
+ vmstat_value = vmstat('boot time')
+ psutil_value = psutil.boot_time()
+ self.assertEqual(int(vmstat_value), int(psutil_value))
+
+ def test_no_procfs_on_import(self):
+ my_procfs = self.get_testfn()
+ os.mkdir(my_procfs)
+
+ with open(os.path.join(my_procfs, 'stat'), 'w') as f:
+ f.write('cpu 0 0 0 0 0 0 0 0 0 0\n')
+ f.write('cpu0 0 0 0 0 0 0 0 0 0 0\n')
+ f.write('cpu1 0 0 0 0 0 0 0 0 0 0\n')
+
+ try:
+ orig_open = open
+
+ def open_mock(name, *args, **kwargs):
+ if name.startswith('/proc'):
+ raise IOError(errno.ENOENT, 'rejecting access for test')
+ return orig_open(name, *args, **kwargs)
+
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ reload_module(psutil)
+
+ self.assertRaises(IOError, psutil.cpu_times)
+ self.assertRaises(IOError, psutil.cpu_times, percpu=True)
+ self.assertRaises(IOError, psutil.cpu_percent)
+ self.assertRaises(IOError, psutil.cpu_percent, percpu=True)
+ self.assertRaises(IOError, psutil.cpu_times_percent)
+ self.assertRaises(
+ IOError, psutil.cpu_times_percent, percpu=True)
+
+ psutil.PROCFS_PATH = my_procfs
+
+ self.assertEqual(psutil.cpu_percent(), 0)
+ self.assertEqual(sum(psutil.cpu_times_percent()), 0)
+
+ # since we don't know the number of CPUs at import time,
+ # we awkwardly say there are none until the second call
+ per_cpu_percent = psutil.cpu_percent(percpu=True)
+ self.assertEqual(sum(per_cpu_percent), 0)
+
+ # ditto awkward length
+ per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
+ self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0)
+
+ # much user, very busy
+ with open(os.path.join(my_procfs, 'stat'), 'w') as f:
+ f.write('cpu 1 0 0 0 0 0 0 0 0 0\n')
+ f.write('cpu0 1 0 0 0 0 0 0 0 0 0\n')
+ f.write('cpu1 1 0 0 0 0 0 0 0 0 0\n')
+
+ self.assertNotEqual(psutil.cpu_percent(), 0)
+ self.assertNotEqual(
+ sum(psutil.cpu_percent(percpu=True)), 0)
+ self.assertNotEqual(sum(psutil.cpu_times_percent()), 0)
+ self.assertNotEqual(
+ sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0)
+ finally:
+ shutil.rmtree(my_procfs)
+ reload_module(psutil)
+
+ self.assertEqual(psutil.PROCFS_PATH, '/proc')
+
+ def test_cpu_steal_decrease(self):
+ # Test cumulative cpu stats decrease. We should ignore this.
+ # See issue #1210.
+ with mock_open_content(
+ "/proc/stat",
+ textwrap.dedent("""\
+ cpu 0 0 0 0 0 0 0 1 0 0
+ cpu0 0 0 0 0 0 0 0 1 0 0
+ cpu1 0 0 0 0 0 0 0 1 0 0
+ """).encode()) as m:
+ # first call to "percent" functions should read the new stat file
+ # and compare to the "real" file read at import time - so the
+ # values are meaningless
+ psutil.cpu_percent()
+ assert m.called
+ psutil.cpu_percent(percpu=True)
+ psutil.cpu_times_percent()
+ psutil.cpu_times_percent(percpu=True)
+
+ with mock_open_content(
+ "/proc/stat",
+ textwrap.dedent("""\
+ cpu 1 0 0 0 0 0 0 0 0 0
+ cpu0 1 0 0 0 0 0 0 0 0 0
+ cpu1 1 0 0 0 0 0 0 0 0 0
+ """).encode()) as m:
+ # Increase "user" while steal goes "backwards" to zero.
+ cpu_percent = psutil.cpu_percent()
+ assert m.called
+ cpu_percent_percpu = psutil.cpu_percent(percpu=True)
+ cpu_times_percent = psutil.cpu_times_percent()
+ cpu_times_percent_percpu = psutil.cpu_times_percent(percpu=True)
+ self.assertNotEqual(cpu_percent, 0)
+ self.assertNotEqual(sum(cpu_percent_percpu), 0)
+ self.assertNotEqual(sum(cpu_times_percent), 0)
+ self.assertNotEqual(sum(cpu_times_percent), 100.0)
+ self.assertNotEqual(sum(map(sum, cpu_times_percent_percpu)), 0)
+ self.assertNotEqual(sum(map(sum, cpu_times_percent_percpu)), 100.0)
+ self.assertEqual(cpu_times_percent.steal, 0)
+ self.assertNotEqual(cpu_times_percent.user, 0)
+
+ def test_boot_time_mocked(self):
+ with mock.patch('psutil._common.open', create=True) as m:
+ self.assertRaises(
+ RuntimeError,
+ psutil._pslinux.boot_time)
+ assert m.called
+
+ def test_users_mocked(self):
+ # Make sure ':0' and ':0.0' (returned by C ext) are converted
+ # to 'localhost'.
+ with mock.patch('psutil._pslinux.cext.users',
+ return_value=[('giampaolo', 'pts/2', ':0',
+ 1436573184.0, True, 2)]) as m:
+ self.assertEqual(psutil.users()[0].host, 'localhost')
+ assert m.called
+ with mock.patch('psutil._pslinux.cext.users',
+ return_value=[('giampaolo', 'pts/2', ':0.0',
+ 1436573184.0, True, 2)]) as m:
+ self.assertEqual(psutil.users()[0].host, 'localhost')
+ assert m.called
+ # ...otherwise it should be returned as-is
+ with mock.patch('psutil._pslinux.cext.users',
+ return_value=[('giampaolo', 'pts/2', 'foo',
+ 1436573184.0, True, 2)]) as m:
+ self.assertEqual(psutil.users()[0].host, 'foo')
+ assert m.called
+
+ def test_procfs_path(self):
+ tdir = self.get_testfn()
+ os.mkdir(tdir)
+ try:
+ psutil.PROCFS_PATH = tdir
+ self.assertRaises(IOError, psutil.virtual_memory)
+ self.assertRaises(IOError, psutil.cpu_times)
+ self.assertRaises(IOError, psutil.cpu_times, percpu=True)
+ self.assertRaises(IOError, psutil.boot_time)
+ # self.assertRaises(IOError, psutil.pids)
+ self.assertRaises(IOError, psutil.net_connections)
+ self.assertRaises(IOError, psutil.net_io_counters)
+ self.assertRaises(IOError, psutil.net_if_stats)
+ # self.assertRaises(IOError, psutil.disk_io_counters)
+ self.assertRaises(IOError, psutil.disk_partitions)
+ self.assertRaises(psutil.NoSuchProcess, psutil.Process)
+ finally:
+ psutil.PROCFS_PATH = "/proc"
+
+ @retry_on_failure()
+ def test_issue_687(self):
+ # In case of thread ID:
+ # - pid_exists() is supposed to return False
+ # - Process(tid) is supposed to work
+ # - pids() should not return the TID
+ # See: https://github.com/giampaolo/psutil/issues/687
+ with ThreadTask():
+ p = psutil.Process()
+ threads = p.threads()
+ self.assertEqual(len(threads), 2)
+ tid = sorted(threads, key=lambda x: x.id)[1].id
+ self.assertNotEqual(p.pid, tid)
+ pt = psutil.Process(tid)
+ pt.as_dict()
+ self.assertNotIn(tid, psutil.pids())
+
+ def test_pid_exists_no_proc_status(self):
+ # Internally pid_exists relies on /proc/{pid}/status.
+ # Emulate a case where this file is empty in which case
+ # psutil is supposed to fall back on using pids().
+        with mock_open_content("/proc/%s/status" % os.getpid(), "") as m:
+ assert psutil.pid_exists(os.getpid())
+ assert m.called
+
+
+# =====================================================================
+# --- sensors
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+@unittest.skipIf(not HAS_BATTERY, "no battery")
+class TestSensorsBattery(PsutilTestCase):
+
+ @unittest.skipIf(not which("acpi"), "acpi utility not available")
+ def test_percent(self):
+ out = sh("acpi -b")
+ acpi_value = int(out.split(",")[1].strip().replace('%', ''))
+ psutil_value = psutil.sensors_battery().percent
+ self.assertAlmostEqual(acpi_value, psutil_value, delta=1)
+
+ def test_emulate_power_plugged(self):
+ # Pretend the AC power cable is connected.
+ def open_mock(name, *args, **kwargs):
+ if name.endswith("AC0/online") or name.endswith("AC/online"):
+ return io.BytesIO(b"1")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock) as m:
+ self.assertEqual(psutil.sensors_battery().power_plugged, True)
+ self.assertEqual(
+ psutil.sensors_battery().secsleft, psutil.POWER_TIME_UNLIMITED)
+ assert m.called
+
+ def test_emulate_power_plugged_2(self):
+ # Same as above but pretend /AC0/online does not exist in which
+ # case code relies on /status file.
+ def open_mock(name, *args, **kwargs):
+ if name.endswith("AC0/online") or name.endswith("AC/online"):
+ raise IOError(errno.ENOENT, "")
+ elif name.endswith("/status"):
+ return io.StringIO(u("charging"))
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock) as m:
+ self.assertEqual(psutil.sensors_battery().power_plugged, True)
+ assert m.called
+
+ def test_emulate_power_not_plugged(self):
+ # Pretend the AC power cable is not connected.
+ def open_mock(name, *args, **kwargs):
+ if name.endswith("AC0/online") or name.endswith("AC/online"):
+ return io.BytesIO(b"0")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock) as m:
+ self.assertEqual(psutil.sensors_battery().power_plugged, False)
+ assert m.called
+
+ def test_emulate_power_not_plugged_2(self):
+ # Same as above but pretend /AC0/online does not exist in which
+ # case code relies on /status file.
+ def open_mock(name, *args, **kwargs):
+ if name.endswith("AC0/online") or name.endswith("AC/online"):
+ raise IOError(errno.ENOENT, "")
+ elif name.endswith("/status"):
+ return io.StringIO(u("discharging"))
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock) as m:
+ self.assertEqual(psutil.sensors_battery().power_plugged, False)
+ assert m.called
+
+ def test_emulate_power_undetermined(self):
+        # Pretend we can't know whether the AC power cable is
+        # connected (assert fallback to None).
+ def open_mock(name, *args, **kwargs):
+ if name.startswith("/sys/class/power_supply/AC0/online") or \
+ name.startswith("/sys/class/power_supply/AC/online"):
+ raise IOError(errno.ENOENT, "")
+ elif name.startswith("/sys/class/power_supply/BAT0/status"):
+ return io.BytesIO(b"???")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock) as m:
+ self.assertIsNone(psutil.sensors_battery().power_plugged)
+ assert m.called
+
+ def test_emulate_energy_full_0(self):
+        # Emulate a case where the energy_full file returns 0.
+ with mock_open_content(
+ "/sys/class/power_supply/BAT0/energy_full", b"0") as m:
+ self.assertEqual(psutil.sensors_battery().percent, 0)
+ assert m.called
+
+ def test_emulate_energy_full_not_avail(self):
+ # Emulate a case where energy_full file does not exist.
+ # Expected fallback on /capacity.
+ with mock_open_exception(
+ "/sys/class/power_supply/BAT0/energy_full",
+ IOError(errno.ENOENT, "")):
+ with mock_open_exception(
+ "/sys/class/power_supply/BAT0/charge_full",
+ IOError(errno.ENOENT, "")):
+ with mock_open_content(
+ "/sys/class/power_supply/BAT0/capacity", b"88"):
+ self.assertEqual(psutil.sensors_battery().percent, 88)
+
+ def test_emulate_no_power(self):
+        # Emulate a case where neither /AC0/online nor /BAT0/status exists.
+ with mock_open_exception(
+ "/sys/class/power_supply/AC/online",
+ IOError(errno.ENOENT, "")):
+ with mock_open_exception(
+ "/sys/class/power_supply/AC0/online",
+ IOError(errno.ENOENT, "")):
+ with mock_open_exception(
+ "/sys/class/power_supply/BAT0/status",
+ IOError(errno.ENOENT, "")):
+ self.assertIsNone(psutil.sensors_battery().power_plugged)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSensorsBatteryEmulated(PsutilTestCase):
+
+ def test_it(self):
+ def open_mock(name, *args, **kwargs):
+ if name.endswith("/energy_now"):
+ return io.StringIO(u("60000000"))
+ elif name.endswith("/power_now"):
+ return io.StringIO(u("0"))
+ elif name.endswith("/energy_full"):
+ return io.StringIO(u("60000001"))
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch('os.listdir', return_value=["BAT0"]) as mlistdir:
+ with mock.patch(patch_point, side_effect=open_mock) as mopen:
+ self.assertIsNotNone(psutil.sensors_battery())
+ assert mlistdir.called
+ assert mopen.called
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSensorsTemperatures(PsutilTestCase):
+
+ def test_emulate_class_hwmon(self):
+ def open_mock(name, *args, **kwargs):
+ if name.endswith('/name'):
+ return io.StringIO(u("name"))
+ elif name.endswith('/temp1_label'):
+ return io.StringIO(u("label"))
+ elif name.endswith('/temp1_input'):
+ return io.BytesIO(b"30000")
+ elif name.endswith('/temp1_max'):
+ return io.BytesIO(b"40000")
+ elif name.endswith('/temp1_crit'):
+ return io.BytesIO(b"50000")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ # Test case with /sys/class/hwmon
+ with mock.patch('glob.glob',
+ return_value=['/sys/class/hwmon/hwmon0/temp1']):
+ temp = psutil.sensors_temperatures()['name'][0]
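+                # hwmon exposes temperatures in millidegrees Celsius;
+                # psutil converts them to degrees (30000 -> 30.0).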
+ self.assertEqual(temp.label, 'label')
+ self.assertEqual(temp.current, 30.0)
+ self.assertEqual(temp.high, 40.0)
+ self.assertEqual(temp.critical, 50.0)
+
+ def test_emulate_class_thermal(self):
+ def open_mock(name, *args, **kwargs):
+ if name.endswith('0_temp'):
+ return io.BytesIO(b"50000")
+ elif name.endswith('temp'):
+ return io.BytesIO(b"30000")
+ elif name.endswith('0_type'):
+ return io.StringIO(u("critical"))
+ elif name.endswith('type'):
+ return io.StringIO(u("name"))
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ def glob_mock(path):
+ if path == '/sys/class/hwmon/hwmon*/temp*_*':
+ return []
+ elif path == '/sys/class/hwmon/hwmon*/device/temp*_*':
+ return []
+ elif path == '/sys/class/thermal/thermal_zone*':
+ return ['/sys/class/thermal/thermal_zone0']
+ elif path == '/sys/class/thermal/thermal_zone0/trip_point*':
+ return ['/sys/class/thermal/thermal_zone1/trip_point_0_type',
+ '/sys/class/thermal/thermal_zone1/trip_point_0_temp']
+ return []
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ with mock.patch('glob.glob', create=True, side_effect=glob_mock):
+ temp = psutil.sensors_temperatures()['name'][0]
+ self.assertEqual(temp.label, '')
+ self.assertEqual(temp.current, 30.0)
+ self.assertEqual(temp.high, 50.0)
+ self.assertEqual(temp.critical, 50.0)
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestSensorsFans(PsutilTestCase):
+
+ def test_emulate_data(self):
+ def open_mock(name, *args, **kwargs):
+ if name.endswith('/name'):
+ return io.StringIO(u("name"))
+ elif name.endswith('/fan1_label'):
+ return io.StringIO(u("label"))
+ elif name.endswith('/fan1_input'):
+ return io.StringIO(u("2000"))
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock):
+ with mock.patch('glob.glob',
+ return_value=['/sys/class/hwmon/hwmon2/fan1']):
+ fan = psutil.sensors_fans()['name'][0]
+ self.assertEqual(fan.label, 'label')
+ self.assertEqual(fan.current, 2000)
+
+
+# =====================================================================
+# --- test process
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestProcess(PsutilTestCase):
+
+ @retry_on_failure()
+ def test_parse_smaps_vs_memory_maps(self):
+ sproc = self.spawn_testproc()
+ uss, pss, swap = psutil._pslinux.Process(sproc.pid)._parse_smaps()
+ maps = psutil.Process(sproc.pid).memory_maps(grouped=False)
+ self.assertAlmostEqual(
+ uss, sum([x.private_dirty + x.private_clean for x in maps]),
+ delta=4096)
+ self.assertAlmostEqual(
+ pss, sum([x.pss for x in maps]), delta=4096)
+ self.assertAlmostEqual(
+ swap, sum([x.swap for x in maps]), delta=4096)
+
+ def test_parse_smaps_mocked(self):
+ # See: https://github.com/giampaolo/psutil/issues/1222
+ with mock_open_content(
+ "/proc/%s/smaps" % os.getpid(),
+ textwrap.dedent("""\
+ fffff0 r-xp 00000000 00:00 0 [vsyscall]
+ Size: 1 kB
+ Rss: 2 kB
+ Pss: 3 kB
+ Shared_Clean: 4 kB
+ Shared_Dirty: 5 kB
+ Private_Clean: 6 kB
+ Private_Dirty: 7 kB
+ Referenced: 8 kB
+ Anonymous: 9 kB
+ LazyFree: 10 kB
+ AnonHugePages: 11 kB
+ ShmemPmdMapped: 12 kB
+ Shared_Hugetlb: 13 kB
+ Private_Hugetlb: 14 kB
+ Swap: 15 kB
+ SwapPss: 16 kB
+ KernelPageSize: 17 kB
+ MMUPageSize: 18 kB
+ Locked: 19 kB
+ VmFlags: rd ex
+ """).encode()) as m:
+ p = psutil._pslinux.Process(os.getpid())
+ uss, pss, swap = p._parse_smaps()
+ assert m.called
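+            # USS = Private_Clean (6) + Private_Dirty (7) +
+            # Private_Hugetlb (14); PSS and Swap map directly to the
+            # corresponding fields. Values are reported in kB, hence
+            # the * 1024.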
+ self.assertEqual(uss, (6 + 7 + 14) * 1024)
+ self.assertEqual(pss, 3 * 1024)
+ self.assertEqual(swap, 15 * 1024)
+
+ # On PYPY file descriptors are not closed fast enough.
+ @unittest.skipIf(PYPY, "unreliable on PYPY")
+ def test_open_files_mode(self):
+ def get_test_file(fname):
+ p = psutil.Process()
+ giveup_at = time.time() + GLOBAL_TIMEOUT
+ while True:
+ for file in p.open_files():
+ if file.path == os.path.abspath(fname):
+ return file
+ elif time.time() > giveup_at:
+ break
+ raise RuntimeError("timeout looking for test file")
+
+ #
+ testfn = self.get_testfn()
+ with open(testfn, "w"):
+ self.assertEqual(get_test_file(testfn).mode, "w")
+ with open(testfn, "r"):
+ self.assertEqual(get_test_file(testfn).mode, "r")
+ with open(testfn, "a"):
+ self.assertEqual(get_test_file(testfn).mode, "a")
+ #
+ with open(testfn, "r+"):
+ self.assertEqual(get_test_file(testfn).mode, "r+")
+ with open(testfn, "w+"):
+ self.assertEqual(get_test_file(testfn).mode, "r+")
+ with open(testfn, "a+"):
+ self.assertEqual(get_test_file(testfn).mode, "a+")
+ # note: "x" bit is not supported
+ if PY3:
+ safe_rmpath(testfn)
+ with open(testfn, "x"):
+ self.assertEqual(get_test_file(testfn).mode, "w")
+ safe_rmpath(testfn)
+ with open(testfn, "x+"):
+ self.assertEqual(get_test_file(testfn).mode, "r+")
+
+ def test_open_files_file_gone(self):
+ # simulates a file which gets deleted during open_files()
+ # execution
+ p = psutil.Process()
+ files = p.open_files()
+ with open(self.get_testfn(), 'w'):
+ # give the kernel some time to see the new file
+ call_until(p.open_files, "len(ret) != %i" % len(files))
+ with mock.patch('psutil._pslinux.os.readlink',
+ side_effect=OSError(errno.ENOENT, "")) as m:
+ files = p.open_files()
+ assert not files
+ assert m.called
+ # also simulate the case where os.readlink() returns EINVAL
+ # in which case psutil is supposed to 'continue'
+ with mock.patch('psutil._pslinux.os.readlink',
+ side_effect=OSError(errno.EINVAL, "")) as m:
+ self.assertEqual(p.open_files(), [])
+ assert m.called
+
+ def test_open_files_fd_gone(self):
+ # Simulate a case where /proc/{pid}/fdinfo/{fd} disappears
+ # while iterating through fds.
+ # https://travis-ci.org/giampaolo/psutil/jobs/225694530
+ p = psutil.Process()
+ files = p.open_files()
+ with open(self.get_testfn(), 'w'):
+ # give the kernel some time to see the new file
+ call_until(p.open_files, "len(ret) != %i" % len(files))
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point,
+ side_effect=IOError(errno.ENOENT, "")) as m:
+ files = p.open_files()
+ assert not files
+ assert m.called
+
+ def test_open_files_enametoolong(self):
+ # Simulate a case where /proc/{pid}/fd/{fd} symlink
+ # points to a file with full path longer than PATH_MAX, see:
+ # https://github.com/giampaolo/psutil/issues/1940
+ p = psutil.Process()
+ files = p.open_files()
+ with open(self.get_testfn(), 'w'):
+ # give the kernel some time to see the new file
+ call_until(p.open_files, "len(ret) != %i" % len(files))
+ patch_point = 'psutil._pslinux.os.readlink'
+ with mock.patch(patch_point,
+ side_effect=OSError(errno.ENAMETOOLONG, "")) as m:
+ with mock.patch("psutil._pslinux.debug"):
+ files = p.open_files()
+ assert not files
+ assert m.called
+
+ # --- mocked tests
+
+ def test_terminal_mocked(self):
+ with mock.patch('psutil._pslinux._psposix.get_terminal_map',
+ return_value={}) as m:
+ self.assertIsNone(psutil._pslinux.Process(os.getpid()).terminal())
+ assert m.called
+
+ # TODO: re-enable this test.
+ # def test_num_ctx_switches_mocked(self):
+ # with mock.patch('psutil._common.open', create=True) as m:
+ # self.assertRaises(
+ # NotImplementedError,
+ # psutil._pslinux.Process(os.getpid()).num_ctx_switches)
+ # assert m.called
+
+ def test_cmdline_mocked(self):
+ # see: https://github.com/giampaolo/psutil/issues/639
+ p = psutil.Process()
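+        # /proc/[pid]/cmdline separates arguments with NUL bytes; a
+        # double trailing NUL yields a trailing empty argument.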
+ fake_file = io.StringIO(u('foo\x00bar\x00'))
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m:
+ self.assertEqual(p.cmdline(), ['foo', 'bar'])
+ assert m.called
+ fake_file = io.StringIO(u('foo\x00bar\x00\x00'))
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m:
+ self.assertEqual(p.cmdline(), ['foo', 'bar', ''])
+ assert m.called
+
+ def test_cmdline_spaces_mocked(self):
+ # see: https://github.com/giampaolo/psutil/issues/1179
+ p = psutil.Process()
+ fake_file = io.StringIO(u('foo bar '))
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m:
+ self.assertEqual(p.cmdline(), ['foo', 'bar'])
+ assert m.called
+ fake_file = io.StringIO(u('foo bar '))
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m:
+ self.assertEqual(p.cmdline(), ['foo', 'bar', ''])
+ assert m.called
+
+ def test_cmdline_mixed_separators(self):
+ # https://github.com/giampaolo/psutil/issues/
+ # 1179#issuecomment-552984549
+ p = psutil.Process()
+ fake_file = io.StringIO(u('foo\x20bar\x00'))
+ with mock.patch('psutil._common.open',
+ return_value=fake_file, create=True) as m:
+ self.assertEqual(p.cmdline(), ['foo', 'bar'])
+ assert m.called
+
+ def test_readlink_path_deleted_mocked(self):
+ with mock.patch('psutil._pslinux.os.readlink',
+ return_value='/home/foo (deleted)'):
+ self.assertEqual(psutil.Process().exe(), "/home/foo")
+ self.assertEqual(psutil.Process().cwd(), "/home/foo")
+
+ def test_threads_mocked(self):
+ # Test the case where os.listdir() returns a file (thread)
+ # which no longer exists by the time we open() it (race
+ # condition). threads() is supposed to ignore that instead
+ # of raising NSP.
+ def open_mock(name, *args, **kwargs):
+ if name.startswith('/proc/%s/task' % os.getpid()):
+ raise IOError(errno.ENOENT, "")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ orig_open = open
+ patch_point = 'builtins.open' if PY3 else '__builtin__.open'
+ with mock.patch(patch_point, side_effect=open_mock) as m:
+ ret = psutil.Process().threads()
+ assert m.called
+ self.assertEqual(ret, [])
+
+ # ...but if it bumps into something != ENOENT we want an
+ # exception.
+ def open_mock(name, *args, **kwargs):
+ if name.startswith('/proc/%s/task' % os.getpid()):
+ raise IOError(errno.EPERM, "")
+ else:
+ return orig_open(name, *args, **kwargs)
+
+ with mock.patch(patch_point, side_effect=open_mock):
+ self.assertRaises(psutil.AccessDenied, psutil.Process().threads)
+
+ def test_exe_mocked(self):
+ with mock.patch('psutil._pslinux.readlink',
+ side_effect=OSError(errno.ENOENT, "")) as m1:
+ with mock.patch('psutil.Process.cmdline',
+ side_effect=psutil.AccessDenied(0, "")) as m2:
+ # No such file error; might be raised also if /proc/pid/exe
+ # path actually exists for system processes with low pids
+ # (about 0-20). In this case psutil is supposed to return
+ # an empty string.
+ ret = psutil.Process().exe()
+ assert m1.called
+ assert m2.called
+ self.assertEqual(ret, "")
+
+ # ...but if /proc/pid no longer exist we're supposed to treat
+ # it as an alias for zombie process
+ with mock.patch('psutil._pslinux.os.path.lexists',
+ return_value=False):
+ self.assertRaises(
+ psutil.ZombieProcess, psutil.Process().exe)
+
+ def test_issue_1014(self):
+ # Emulates a case where smaps file does not exist. In this case
+ # wrap_exception decorator should not raise NoSuchProcess.
+ with mock_open_exception(
+ '/proc/%s/smaps' % os.getpid(),
+ IOError(errno.ENOENT, "")) as m:
+ p = psutil.Process()
+ with self.assertRaises(FileNotFoundError):
+ p.memory_maps()
+ assert m.called
+
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit_zombie(self):
+ # Emulate a case where rlimit() raises ENOSYS, which may
+ # happen in case of zombie process:
+ # https://travis-ci.org/giampaolo/psutil/jobs/51368273
+ with mock.patch("psutil._pslinux.prlimit",
+ side_effect=OSError(errno.ENOSYS, "")) as m:
+ p = psutil.Process()
+ p.name()
+ with self.assertRaises(psutil.ZombieProcess) as exc:
+ p.rlimit(psutil.RLIMIT_NOFILE)
+ assert m.called
+ self.assertEqual(exc.exception.pid, p.pid)
+ self.assertEqual(exc.exception.name, p.name())
+
+ def test_cwd_zombie(self):
+ with mock.patch("psutil._pslinux.os.readlink",
+ side_effect=OSError(errno.ENOENT, "")) as m:
+ p = psutil.Process()
+ p.name()
+ with self.assertRaises(psutil.ZombieProcess) as exc:
+ p.cwd()
+ assert m.called
+ self.assertEqual(exc.exception.pid, p.pid)
+ self.assertEqual(exc.exception.name, p.name())
+
+ def test_stat_file_parsing(self):
+ args = [
+ "0", # pid
+ "(cat)", # name
+ "Z", # status
+ "1", # ppid
+ "0", # pgrp
+ "0", # session
+ "0", # tty
+ "0", # tpgid
+ "0", # flags
+ "0", # minflt
+ "0", # cminflt
+ "0", # majflt
+ "0", # cmajflt
+ "2", # utime
+ "3", # stime
+ "4", # cutime
+ "5", # cstime
+ "0", # priority
+ "0", # nice
+ "0", # num_threads
+ "0", # itrealvalue
+ "6", # starttime
+ "0", # vsize
+ "0", # rss
+ "0", # rsslim
+ "0", # startcode
+ "0", # endcode
+ "0", # startstack
+ "0", # kstkesp
+ "0", # kstkeip
+ "0", # signal
+ "0", # blocked
+ "0", # sigignore
+ "0", # sigcatch
+ "0", # wchan
+ "0", # nswap
+ "0", # cnswap
+ "0", # exit_signal
+ "6", # processor
+ "0", # rt priority
+ "0", # policy
+ "7", # delayacct_blkio_ticks
+ ]
+ content = " ".join(args).encode()
+ with mock_open_content('/proc/%s/stat' % os.getpid(), content):
+ p = psutil.Process()
+ self.assertEqual(p.name(), 'cat')
+ self.assertEqual(p.status(), psutil.STATUS_ZOMBIE)
+ self.assertEqual(p.ppid(), 1)
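+            # utime/stime/starttime in /proc/[pid]/stat are expressed
+            # in clock ticks; psutil divides by CLOCK_TICKS (usually
+            # 100) to convert them to seconds.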
+ self.assertEqual(
+ p.create_time(), 6 / CLOCK_TICKS + psutil.boot_time())
+ cpu = p.cpu_times()
+ self.assertEqual(cpu.user, 2 / CLOCK_TICKS)
+ self.assertEqual(cpu.system, 3 / CLOCK_TICKS)
+ self.assertEqual(cpu.children_user, 4 / CLOCK_TICKS)
+ self.assertEqual(cpu.children_system, 5 / CLOCK_TICKS)
+ self.assertEqual(cpu.iowait, 7 / CLOCK_TICKS)
+ self.assertEqual(p.cpu_num(), 6)
+
+ def test_status_file_parsing(self):
+ with mock_open_content(
+ '/proc/%s/status' % os.getpid(),
+ textwrap.dedent("""\
+ Uid:\t1000\t1001\t1002\t1003
+ Gid:\t1004\t1005\t1006\t1007
+ Threads:\t66
+ Cpus_allowed:\tf
+ Cpus_allowed_list:\t0-7
+ voluntary_ctxt_switches:\t12
+ nonvoluntary_ctxt_switches:\t13""").encode()):
+ p = psutil.Process()
+ self.assertEqual(p.num_ctx_switches().voluntary, 12)
+ self.assertEqual(p.num_ctx_switches().involuntary, 13)
+ self.assertEqual(p.num_threads(), 66)
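+            # the Uid/Gid rows list real, effective, saved and
+            # filesystem IDs; psutil exposes the first three.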
+ uids = p.uids()
+ self.assertEqual(uids.real, 1000)
+ self.assertEqual(uids.effective, 1001)
+ self.assertEqual(uids.saved, 1002)
+ gids = p.gids()
+ self.assertEqual(gids.real, 1004)
+ self.assertEqual(gids.effective, 1005)
+ self.assertEqual(gids.saved, 1006)
+ self.assertEqual(p._proc._get_eligible_cpus(), list(range(0, 8)))
+
+ def test_connections_enametoolong(self):
+ # Simulate a case where /proc/{pid}/fd/{fd} symlink points to
+ # a file with full path longer than PATH_MAX, see:
+ # https://github.com/giampaolo/psutil/issues/1940
+ with mock.patch('psutil._pslinux.os.readlink',
+ side_effect=OSError(errno.ENAMETOOLONG, "")) as m:
+ p = psutil.Process()
+ with mock.patch("psutil._pslinux.debug"):
+ assert not p.connections()
+ assert m.called
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestProcessAgainstStatus(PsutilTestCase):
+ """/proc/pid/stat and /proc/pid/status have many values in common.
+ Whenever possible, psutil uses /proc/pid/stat (it's faster).
+ For all those cases we check that the value found in
+ /proc/pid/stat (by psutil) matches the one found in
+ /proc/pid/status.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.proc = psutil.Process()
+
+ def read_status_file(self, linestart):
+ with psutil._psplatform.open_text(
+ '/proc/%s/status' % self.proc.pid) as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith(linestart):
+ value = line.partition('\t')[2]
+ try:
+ return int(value)
+ except ValueError:
+ return value
+ raise ValueError("can't find %r" % linestart)
+
+ def test_name(self):
+ value = self.read_status_file("Name:")
+ self.assertEqual(self.proc.name(), value)
+
+ def test_status(self):
+ value = self.read_status_file("State:")
+ value = value[value.find('(') + 1:value.rfind(')')]
+ value = value.replace(' ', '-')
+ self.assertEqual(self.proc.status(), value)
+
+ def test_ppid(self):
+ value = self.read_status_file("PPid:")
+ self.assertEqual(self.proc.ppid(), value)
+
+ def test_num_threads(self):
+ value = self.read_status_file("Threads:")
+ self.assertEqual(self.proc.num_threads(), value)
+
+ def test_uids(self):
+ value = self.read_status_file("Uid:")
+ value = tuple(map(int, value.split()[1:4]))
+ self.assertEqual(self.proc.uids(), value)
+
+ def test_gids(self):
+ value = self.read_status_file("Gid:")
+ value = tuple(map(int, value.split()[1:4]))
+ self.assertEqual(self.proc.gids(), value)
+
+ @retry_on_failure()
+ def test_num_ctx_switches(self):
+ value = self.read_status_file("voluntary_ctxt_switches:")
+ self.assertEqual(self.proc.num_ctx_switches().voluntary, value)
+ value = self.read_status_file("nonvoluntary_ctxt_switches:")
+ self.assertEqual(self.proc.num_ctx_switches().involuntary, value)
+
+ def test_cpu_affinity(self):
+ value = self.read_status_file("Cpus_allowed_list:")
+ if '-' in str(value):
+ min_, max_ = map(int, value.split('-'))
+ self.assertEqual(
+ self.proc.cpu_affinity(), list(range(min_, max_ + 1)))
+
+ def test_cpu_affinity_eligible_cpus(self):
+ value = self.read_status_file("Cpus_allowed_list:")
+ with mock.patch("psutil._pslinux.per_cpu_times") as m:
+ self.proc._proc._get_eligible_cpus()
+ if '-' in str(value):
+ assert not m.called
+ else:
+ assert m.called
+
+
+# =====================================================================
+# --- test utils
+# =====================================================================
+
+
+@unittest.skipIf(not LINUX, "LINUX only")
+class TestUtils(PsutilTestCase):
+
+ def test_readlink(self):
+ with mock.patch("os.readlink", return_value="foo (deleted)") as m:
+ self.assertEqual(psutil._psplatform.readlink("bar"), "foo")
+ assert m.called
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_memleaks.py b/lib/psutil/tests/test_memleaks.py
new file mode 100644
index 0000000..dbd1588
--- /dev/null
+++ b/lib/psutil/tests/test_memleaks.py
@@ -0,0 +1,492 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests for detecting function memory leaks (typically the ones
+implemented in C). It does so by calling a function many times and
+checking whether process memory usage keeps increasing between
+calls or over time.
+Note that this may produce false positives (especially on Windows
+for some reason).
+PyPy appears to be completely unstable for this framework, probably
+because of how its JIT handles memory, so tests are skipped.
+"""
+
+from __future__ import print_function
+
+import functools
+import os
+import platform
+import unittest
+
+import psutil
+import psutil._common
+from psutil import LINUX
+from psutil import MACOS
+from psutil import OPENBSD
+from psutil import POSIX
+from psutil import SUNOS
+from psutil import WINDOWS
+from psutil._compat import ProcessLookupError
+from psutil._compat import super
+from psutil.tests import HAS_CPU_AFFINITY
+from psutil.tests import HAS_CPU_FREQ
+from psutil.tests import HAS_ENVIRON
+from psutil.tests import HAS_IONICE
+from psutil.tests import HAS_MEMORY_MAPS
+from psutil.tests import HAS_NET_IO_COUNTERS
+from psutil.tests import HAS_PROC_CPU_NUM
+from psutil.tests import HAS_PROC_IO_COUNTERS
+from psutil.tests import HAS_RLIMIT
+from psutil.tests import HAS_SENSORS_BATTERY
+from psutil.tests import HAS_SENSORS_FANS
+from psutil.tests import HAS_SENSORS_TEMPERATURES
+from psutil.tests import TestMemoryLeak
+from psutil.tests import create_sockets
+from psutil.tests import get_testfn
+from psutil.tests import process_namespace
+from psutil.tests import skip_on_access_denied
+from psutil.tests import spawn_testproc
+from psutil.tests import system_namespace
+from psutil.tests import terminate
+
+
+cext = psutil._psplatform.cext
+thisproc = psutil.Process()
+FEW_TIMES = 5
+
+
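+# Editor's note: illustrative sketch only. The real leak-check machinery
+# lives in TestMemoryLeak (imported from psutil.tests, not part of this
+# diff); the helper below merely spells out the idea described in the
+# module docstring and is not used by the tests. The name and the 4 KiB
+# tolerance are arbitrary assumptions.
+def _looks_leaky(fun, times=1000, tolerance=4096):
+    """Call `fun` repeatedly and report whether RSS memory grew by
+    more than `tolerance` bytes, which would hint at a leak."""
+    fun()  # warm-up call so one-time allocations are not counted
+    rss_before = thisproc.memory_info().rss
+    for _ in range(times):
+        fun()
+    rss_after = thisproc.memory_info().rss
+    return (rss_after - rss_before) > tolerance
+
+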
+def fewtimes_if_linux():
+ """Decorator for those Linux functions which are implemented in pure
+ Python, and which we want to run faster.
+ """
+ def decorator(fun):
+ @functools.wraps(fun)
+ def wrapper(self, *args, **kwargs):
+ if LINUX:
+ before = self.__class__.times
+ try:
+ self.__class__.times = FEW_TIMES
+ return fun(self, *args, **kwargs)
+ finally:
+ self.__class__.times = before
+ else:
+ return fun(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+# ===================================================================
+# Process class
+# ===================================================================
+
+
+class TestProcessObjectLeaks(TestMemoryLeak):
+ """Test leaks of Process class methods."""
+
+ proc = thisproc
+
+ def test_coverage(self):
+ ns = process_namespace(None)
+ ns.test_class_coverage(self, ns.getters + ns.setters)
+
+ @fewtimes_if_linux()
+ def test_name(self):
+ self.execute(self.proc.name)
+
+ @fewtimes_if_linux()
+ def test_cmdline(self):
+ self.execute(self.proc.cmdline)
+
+ @fewtimes_if_linux()
+ def test_exe(self):
+ self.execute(self.proc.exe)
+
+ @fewtimes_if_linux()
+ def test_ppid(self):
+ self.execute(self.proc.ppid)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @fewtimes_if_linux()
+ def test_uids(self):
+ self.execute(self.proc.uids)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @fewtimes_if_linux()
+ def test_gids(self):
+ self.execute(self.proc.gids)
+
+ @fewtimes_if_linux()
+ def test_status(self):
+ self.execute(self.proc.status)
+
+ def test_nice(self):
+ self.execute(self.proc.nice)
+
+ def test_nice_set(self):
+ niceness = thisproc.nice()
+ self.execute(lambda: self.proc.nice(niceness))
+
+ @unittest.skipIf(not HAS_IONICE, "not supported")
+ def test_ionice(self):
+ self.execute(self.proc.ionice)
+
+ @unittest.skipIf(not HAS_IONICE, "not supported")
+ def test_ionice_set(self):
+ if WINDOWS:
+ value = thisproc.ionice()
+ self.execute(lambda: self.proc.ionice(value))
+ else:
+ self.execute(lambda: self.proc.ionice(psutil.IOPRIO_CLASS_NONE))
+ fun = functools.partial(cext.proc_ioprio_set, os.getpid(), -1, 0)
+ self.execute_w_exc(OSError, fun)
+
+ @unittest.skipIf(not HAS_PROC_IO_COUNTERS, "not supported")
+ @fewtimes_if_linux()
+ def test_io_counters(self):
+ self.execute(self.proc.io_counters)
+
+ @unittest.skipIf(POSIX, "worthless on POSIX")
+ def test_username(self):
+        # always opens 1 handle on Windows (only once)
+ psutil.Process().username()
+ self.execute(self.proc.username)
+
+ @fewtimes_if_linux()
+ def test_create_time(self):
+ self.execute(self.proc.create_time)
+
+ @fewtimes_if_linux()
+ @skip_on_access_denied(only_if=OPENBSD)
+ def test_num_threads(self):
+ self.execute(self.proc.num_threads)
+
+ @unittest.skipIf(not WINDOWS, "WINDOWS only")
+ def test_num_handles(self):
+ self.execute(self.proc.num_handles)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @fewtimes_if_linux()
+ def test_num_fds(self):
+ self.execute(self.proc.num_fds)
+
+ @fewtimes_if_linux()
+ def test_num_ctx_switches(self):
+ self.execute(self.proc.num_ctx_switches)
+
+ @fewtimes_if_linux()
+ @skip_on_access_denied(only_if=OPENBSD)
+ def test_threads(self):
+ self.execute(self.proc.threads)
+
+ @fewtimes_if_linux()
+ def test_cpu_times(self):
+ self.execute(self.proc.cpu_times)
+
+ @fewtimes_if_linux()
+ @unittest.skipIf(not HAS_PROC_CPU_NUM, "not supported")
+ def test_cpu_num(self):
+ self.execute(self.proc.cpu_num)
+
+ @fewtimes_if_linux()
+ def test_memory_info(self):
+ self.execute(self.proc.memory_info)
+
+ @fewtimes_if_linux()
+ def test_memory_full_info(self):
+ self.execute(self.proc.memory_full_info)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @fewtimes_if_linux()
+ def test_terminal(self):
+ self.execute(self.proc.terminal)
+
+ def test_resume(self):
+ times = FEW_TIMES if POSIX else self.times
+ self.execute(self.proc.resume, times=times)
+
+ @fewtimes_if_linux()
+ def test_cwd(self):
+ self.execute(self.proc.cwd)
+
+ @unittest.skipIf(not HAS_CPU_AFFINITY, "not supported")
+ def test_cpu_affinity(self):
+ self.execute(self.proc.cpu_affinity)
+
+ @unittest.skipIf(not HAS_CPU_AFFINITY, "not supported")
+ def test_cpu_affinity_set(self):
+ affinity = thisproc.cpu_affinity()
+ self.execute(lambda: self.proc.cpu_affinity(affinity))
+ self.execute_w_exc(
+ ValueError, lambda: self.proc.cpu_affinity([-1]))
+
+ @fewtimes_if_linux()
+ def test_open_files(self):
+ with open(get_testfn(), 'w'):
+ self.execute(self.proc.open_files)
+
+ @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
+ @fewtimes_if_linux()
+ def test_memory_maps(self):
+ self.execute(self.proc.memory_maps)
+
+ @unittest.skipIf(not LINUX, "LINUX only")
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit(self):
+ self.execute(lambda: self.proc.rlimit(psutil.RLIMIT_NOFILE))
+
+ @unittest.skipIf(not LINUX, "LINUX only")
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit_set(self):
+ limit = thisproc.rlimit(psutil.RLIMIT_NOFILE)
+ self.execute(lambda: self.proc.rlimit(psutil.RLIMIT_NOFILE, limit))
+ self.execute_w_exc((OSError, ValueError), lambda: self.proc.rlimit(-1))
+
+ @fewtimes_if_linux()
+ # Windows implementation is based on a single system-wide
+ # function (tested later).
+ @unittest.skipIf(WINDOWS, "worthless on WINDOWS")
+ def test_connections(self):
+ # TODO: UNIX sockets are temporarily implemented by parsing
+ # 'pfiles' cmd output; we don't want that part of the code to
+ # be executed.
+ with create_sockets():
+ kind = 'inet' if SUNOS else 'all'
+ self.execute(lambda: self.proc.connections(kind))
+
+ @unittest.skipIf(not HAS_ENVIRON, "not supported")
+ def test_environ(self):
+ self.execute(self.proc.environ)
+
+ @unittest.skipIf(not WINDOWS, "WINDOWS only")
+ def test_proc_info(self):
+ self.execute(lambda: cext.proc_info(os.getpid()))
+
+
+class TestTerminatedProcessLeaks(TestProcessObjectLeaks):
+ """Repeat the tests above looking for leaks occurring when dealing
+ with terminated processes raising NoSuchProcess exception.
+ The C functions are still invoked but will follow different code
+ paths. We'll check those code paths.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.subp = spawn_testproc()
+ cls.proc = psutil.Process(cls.subp.pid)
+ cls.proc.kill()
+ cls.proc.wait()
+
+ @classmethod
+ def tearDownClass(cls):
+ super().tearDownClass()
+ terminate(cls.subp)
+
+ def call(self, fun):
+ try:
+ fun()
+ except psutil.NoSuchProcess:
+ pass
+
+ if WINDOWS:
+
+ def test_kill(self):
+ self.execute(self.proc.kill)
+
+ def test_terminate(self):
+ self.execute(self.proc.terminate)
+
+ def test_suspend(self):
+ self.execute(self.proc.suspend)
+
+ def test_resume(self):
+ self.execute(self.proc.resume)
+
+ def test_wait(self):
+ self.execute(self.proc.wait)
+
+ def test_proc_info(self):
+ # test dual implementation
+ def call():
+ try:
+ return cext.proc_info(self.proc.pid)
+ except ProcessLookupError:
+ pass
+
+ self.execute(call)
+
+
+@unittest.skipIf(not WINDOWS, "WINDOWS only")
+class TestProcessDualImplementation(TestMemoryLeak):
+
+ def test_cmdline_peb_true(self):
+ self.execute(lambda: cext.proc_cmdline(os.getpid(), use_peb=True))
+
+ def test_cmdline_peb_false(self):
+ self.execute(lambda: cext.proc_cmdline(os.getpid(), use_peb=False))
+
+
+# ===================================================================
+# system APIs
+# ===================================================================
+
+
+class TestModuleFunctionsLeaks(TestMemoryLeak):
+ """Test leaks of psutil module functions."""
+
+ def test_coverage(self):
+ ns = system_namespace()
+ ns.test_class_coverage(self, ns.all)
+
+ # --- cpu
+
+ @fewtimes_if_linux()
+ def test_cpu_count(self): # logical
+ self.execute(lambda: psutil.cpu_count(logical=True))
+
+ @fewtimes_if_linux()
+ def test_cpu_count_cores(self):
+ self.execute(lambda: psutil.cpu_count(logical=False))
+
+ @fewtimes_if_linux()
+ def test_cpu_times(self):
+ self.execute(psutil.cpu_times)
+
+ @fewtimes_if_linux()
+ def test_per_cpu_times(self):
+ self.execute(lambda: psutil.cpu_times(percpu=True))
+
+ @fewtimes_if_linux()
+ def test_cpu_stats(self):
+ self.execute(psutil.cpu_stats)
+
+ @fewtimes_if_linux()
+ # TODO: remove this once 1892 is fixed
+ @unittest.skipIf(MACOS and platform.machine() == 'arm64',
+ "skipped due to #1892")
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_cpu_freq(self):
+ self.execute(psutil.cpu_freq)
+
+ @unittest.skipIf(not WINDOWS, "WINDOWS only")
+ def test_getloadavg(self):
+ psutil.getloadavg()
+ self.execute(psutil.getloadavg)
+
+ # --- mem
+
+ def test_virtual_memory(self):
+ self.execute(psutil.virtual_memory)
+
+ # TODO: remove this skip when this gets fixed
+ @unittest.skipIf(SUNOS, "worthless on SUNOS (uses a subprocess)")
+ def test_swap_memory(self):
+ self.execute(psutil.swap_memory)
+
+ def test_pid_exists(self):
+ times = FEW_TIMES if POSIX else self.times
+ self.execute(lambda: psutil.pid_exists(os.getpid()), times=times)
+
+ # --- disk
+
+ def test_disk_usage(self):
+ times = FEW_TIMES if POSIX else self.times
+ self.execute(lambda: psutil.disk_usage('.'), times=times)
+
+ def test_disk_partitions(self):
+ self.execute(psutil.disk_partitions)
+
+ @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
+ '/proc/diskstats not available on this Linux version')
+ @fewtimes_if_linux()
+ def test_disk_io_counters(self):
+ self.execute(lambda: psutil.disk_io_counters(nowrap=False))
+
+ # --- proc
+
+ @fewtimes_if_linux()
+ def test_pids(self):
+ self.execute(psutil.pids)
+
+ # --- net
+
+ @fewtimes_if_linux()
+ @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
+ def test_net_io_counters(self):
+ self.execute(lambda: psutil.net_io_counters(nowrap=False))
+
+ @fewtimes_if_linux()
+ @unittest.skipIf(MACOS and os.getuid() != 0, "need root access")
+ def test_net_connections(self):
+        # always opens a handle on Windows (once)
+ psutil.net_connections(kind='all')
+ with create_sockets():
+ self.execute(lambda: psutil.net_connections(kind='all'))
+
+ def test_net_if_addrs(self):
+ # Note: verified that on Windows this was a false positive.
+ tolerance = 80 * 1024 if WINDOWS else self.tolerance
+ self.execute(psutil.net_if_addrs, tolerance=tolerance)
+
+ def test_net_if_stats(self):
+ self.execute(psutil.net_if_stats)
+
+ # --- sensors
+
+ @fewtimes_if_linux()
+ @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
+ def test_sensors_battery(self):
+ self.execute(psutil.sensors_battery)
+
+ @fewtimes_if_linux()
+ @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
+ def test_sensors_temperatures(self):
+ self.execute(psutil.sensors_temperatures)
+
+ @fewtimes_if_linux()
+ @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
+ def test_sensors_fans(self):
+ self.execute(psutil.sensors_fans)
+
+ # --- others
+
+ @fewtimes_if_linux()
+ def test_boot_time(self):
+ self.execute(psutil.boot_time)
+
+ def test_users(self):
+ self.execute(psutil.users)
+
+ def test_set_debug(self):
+ self.execute(lambda: psutil._set_debug(False))
+
+ if WINDOWS:
+
+ # --- win services
+
+ def test_win_service_iter(self):
+ self.execute(cext.winservice_enumerate)
+
+ def test_win_service_get(self):
+ pass
+
+ def test_win_service_get_config(self):
+ name = next(psutil.win_service_iter()).name()
+ self.execute(lambda: cext.winservice_query_config(name))
+
+ def test_win_service_get_status(self):
+ name = next(psutil.win_service_iter()).name()
+ self.execute(lambda: cext.winservice_query_status(name))
+
+ def test_win_service_get_description(self):
+ name = next(psutil.win_service_iter()).name()
+ self.execute(lambda: cext.winservice_query_descr(name))
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_misc.py b/lib/psutil/tests/test_misc.py
new file mode 100644
index 0000000..e22789c
--- /dev/null
+++ b/lib/psutil/tests/test_misc.py
@@ -0,0 +1,852 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Miscellaneous tests.
+"""
+
+import ast
+import collections
+import errno
+import json
+import os
+import pickle
+import socket
+import stat
+import unittest
+
+import psutil
+import psutil.tests
+from psutil import LINUX
+from psutil import POSIX
+from psutil import WINDOWS
+from psutil._common import bcat
+from psutil._common import cat
+from psutil._common import debug
+from psutil._common import isfile_strict
+from psutil._common import memoize
+from psutil._common import memoize_when_activated
+from psutil._common import parse_environ_block
+from psutil._common import supports_ipv6
+from psutil._common import wrap_numbers
+from psutil._compat import PY3
+from psutil._compat import FileNotFoundError
+from psutil._compat import redirect_stderr
+from psutil.tests import APPVEYOR
+from psutil.tests import CI_TESTING
+from psutil.tests import HAS_BATTERY
+from psutil.tests import HAS_MEMORY_MAPS
+from psutil.tests import HAS_NET_IO_COUNTERS
+from psutil.tests import HAS_SENSORS_BATTERY
+from psutil.tests import HAS_SENSORS_FANS
+from psutil.tests import HAS_SENSORS_TEMPERATURES
+from psutil.tests import PYTHON_EXE
+from psutil.tests import ROOT_DIR
+from psutil.tests import SCRIPTS_DIR
+from psutil.tests import PsutilTestCase
+from psutil.tests import import_module_by_path
+from psutil.tests import mock
+from psutil.tests import reload_module
+from psutil.tests import sh
+
+
+# ===================================================================
+# --- Test classes' repr(), str(), ...
+# ===================================================================
+
+
+class TestSpecialMethods(PsutilTestCase):
+
+ def test_process__repr__(self, func=repr):
+ p = psutil.Process(self.spawn_testproc().pid)
+ r = func(p)
+ self.assertIn("psutil.Process", r)
+ self.assertIn("pid=%s" % p.pid, r)
+ self.assertIn("name='%s'" % str(p.name()),
+ r.replace("name=u'", "name='"))
+ self.assertIn("status=", r)
+ self.assertNotIn("exitcode=", r)
+ p.terminate()
+ p.wait()
+ r = func(p)
+ self.assertIn("status='terminated'", r)
+ self.assertIn("exitcode=", r)
+
+ with mock.patch.object(psutil.Process, "name",
+ side_effect=psutil.ZombieProcess(os.getpid())):
+ p = psutil.Process()
+ r = func(p)
+ self.assertIn("pid=%s" % p.pid, r)
+ self.assertIn("status='zombie'", r)
+ self.assertNotIn("name=", r)
+ with mock.patch.object(psutil.Process, "name",
+ side_effect=psutil.NoSuchProcess(os.getpid())):
+ p = psutil.Process()
+ r = func(p)
+ self.assertIn("pid=%s" % p.pid, r)
+ self.assertIn("terminated", r)
+ self.assertNotIn("name=", r)
+ with mock.patch.object(psutil.Process, "name",
+ side_effect=psutil.AccessDenied(os.getpid())):
+ p = psutil.Process()
+ r = func(p)
+ self.assertIn("pid=%s" % p.pid, r)
+ self.assertNotIn("name=", r)
+
+ def test_process__str__(self):
+ self.test_process__repr__(func=str)
+
+ def test_error__repr__(self):
+ self.assertEqual(repr(psutil.Error()), "psutil.Error()")
+
+ def test_error__str__(self):
+ self.assertEqual(str(psutil.Error()), "")
+
+ def test_no_such_process__repr__(self):
+ self.assertEqual(
+ repr(psutil.NoSuchProcess(321)),
+ "psutil.NoSuchProcess(pid=321, msg='process no longer exists')")
+ self.assertEqual(
+ repr(psutil.NoSuchProcess(321, name="name", msg="msg")),
+ "psutil.NoSuchProcess(pid=321, name='name', msg='msg')")
+
+ def test_no_such_process__str__(self):
+ self.assertEqual(
+ str(psutil.NoSuchProcess(321)),
+ "process no longer exists (pid=321)")
+ self.assertEqual(
+ str(psutil.NoSuchProcess(321, name="name", msg="msg")),
+ "msg (pid=321, name='name')")
+
+ def test_zombie_process__repr__(self):
+ self.assertEqual(
+ repr(psutil.ZombieProcess(321)),
+ 'psutil.ZombieProcess(pid=321, msg="PID still '
+ 'exists but it\'s a zombie")')
+ self.assertEqual(
+ repr(psutil.ZombieProcess(321, name="name", ppid=320, msg="foo")),
+ "psutil.ZombieProcess(pid=321, ppid=320, name='name', msg='foo')")
+
+ def test_zombie_process__str__(self):
+ self.assertEqual(
+ str(psutil.ZombieProcess(321)),
+ "PID still exists but it's a zombie (pid=321)")
+ self.assertEqual(
+ str(psutil.ZombieProcess(321, name="name", ppid=320, msg="foo")),
+ "foo (pid=321, ppid=320, name='name')")
+
+ def test_access_denied__repr__(self):
+ self.assertEqual(
+ repr(psutil.AccessDenied(321)),
+ "psutil.AccessDenied(pid=321)")
+ self.assertEqual(
+ repr(psutil.AccessDenied(321, name="name", msg="msg")),
+ "psutil.AccessDenied(pid=321, name='name', msg='msg')")
+
+ def test_access_denied__str__(self):
+ self.assertEqual(
+ str(psutil.AccessDenied(321)),
+ "(pid=321)")
+ self.assertEqual(
+ str(psutil.AccessDenied(321, name="name", msg="msg")),
+ "msg (pid=321, name='name')")
+
+ def test_timeout_expired__repr__(self):
+ self.assertEqual(
+ repr(psutil.TimeoutExpired(5)),
+ "psutil.TimeoutExpired(seconds=5, msg='timeout after 5 seconds')")
+ self.assertEqual(
+ repr(psutil.TimeoutExpired(5, pid=321, name="name")),
+ "psutil.TimeoutExpired(pid=321, name='name', seconds=5, "
+ "msg='timeout after 5 seconds')")
+
+ def test_timeout_expired__str__(self):
+ self.assertEqual(
+ str(psutil.TimeoutExpired(5)),
+ "timeout after 5 seconds")
+ self.assertEqual(
+ str(psutil.TimeoutExpired(5, pid=321, name="name")),
+ "timeout after 5 seconds (pid=321, name='name')")
+
+ def test_process__eq__(self):
+ p1 = psutil.Process()
+ p2 = psutil.Process()
+ self.assertEqual(p1, p2)
+ p2._ident = (0, 0)
+ self.assertNotEqual(p1, p2)
+ self.assertNotEqual(p1, 'foo')
+
+ def test_process__hash__(self):
+ s = set([psutil.Process(), psutil.Process()])
+ self.assertEqual(len(s), 1)
+
+
+# ===================================================================
+# --- Misc, generic, corner cases
+# ===================================================================
+
+
+class TestMisc(PsutilTestCase):
+
+ def test__all__(self):
+ dir_psutil = dir(psutil)
+ for name in dir_psutil:
+ if name in ('long', 'tests', 'test', 'PermissionError',
+ 'ProcessLookupError'):
+ continue
+ if not name.startswith('_'):
+ try:
+ __import__(name)
+ except ImportError:
+ if name not in psutil.__all__:
+ fun = getattr(psutil, name)
+ if fun is None:
+ continue
+ if (fun.__doc__ is not None and
+ 'deprecated' not in fun.__doc__.lower()):
+ raise self.fail('%r not in psutil.__all__' % name)
+
+ # Import 'star' will break if __all__ is inconsistent, see:
+ # https://github.com/giampaolo/psutil/issues/656
+ # Can't do `from psutil import *` as it won't work on python 3
+ # so we simply iterate over __all__.
+ for name in psutil.__all__:
+ self.assertIn(name, dir_psutil)
+
+ def test_version(self):
+ self.assertEqual('.'.join([str(x) for x in psutil.version_info]),
+ psutil.__version__)
+
+ def test_process_as_dict_no_new_names(self):
+ # See https://github.com/giampaolo/psutil/issues/813
+ p = psutil.Process()
+ p.foo = '1'
+ self.assertNotIn('foo', p.as_dict())
+
+ def test_serialization(self):
+ def check(ret):
+ if json is not None:
+ json.loads(json.dumps(ret))
+ a = pickle.dumps(ret)
+ b = pickle.loads(a)
+ self.assertEqual(ret, b)
+
+ check(psutil.Process().as_dict())
+ check(psutil.virtual_memory())
+ check(psutil.swap_memory())
+ check(psutil.cpu_times())
+ check(psutil.cpu_times_percent(interval=0))
+ check(psutil.net_io_counters())
+ if LINUX and not os.path.exists('/proc/diskstats'):
+ pass
+ else:
+ if not APPVEYOR:
+ check(psutil.disk_io_counters())
+ check(psutil.disk_partitions())
+ check(psutil.disk_usage(os.getcwd()))
+ check(psutil.users())
+
+ # XXX: https://github.com/pypa/setuptools/pull/2896
+ @unittest.skipIf(APPVEYOR, "temporarily disabled due to setuptools bug")
+ def test_setup_script(self):
+ setup_py = os.path.join(ROOT_DIR, 'setup.py')
+ if CI_TESTING and not os.path.exists(setup_py):
+ return self.skipTest("can't find setup.py")
+ module = import_module_by_path(setup_py)
+ self.assertRaises(SystemExit, module.setup)
+ self.assertEqual(module.get_version(), psutil.__version__)
+
+ def test_ad_on_process_creation(self):
+ # We are supposed to be able to instantiate Process also in case
+ # of zombie processes or access denied.
+ with mock.patch.object(psutil.Process, 'create_time',
+ side_effect=psutil.AccessDenied) as meth:
+ psutil.Process()
+ assert meth.called
+ with mock.patch.object(psutil.Process, 'create_time',
+ side_effect=psutil.ZombieProcess(1)) as meth:
+ psutil.Process()
+ assert meth.called
+ with mock.patch.object(psutil.Process, 'create_time',
+ side_effect=ValueError) as meth:
+ with self.assertRaises(ValueError):
+ psutil.Process()
+ assert meth.called
+
+ def test_sanity_version_check(self):
+ # see: https://github.com/giampaolo/psutil/issues/564
+ with mock.patch(
+ "psutil._psplatform.cext.version", return_value="0.0.0"):
+ with self.assertRaises(ImportError) as cm:
+ reload_module(psutil)
+ self.assertIn("version conflict", str(cm.exception).lower())
+
+
+# ===================================================================
+# --- psutil/_common.py utils
+# ===================================================================
+
+
+class TestCommonModule(PsutilTestCase):
+
+ def test_memoize(self):
+ @memoize
+ def foo(*args, **kwargs):
+ """foo docstring"""
+ calls.append(None)
+ return (args, kwargs)
+
+ calls = []
+ # no args
+ for x in range(2):
+ ret = foo()
+ expected = ((), {})
+ self.assertEqual(ret, expected)
+ self.assertEqual(len(calls), 1)
+ # with args
+ for x in range(2):
+ ret = foo(1)
+ expected = ((1, ), {})
+ self.assertEqual(ret, expected)
+ self.assertEqual(len(calls), 2)
+ # with args + kwargs
+ for x in range(2):
+ ret = foo(1, bar=2)
+ expected = ((1, ), {'bar': 2})
+ self.assertEqual(ret, expected)
+ self.assertEqual(len(calls), 3)
+ # clear cache
+ foo.cache_clear()
+ ret = foo()
+ expected = ((), {})
+ self.assertEqual(ret, expected)
+ self.assertEqual(len(calls), 4)
+ # docstring
+ self.assertEqual(foo.__doc__, "foo docstring")
+
+ def test_memoize_when_activated(self):
+ class Foo:
+
+ @memoize_when_activated
+ def foo(self):
+ calls.append(None)
+
+ f = Foo()
+ calls = []
+ f.foo()
+ f.foo()
+ self.assertEqual(len(calls), 2)
+
+ # activate
+ calls = []
+ f.foo.cache_activate(f)
+ f.foo()
+ f.foo()
+ self.assertEqual(len(calls), 1)
+
+ # deactivate
+ calls = []
+ f.foo.cache_deactivate(f)
+ f.foo()
+ f.foo()
+ self.assertEqual(len(calls), 2)
+
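+    # parse_environ_block() parses a raw environment block: NUL-separated
+    # "key=value" pairs terminated by an empty entry (double NUL), e.g.
+    # the format returned by the Windows GetEnvironmentStrings API.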
+ def test_parse_environ_block(self):
+ def k(s):
+ return s.upper() if WINDOWS else s
+
+ self.assertEqual(parse_environ_block("a=1\0"),
+ {k("a"): "1"})
+ self.assertEqual(parse_environ_block("a=1\0b=2\0\0"),
+ {k("a"): "1", k("b"): "2"})
+ self.assertEqual(parse_environ_block("a=1\0b=\0\0"),
+ {k("a"): "1", k("b"): ""})
+ # ignore everything after \0\0
+ self.assertEqual(parse_environ_block("a=1\0b=2\0\0c=3\0"),
+ {k("a"): "1", k("b"): "2"})
+ # ignore everything that is not an assignment
+ self.assertEqual(parse_environ_block("xxx\0a=1\0"), {k("a"): "1"})
+ self.assertEqual(parse_environ_block("a=1\0=b=2\0"), {k("a"): "1"})
+ # do not fail if the block is incomplete
+ self.assertEqual(parse_environ_block("a=1\0b=2"), {k("a"): "1"})
+
+ def test_supports_ipv6(self):
+ self.addCleanup(supports_ipv6.cache_clear)
+ if supports_ipv6():
+ with mock.patch('psutil._common.socket') as s:
+ s.has_ipv6 = False
+ supports_ipv6.cache_clear()
+ assert not supports_ipv6()
+
+ supports_ipv6.cache_clear()
+ with mock.patch('psutil._common.socket.socket',
+ side_effect=socket.error) as s:
+ assert not supports_ipv6()
+ assert s.called
+
+ supports_ipv6.cache_clear()
+ with mock.patch('psutil._common.socket.socket',
+ side_effect=socket.gaierror) as s:
+ assert not supports_ipv6()
+ supports_ipv6.cache_clear()
+ assert s.called
+
+ supports_ipv6.cache_clear()
+ with mock.patch('psutil._common.socket.socket.bind',
+ side_effect=socket.gaierror) as s:
+ assert not supports_ipv6()
+ supports_ipv6.cache_clear()
+ assert s.called
+ else:
+ with self.assertRaises(socket.error):
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ try:
+ sock.bind(("::1", 0))
+ finally:
+ sock.close()
+
+ def test_isfile_strict(self):
+ this_file = os.path.abspath(__file__)
+ assert isfile_strict(this_file)
+ assert not isfile_strict(os.path.dirname(this_file))
+ with mock.patch('psutil._common.os.stat',
+ side_effect=OSError(errno.EPERM, "foo")):
+ self.assertRaises(OSError, isfile_strict, this_file)
+ with mock.patch('psutil._common.os.stat',
+ side_effect=OSError(errno.EACCES, "foo")):
+ self.assertRaises(OSError, isfile_strict, this_file)
+ with mock.patch('psutil._common.os.stat',
+ side_effect=OSError(errno.ENOENT, "foo")):
+ assert not isfile_strict(this_file)
+ with mock.patch('psutil._common.stat.S_ISREG', return_value=False):
+ assert not isfile_strict(this_file)
+
+ def test_debug(self):
+ if PY3:
+ from io import StringIO
+ else:
+ from StringIO import StringIO
+
+ with redirect_stderr(StringIO()) as f:
+ debug("hello")
+ msg = f.getvalue()
+ assert msg.startswith("psutil-debug"), msg
+ self.assertIn("hello", msg)
+ self.assertIn(__file__.replace('.pyc', '.py'), msg)
+
+ # supposed to use repr(exc)
+ with redirect_stderr(StringIO()) as f:
+ debug(ValueError("this is an error"))
+ msg = f.getvalue()
+ self.assertIn("ignoring ValueError", msg)
+ self.assertIn("'this is an error'", msg)
+
+ # supposed to use str(exc), because of extra info about file name
+ with redirect_stderr(StringIO()) as f:
+ exc = OSError(2, "no such file")
+ exc.filename = "/foo"
+ debug(exc)
+ msg = f.getvalue()
+ self.assertIn("no such file", msg)
+ self.assertIn("/foo", msg)
+
+ def test_cat_bcat(self):
+ testfn = self.get_testfn()
+ with open(testfn, "wt") as f:
+ f.write("foo")
+ self.assertEqual(cat(testfn), "foo")
+ self.assertEqual(bcat(testfn), b"foo")
+ self.assertRaises(FileNotFoundError, cat, testfn + '-invalid')
+ self.assertRaises(FileNotFoundError, bcat, testfn + '-invalid')
+ self.assertEqual(cat(testfn + '-invalid', fallback="bar"), "bar")
+ self.assertEqual(bcat(testfn + '-invalid', fallback="bar"), "bar")
+
+
+# ===================================================================
+# --- Tests for wrap_numbers() function.
+# ===================================================================
+
+
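+# wrap_numbers() keeps counters "monotonic" across counter wraps: when a
+# value decreases (the kernel counter overflowed and restarted), the value
+# it had before the drop is added to subsequent readings (see test_wrap).
+# 'nt' is a minimal 3-field stand-in for the counters namedtuples, e.g.
+# those returned by disk_io_counters(perdisk=True).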
+nt = collections.namedtuple('foo', 'a b c')
+
+
+class TestWrapNumbers(PsutilTestCase):
+
+ def setUp(self):
+ wrap_numbers.cache_clear()
+
+ tearDown = setUp
+
+ def test_first_call(self):
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_input_hasnt_changed(self):
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_increase_but_no_wrap(self):
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(10, 15, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(20, 25, 30)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(20, 25, 30)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_wrap(self):
+ # let's say 100 is the threshold
+ input = {'disk1': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # first wrap restarts from 10
+ input = {'disk1': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 110)})
+ # then it remains the same
+ input = {'disk1': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 110)})
+ # then it goes up
+ input = {'disk1': nt(100, 100, 90)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 190)})
+ # then it wraps again
+ input = {'disk1': nt(100, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 210)})
+ # and remains the same
+ input = {'disk1': nt(100, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 210)})
+ # now wrap another num
+ input = {'disk1': nt(50, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(150, 100, 210)})
+ # and again
+ input = {'disk1': nt(40, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(190, 100, 210)})
+ # keep it the same
+ input = {'disk1': nt(40, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(190, 100, 210)})
+
+ def test_changing_keys(self):
+ # Emulate a case where the second call to disk_io()
+ # (or whatever) provides a new disk, then the new disk
+ # disappears on the third call.
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(5, 5, 5),
+ 'disk2': nt(7, 7, 7)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(8, 8, 8)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_changing_keys_w_wrap(self):
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # disk 2 wraps
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 110)})
+ # disk 2 disappears
+ input = {'disk1': nt(50, 50, 50)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ # then it appears again; the old wrap is supposed to be
+ # gone.
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # remains the same
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # and then wraps again
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 110)})
+
+ def test_real_data(self):
+ d = {'nvme0n1': (300, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
+ 'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
+ 'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
+ 'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
+ self.assertEqual(wrap_numbers(d, 'disk_io'), d)
+ self.assertEqual(wrap_numbers(d, 'disk_io'), d)
+ # decrease this ↓
+ d = {'nvme0n1': (100, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
+ 'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
+ 'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
+ 'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
+ out = wrap_numbers(d, 'disk_io')
+ self.assertEqual(out['nvme0n1'][0], 400)
+
+ # --- cache tests
+
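+    # wrap_numbers.cache_info() returns three dicts, each keyed by the
+    # caller name ('disk_io' here): [0] the last input seen, [1] the
+    # offsets added to each (key, index) position, [2] per-key sets of
+    # the positions which wrapped at least once.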
+ def test_cache_first_call(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(cache[1], {'disk_io': {}})
+ self.assertEqual(cache[2], {'disk_io': {}})
+
+ def test_cache_call_twice(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ input = {'disk1': nt(10, 10, 10)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
+ self.assertEqual(cache[2], {'disk_io': {}})
+
+ def test_cache_wrap(self):
+ # let's say 100 is the threshold
+ input = {'disk1': nt(100, 100, 100)}
+ wrap_numbers(input, 'disk_io')
+
+ # first wrap restarts from 10
+ input = {'disk1': nt(100, 100, 10)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 100}})
+ self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})
+
+ def assert_():
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0,
+ ('disk1', 2): 100}})
+ self.assertEqual(cache[2],
+ {'disk_io': {'disk1': set([('disk1', 2)])}})
+
+ # then it remains the same
+ input = {'disk1': nt(100, 100, 10)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ assert_()
+
+ # then it goes up
+ input = {'disk1': nt(100, 100, 90)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ assert_()
+
+ # then it wraps again
+ input = {'disk1': nt(100, 100, 20)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 190}})
+ self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})
+
+ def test_cache_changing_keys(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ input = {'disk1': nt(5, 5, 5),
+ 'disk2': nt(7, 7, 7)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
+ self.assertEqual(cache[2], {'disk_io': {}})
+
+ def test_cache_clear(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ wrap_numbers(input, 'disk_io')
+ wrap_numbers.cache_clear('disk_io')
+ self.assertEqual(wrap_numbers.cache_info(), ({}, {}, {}))
+ wrap_numbers.cache_clear('disk_io')
+ wrap_numbers.cache_clear('?!?')
+
+ @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
+ def test_cache_clear_public_apis(self):
+ if not psutil.disk_io_counters() or not psutil.net_io_counters():
+ return self.skipTest("no disks or NICs available")
+ psutil.disk_io_counters()
+ psutil.net_io_counters()
+ caches = wrap_numbers.cache_info()
+ for cache in caches:
+ self.assertIn('psutil.disk_io_counters', cache)
+ self.assertIn('psutil.net_io_counters', cache)
+
+ psutil.disk_io_counters.cache_clear()
+ caches = wrap_numbers.cache_info()
+ for cache in caches:
+ self.assertIn('psutil.net_io_counters', cache)
+ self.assertNotIn('psutil.disk_io_counters', cache)
+
+ psutil.net_io_counters.cache_clear()
+ caches = wrap_numbers.cache_info()
+ self.assertEqual(caches, ({}, {}, {}))
+
+
+# ===================================================================
+# --- Example script tests
+# ===================================================================
+
+
+@unittest.skipIf(not os.path.exists(SCRIPTS_DIR),
+ "can't locate scripts directory")
+class TestScripts(PsutilTestCase):
+ """Tests for scripts in the "scripts" directory."""
+
+ @staticmethod
+ def assert_stdout(exe, *args, **kwargs):
+        exe = os.path.join(SCRIPTS_DIR, exe)
+ cmd = [PYTHON_EXE, exe]
+ for arg in args:
+ cmd.append(arg)
+ try:
+ out = sh(cmd, **kwargs).strip()
+ except RuntimeError as err:
+ if 'AccessDenied' in str(err):
+ return str(err)
+ else:
+ raise
+ assert out, out
+ return out
+
+ @staticmethod
+ def assert_syntax(exe, args=None):
+ exe = os.path.join(SCRIPTS_DIR, exe)
+ if PY3:
+ f = open(exe, 'rt', encoding='utf8')
+ else:
+ f = open(exe, 'rt')
+ with f:
+ src = f.read()
+ ast.parse(src)
+
+ def test_coverage(self):
+ # make sure all example scripts have a test method defined
+ meths = dir(self)
+ for name in os.listdir(SCRIPTS_DIR):
+ if name.endswith('.py'):
+ if 'test_' + os.path.splitext(name)[0] not in meths:
+ # self.assert_stdout(name)
+ raise self.fail('no test defined for %r script'
+ % os.path.join(SCRIPTS_DIR, name))
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ def test_executable(self):
+ for root, dirs, files in os.walk(SCRIPTS_DIR):
+ for file in files:
+ if file.endswith('.py'):
+ path = os.path.join(root, file)
+ if not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]:
+ raise self.fail('%r is not executable' % path)
+
+ def test_disk_usage(self):
+ self.assert_stdout('disk_usage.py')
+
+ def test_free(self):
+ self.assert_stdout('free.py')
+
+ def test_meminfo(self):
+ self.assert_stdout('meminfo.py')
+
+ def test_procinfo(self):
+ self.assert_stdout('procinfo.py', str(os.getpid()))
+
+ @unittest.skipIf(CI_TESTING and not psutil.users(), "no users")
+ def test_who(self):
+ self.assert_stdout('who.py')
+
+ def test_ps(self):
+ self.assert_stdout('ps.py')
+
+ def test_pstree(self):
+ self.assert_stdout('pstree.py')
+
+ def test_netstat(self):
+ self.assert_stdout('netstat.py')
+
+ def test_ifconfig(self):
+ self.assert_stdout('ifconfig.py')
+
+ @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
+ def test_pmap(self):
+ self.assert_stdout('pmap.py', str(os.getpid()))
+
+ def test_procsmem(self):
+ if 'uss' not in psutil.Process().memory_full_info()._fields:
+ raise self.skipTest("not supported")
+ self.assert_stdout('procsmem.py')
+
+ def test_killall(self):
+ self.assert_syntax('killall.py')
+
+ def test_nettop(self):
+ self.assert_syntax('nettop.py')
+
+ def test_top(self):
+ self.assert_syntax('top.py')
+
+ def test_iotop(self):
+ self.assert_syntax('iotop.py')
+
+ def test_pidof(self):
+ output = self.assert_stdout('pidof.py', psutil.Process().name())
+ self.assertIn(str(os.getpid()), output)
+
+ @unittest.skipIf(not WINDOWS, "WINDOWS only")
+ def test_winservices(self):
+ self.assert_stdout('winservices.py')
+
+ def test_cpu_distribution(self):
+ self.assert_syntax('cpu_distribution.py')
+
+ @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
+ def test_temperatures(self):
+ if not psutil.sensors_temperatures():
+ self.skipTest("no temperatures")
+ self.assert_stdout('temperatures.py')
+
+ @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
+ def test_fans(self):
+ if not psutil.sensors_fans():
+ self.skipTest("no fans")
+ self.assert_stdout('fans.py')
+
+ @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_battery(self):
+ self.assert_stdout('battery.py')
+
+ @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_sensors(self):
+ self.assert_stdout('sensors.py')
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_osx.py b/lib/psutil/tests/test_osx.py
new file mode 100644
index 0000000..af12648
--- /dev/null
+++ b/lib/psutil/tests/test_osx.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""macOS specific tests."""
+
+import platform
+import re
+import time
+import unittest
+
+import psutil
+from psutil import MACOS
+from psutil import POSIX
+from psutil.tests import HAS_BATTERY
+from psutil.tests import TOLERANCE_DISK_USAGE
+from psutil.tests import TOLERANCE_SYS_MEM
+from psutil.tests import PsutilTestCase
+from psutil.tests import retry_on_failure
+from psutil.tests import sh
+from psutil.tests import spawn_testproc
+from psutil.tests import terminate
+
+
+if POSIX:
+ from psutil._psutil_posix import getpagesize
+
+
+def sysctl(cmdline):
+ """Expects a sysctl command with an argument and parse the result
+ returning only the value of interest.
+ """
+ out = sh(cmdline)
+ result = out.split()[1]
+ try:
+ return int(result)
+ except ValueError:
+ return result
+
+
+def vm_stat(field):
+ """Wrapper around 'vm_stat' cmdline utility."""
+ out = sh('vm_stat')
+ for line in out.split('\n'):
+ if field in line:
+ break
+ else:
+ raise ValueError("line not found")
+ return int(re.search(r'\d+', line).group(0)) * getpagesize()
+
+
+# http://code.activestate.com/recipes/578019/
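+# Convert a human readable size string such as "1.00M" into bytes, using
+# 1024-based multipliers; e.g. human2bytes("1.00M") == 1048576.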
+def human2bytes(s):
+ SYMBOLS = {
+ 'customary': ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'),
+ }
+ init = s
+ num = ""
+ while s and s[0:1].isdigit() or s[0:1] == '.':
+ num += s[0]
+ s = s[1:]
+ num = float(num)
+ letter = s.strip()
+ for name, sset in SYMBOLS.items():
+ if letter in sset:
+ break
+ else:
+ if letter == 'k':
+ sset = SYMBOLS['customary']
+ letter = letter.upper()
+ else:
+ raise ValueError("can't interpret %r" % init)
+ prefix = {sset[0]: 1}
+ for i, s in enumerate(sset[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+ return int(num * prefix[letter])
+
+
+@unittest.skipIf(not MACOS, "MACOS only")
+class TestProcess(PsutilTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc().pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ def test_process_create_time(self):
+ output = sh("ps -o lstart -p %s" % self.pid)
+ start_ps = output.replace('STARTED', '').strip()
+ hhmmss = start_ps.split(' ')[-2]
+ year = start_ps.split(' ')[-1]
+ start_psutil = psutil.Process(self.pid).create_time()
+ self.assertEqual(
+ hhmmss,
+ time.strftime("%H:%M:%S", time.localtime(start_psutil)))
+ self.assertEqual(
+ year,
+ time.strftime("%Y", time.localtime(start_psutil)))
+
+
+@unittest.skipIf(not MACOS, "MACOS only")
+class TestSystemAPIs(PsutilTestCase):
+
+ # --- disk
+
+ @retry_on_failure()
+ def test_disks(self):
+ # test psutil.disk_usage() and psutil.disk_partitions()
+ # against "df -a"
+ def df(path):
+ out = sh('df -k "%s"' % path).strip()
+ lines = out.split('\n')
+ lines.pop(0)
+ line = lines.pop(0)
+ dev, total, used, free = line.split()[:4]
+ if dev == 'none':
+ dev = ''
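+            # df -k reports sizes in 1-KB blocks; convert them to bytes.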
+ total = int(total) * 1024
+ used = int(used) * 1024
+ free = int(free) * 1024
+ return dev, total, used, free
+
+ for part in psutil.disk_partitions(all=False):
+ usage = psutil.disk_usage(part.mountpoint)
+ dev, total, used, free = df(part.mountpoint)
+ self.assertEqual(part.device, dev)
+ self.assertEqual(usage.total, total)
+ self.assertAlmostEqual(usage.free, free,
+ delta=TOLERANCE_DISK_USAGE)
+ self.assertAlmostEqual(usage.used, used,
+ delta=TOLERANCE_DISK_USAGE)
+
+ # --- cpu
+
+ def test_cpu_count_logical(self):
+ num = sysctl("sysctl hw.logicalcpu")
+ self.assertEqual(num, psutil.cpu_count(logical=True))
+
+ def test_cpu_count_cores(self):
+ num = sysctl("sysctl hw.physicalcpu")
+ self.assertEqual(num, psutil.cpu_count(logical=False))
+
+ # TODO: remove this once 1892 is fixed
+ @unittest.skipIf(platform.machine() == 'arm64', "skipped due to #1892")
+ def test_cpu_freq(self):
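+        # psutil.cpu_freq() reports MHz while the hw.cpufrequency* sysctls
+        # report Hz, hence the multiplications below.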
+ freq = psutil.cpu_freq()
+ self.assertEqual(
+ freq.current * 1000 * 1000, sysctl("sysctl hw.cpufrequency"))
+ self.assertEqual(
+ freq.min * 1000 * 1000, sysctl("sysctl hw.cpufrequency_min"))
+ self.assertEqual(
+ freq.max * 1000 * 1000, sysctl("sysctl hw.cpufrequency_max"))
+
+ # --- virtual mem
+
+ def test_vmem_total(self):
+ sysctl_hwphymem = sysctl('sysctl hw.memsize')
+ self.assertEqual(sysctl_hwphymem, psutil.virtual_memory().total)
+
+ @retry_on_failure()
+ def test_vmem_free(self):
+ vmstat_val = vm_stat("free")
+ psutil_val = psutil.virtual_memory().free
+ self.assertAlmostEqual(psutil_val, vmstat_val, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_active(self):
+ vmstat_val = vm_stat("active")
+ psutil_val = psutil.virtual_memory().active
+ self.assertAlmostEqual(psutil_val, vmstat_val, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_inactive(self):
+ vmstat_val = vm_stat("inactive")
+ psutil_val = psutil.virtual_memory().inactive
+ self.assertAlmostEqual(psutil_val, vmstat_val, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_vmem_wired(self):
+ vmstat_val = vm_stat("wired")
+ psutil_val = psutil.virtual_memory().wired
+ self.assertAlmostEqual(psutil_val, vmstat_val, delta=TOLERANCE_SYS_MEM)
+
+ # --- swap mem
+
+ @retry_on_failure()
+ def test_swapmem_sin(self):
+ vmstat_val = vm_stat("Pageins")
+ psutil_val = psutil.swap_memory().sin
+ self.assertAlmostEqual(psutil_val, vmstat_val, delta=TOLERANCE_SYS_MEM)
+
+ @retry_on_failure()
+ def test_swapmem_sout(self):
+ vmstat_val = vm_stat("Pageout")
+ psutil_val = psutil.swap_memory().sout
+ self.assertAlmostEqual(psutil_val, vmstat_val, delta=TOLERANCE_SYS_MEM)
+
+ # Not very reliable.
+ # def test_swapmem_total(self):
+ # out = sh('sysctl vm.swapusage')
+ # out = out.replace('vm.swapusage: ', '')
+ # total, used, free = re.findall('\d+.\d+\w', out)
+ # psutil_smem = psutil.swap_memory()
+ # self.assertEqual(psutil_smem.total, human2bytes(total))
+ # self.assertEqual(psutil_smem.used, human2bytes(used))
+ # self.assertEqual(psutil_smem.free, human2bytes(free))
+
+ # --- network
+
+ def test_net_if_stats(self):
+ for name, stats in psutil.net_if_stats().items():
+ try:
+ out = sh("ifconfig %s" % name)
+ except RuntimeError:
+ pass
+ else:
+ self.assertEqual(stats.isup, 'RUNNING' in out, msg=out)
+ self.assertEqual(stats.mtu,
+ int(re.findall(r'mtu (\d+)', out)[0]))
+
+ # --- sensors_battery
+
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_sensors_battery(self):
+ out = sh("pmset -g batt")
+ percent = re.search(r"(\d+)%", out).group(1)
+ drawing_from = re.search("Now drawing from '([^']+)'", out).group(1)
+ power_plugged = drawing_from == "AC Power"
+ psutil_result = psutil.sensors_battery()
+ self.assertEqual(psutil_result.power_plugged, power_plugged)
+ self.assertEqual(psutil_result.percent, int(percent))
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_posix.py b/lib/psutil/tests/test_posix.py
new file mode 100644
index 0000000..d873223
--- /dev/null
+++ b/lib/psutil/tests/test_posix.py
@@ -0,0 +1,432 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""POSIX specific tests."""
+
+import datetime
+import errno
+import os
+import re
+import subprocess
+import time
+import unittest
+
+import psutil
+from psutil import AIX
+from psutil import BSD
+from psutil import LINUX
+from psutil import MACOS
+from psutil import OPENBSD
+from psutil import POSIX
+from psutil import SUNOS
+from psutil.tests import CI_TESTING
+from psutil.tests import HAS_NET_IO_COUNTERS
+from psutil.tests import PYTHON_EXE
+from psutil.tests import PsutilTestCase
+from psutil.tests import mock
+from psutil.tests import retry_on_failure
+from psutil.tests import sh
+from psutil.tests import skip_on_access_denied
+from psutil.tests import spawn_testproc
+from psutil.tests import terminate
+from psutil.tests import which
+
+
+if POSIX:
+ import mmap
+ import resource
+
+ from psutil._psutil_posix import getpagesize
+
+
+def ps(fmt, pid=None):
+ """
+ Wrapper for calling the ps command with a little bit of cross-platform
+ support for a narrow range of features.
+ """
+
+ cmd = ['ps']
+
+ if LINUX:
+ cmd.append('--no-headers')
+
+ if pid is not None:
+ cmd.extend(['-p', str(pid)])
+ else:
+ if SUNOS or AIX:
+ cmd.append('-A')
+ else:
+ cmd.append('ax')
+
+ if SUNOS:
+        # map field names not supported by Solaris ps to their equivalents
+        fmt_map = {'command': 'comm', 'start': 'stime'}
+        fmt = fmt_map.get(fmt, fmt)
+
+ cmd.extend(['-o', fmt])
+
+ output = sh(cmd)
+
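+    # '--no-headers' was passed on Linux; on other platforms the header
+    # row is dropped manually.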
+ if LINUX:
+ output = output.splitlines()
+ else:
+ output = output.splitlines()[1:]
+
+ all_output = []
+ for line in output:
+ line = line.strip()
+
+ try:
+ line = int(line)
+ except ValueError:
+ pass
+
+ all_output.append(line)
+
+ if pid is None:
+ return all_output
+ else:
+ return all_output[0]
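+
+# Example: ps('pid') returns one entry per process as a list, while
+# ps('rss', some_pid) returns a single value (the RSS of that PID, in KB).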
+
+# ps "-o" field names differ wildly between platforms.
+# "comm" means "only executable name" but is not available on BSD platforms.
+# "args" means "command with all its arguments", and is also not available
+# on BSD platforms.
+# "command" is like "args" on most platforms, but like "comm" on AIX,
+# and not available on SUNOS.
+# so for the executable name we can use "comm" on Solaris and split "command"
+# on other platforms.
+# to get the cmdline (with args) we have to use "args" on AIX and
+# Solaris, and can use "command" on all others.
+
+
+def ps_name(pid):
+ field = "command"
+ if SUNOS:
+ field = "comm"
+ return ps(field, pid).split()[0]
+
+
+def ps_args(pid):
+ field = "command"
+ if AIX or SUNOS:
+ field = "args"
+ return ps(field, pid)
+
+
+def ps_rss(pid):
+ field = "rss"
+ if AIX:
+ field = "rssize"
+ return ps(field, pid)
+
+
+def ps_vsz(pid):
+ field = "vsz"
+ if AIX:
+ field = "vsize"
+ return ps(field, pid)
+
+
+@unittest.skipIf(not POSIX, "POSIX only")
+class TestProcess(PsutilTestCase):
+ """Compare psutil results against 'ps' command line utility (mainly)."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc([PYTHON_EXE, "-E", "-O"],
+ stdin=subprocess.PIPE).pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ def test_ppid(self):
+ ppid_ps = ps('ppid', self.pid)
+ ppid_psutil = psutil.Process(self.pid).ppid()
+ self.assertEqual(ppid_ps, ppid_psutil)
+
+ def test_uid(self):
+ uid_ps = ps('uid', self.pid)
+ uid_psutil = psutil.Process(self.pid).uids().real
+ self.assertEqual(uid_ps, uid_psutil)
+
+ def test_gid(self):
+ gid_ps = ps('rgid', self.pid)
+ gid_psutil = psutil.Process(self.pid).gids().real
+ self.assertEqual(gid_ps, gid_psutil)
+
+ def test_username(self):
+ username_ps = ps('user', self.pid)
+ username_psutil = psutil.Process(self.pid).username()
+ self.assertEqual(username_ps, username_psutil)
+
+ def test_username_no_resolution(self):
+ # Emulate a case where the system can't resolve the uid to
+ # a username in which case psutil is supposed to return
+ # the stringified uid.
+ p = psutil.Process()
+ with mock.patch("psutil.pwd.getpwuid", side_effect=KeyError) as fun:
+ self.assertEqual(p.username(), str(p.uids().real))
+ assert fun.called
+
+ @skip_on_access_denied()
+ @retry_on_failure()
+ def test_rss_memory(self):
+ # give python interpreter some time to properly initialize
+ # so that the results are the same
+ time.sleep(0.1)
+ rss_ps = ps_rss(self.pid)
+ rss_psutil = psutil.Process(self.pid).memory_info()[0] / 1024
+ self.assertEqual(rss_ps, rss_psutil)
+
+ @skip_on_access_denied()
+ @retry_on_failure()
+ def test_vsz_memory(self):
+ # give python interpreter some time to properly initialize
+ # so that the results are the same
+ time.sleep(0.1)
+ vsz_ps = ps_vsz(self.pid)
+ vsz_psutil = psutil.Process(self.pid).memory_info()[1] / 1024
+ self.assertEqual(vsz_ps, vsz_psutil)
+
+ def test_name(self):
+ name_ps = ps_name(self.pid)
+ # remove path if there is any, from the command
+ name_ps = os.path.basename(name_ps).lower()
+ name_psutil = psutil.Process(self.pid).name().lower()
+ # ...because of how we calculate PYTHON_EXE; on MACOS this may
+ # be "pythonX.Y".
+ name_ps = re.sub(r"\d.\d", "", name_ps)
+ name_psutil = re.sub(r"\d.\d", "", name_psutil)
+ # ...may also be "python.X"
+ name_ps = re.sub(r"\d", "", name_ps)
+ name_psutil = re.sub(r"\d", "", name_psutil)
+ self.assertEqual(name_ps, name_psutil)
+
+ def test_name_long(self):
+ # On UNIX the kernel truncates the name to the first 15
+ # characters. In such a case psutil tries to determine the
+ # full name from the cmdline.
+ name = "long-program-name"
+ cmdline = ["long-program-name-extended", "foo", "bar"]
+ with mock.patch("psutil._psplatform.Process.name",
+ return_value=name):
+ with mock.patch("psutil._psplatform.Process.cmdline",
+ return_value=cmdline):
+ p = psutil.Process()
+ self.assertEqual(p.name(), "long-program-name-extended")
+
+ def test_name_long_cmdline_ad_exc(self):
+ # Same as above but emulates a case where cmdline() raises
+ # AccessDenied in which case psutil is supposed to return
+ # the truncated name instead of crashing.
+ name = "long-program-name"
+ with mock.patch("psutil._psplatform.Process.name",
+ return_value=name):
+ with mock.patch("psutil._psplatform.Process.cmdline",
+ side_effect=psutil.AccessDenied(0, "")):
+ p = psutil.Process()
+ self.assertEqual(p.name(), "long-program-name")
+
+ def test_name_long_cmdline_nsp_exc(self):
+ # Same as above but emulates a case where cmdline() raises NSP
+ # which is supposed to propagate.
+ name = "long-program-name"
+ with mock.patch("psutil._psplatform.Process.name",
+ return_value=name):
+ with mock.patch("psutil._psplatform.Process.cmdline",
+ side_effect=psutil.NoSuchProcess(0, "")):
+ p = psutil.Process()
+ self.assertRaises(psutil.NoSuchProcess, p.name)
+
+ @unittest.skipIf(MACOS or BSD, 'ps -o start not available')
+ def test_create_time(self):
+ time_ps = ps('start', self.pid)
+ time_psutil = psutil.Process(self.pid).create_time()
+ time_psutil_tstamp = datetime.datetime.fromtimestamp(
+ time_psutil).strftime("%H:%M:%S")
+ # sometimes ps shows the time rounded up instead of down, so we check
+ # for both possible values
+ round_time_psutil = round(time_psutil)
+ round_time_psutil_tstamp = datetime.datetime.fromtimestamp(
+ round_time_psutil).strftime("%H:%M:%S")
+ self.assertIn(time_ps, [time_psutil_tstamp, round_time_psutil_tstamp])
+
+ def test_exe(self):
+ ps_pathname = ps_name(self.pid)
+ psutil_pathname = psutil.Process(self.pid).exe()
+ try:
+ self.assertEqual(ps_pathname, psutil_pathname)
+ except AssertionError:
+ # certain platforms such as BSD are more accurate returning:
+ # "/usr/local/bin/python2.7"
+ # ...instead of:
+ # "/usr/local/bin/python"
+ # We do not want to consider this difference in accuracy
+ # an error.
+            adjusted_ps_pathname = ps_pathname[:len(psutil_pathname)]
+            self.assertEqual(adjusted_ps_pathname, psutil_pathname)
+
+ # On macOS the official python installer exposes a python wrapper that
+ # executes a python executable hidden inside an application bundle inside
+ # the Python framework.
+ # There's a race condition between the ps call & the psutil call below
+ # depending on the completion of the execve call so let's retry on failure
+ @retry_on_failure()
+ def test_cmdline(self):
+ ps_cmdline = ps_args(self.pid)
+ psutil_cmdline = " ".join(psutil.Process(self.pid).cmdline())
+ self.assertEqual(ps_cmdline, psutil_cmdline)
+
+ # On SUNOS "ps" reads niceness /proc/pid/psinfo which returns an
+ # incorrect value (20); the real deal is getpriority(2) which
+ # returns 0; psutil relies on it, see:
+ # https://github.com/giampaolo/psutil/issues/1082
+ # AIX has the same issue
+ @unittest.skipIf(SUNOS, "not reliable on SUNOS")
+ @unittest.skipIf(AIX, "not reliable on AIX")
+ def test_nice(self):
+ ps_nice = ps('nice', self.pid)
+ psutil_nice = psutil.Process().nice()
+ self.assertEqual(ps_nice, psutil_nice)
+
+
+@unittest.skipIf(not POSIX, "POSIX only")
+class TestSystemAPIs(PsutilTestCase):
+ """Test some system APIs."""
+
+ @retry_on_failure()
+ def test_pids(self):
+ # Note: this test might fail if the OS is starting/killing
+ # other processes in the meantime
+ pids_ps = sorted(ps("pid"))
+ pids_psutil = psutil.pids()
+
+ # on MACOS and OPENBSD ps doesn't show pid 0
+        if (MACOS or OPENBSD) and 0 not in pids_ps:
+ pids_ps.insert(0, 0)
+
+ # There will often be one more process in pids_ps for ps itself
+ if len(pids_ps) - len(pids_psutil) > 1:
+ difference = [x for x in pids_psutil if x not in pids_ps] + \
+ [x for x in pids_ps if x not in pids_psutil]
+ raise self.fail("difference: " + str(difference))
+
+ # for some reason ifconfig -a does not report all interfaces
+ # returned by psutil
+ @unittest.skipIf(SUNOS, "unreliable on SUNOS")
+ @unittest.skipIf(not which('ifconfig'), "no ifconfig cmd")
+ @unittest.skipIf(not HAS_NET_IO_COUNTERS, "not supported")
+ def test_nic_names(self):
+ output = sh("ifconfig -a")
+ for nic in psutil.net_io_counters(pernic=True).keys():
+ for line in output.split():
+ if line.startswith(nic):
+ break
+ else:
+ raise self.fail(
+ "couldn't find %s nic in 'ifconfig -a' output\n%s" % (
+ nic, output))
+
+ @unittest.skipIf(CI_TESTING and not psutil.users(), "unreliable on CI")
+ @retry_on_failure()
+ def test_users(self):
+ out = sh("who")
+ if not out.strip():
+ raise self.skipTest("no users on this system")
+ lines = out.split('\n')
+ users = [x.split()[0] for x in lines]
+ terminals = [x.split()[1] for x in lines]
+ self.assertEqual(len(users), len(psutil.users()))
+ for u in psutil.users():
+ self.assertIn(u.name, users)
+ self.assertIn(u.terminal, terminals)
+
+ def test_pid_exists_let_raise(self):
+ # According to "man 2 kill" possible error values for kill
+ # are (EINVAL, EPERM, ESRCH). Test that any other errno
+ # results in an exception.
+ with mock.patch("psutil._psposix.os.kill",
+ side_effect=OSError(errno.EBADF, "")) as m:
+ self.assertRaises(OSError, psutil._psposix.pid_exists, os.getpid())
+ assert m.called
+
+ def test_os_waitpid_let_raise(self):
+ # os.waitpid() is supposed to catch EINTR and ECHILD only.
+ # Test that any other errno results in an exception.
+ with mock.patch("psutil._psposix.os.waitpid",
+ side_effect=OSError(errno.EBADF, "")) as m:
+ self.assertRaises(OSError, psutil._psposix.wait_pid, os.getpid())
+ assert m.called
+
+ def test_os_waitpid_eintr(self):
+ # os.waitpid() is supposed to "retry" on EINTR.
+ with mock.patch("psutil._psposix.os.waitpid",
+ side_effect=OSError(errno.EINTR, "")) as m:
+ self.assertRaises(
+ psutil._psposix.TimeoutExpired,
+ psutil._psposix.wait_pid, os.getpid(), timeout=0.01)
+ assert m.called
+
+ def test_os_waitpid_bad_ret_status(self):
+ # Simulate os.waitpid() returning a bad status.
+ with mock.patch("psutil._psposix.os.waitpid",
+ return_value=(1, -1)) as m:
+ self.assertRaises(ValueError,
+ psutil._psposix.wait_pid, os.getpid())
+ assert m.called
+
+ # AIX can return '-' in df output instead of numbers, e.g. for /proc
+ @unittest.skipIf(AIX, "unreliable on AIX")
+ @retry_on_failure()
+ def test_disk_usage(self):
+ def df(device):
+ out = sh("df -k %s" % device).strip()
+ line = out.split('\n')[1]
+ fields = line.split()
+ total = int(fields[1]) * 1024
+ used = int(fields[2]) * 1024
+ free = int(fields[3]) * 1024
+ percent = float(fields[4].replace('%', ''))
+ return (total, used, free, percent)
+
+ tolerance = 4 * 1024 * 1024 # 4MB
+ for part in psutil.disk_partitions(all=False):
+ usage = psutil.disk_usage(part.mountpoint)
+ try:
+ total, used, free, percent = df(part.device)
+ except RuntimeError as err:
+ # see:
+ # https://travis-ci.org/giampaolo/psutil/jobs/138338464
+ # https://travis-ci.org/giampaolo/psutil/jobs/138343361
+ err = str(err).lower()
+ if "no such file or directory" in err or \
+ "raw devices not supported" in err or \
+ "permission denied" in err:
+ continue
+ else:
+ raise
+ else:
+ self.assertAlmostEqual(usage.total, total, delta=tolerance)
+ self.assertAlmostEqual(usage.used, used, delta=tolerance)
+ self.assertAlmostEqual(usage.free, free, delta=tolerance)
+ self.assertAlmostEqual(usage.percent, percent, delta=1)
+
+
+@unittest.skipIf(not POSIX, "POSIX only")
+class TestMisc(PsutilTestCase):
+
+ def test_getpagesize(self):
+ pagesize = getpagesize()
+ self.assertGreater(pagesize, 0)
+ self.assertEqual(pagesize, resource.getpagesize())
+ self.assertEqual(pagesize, mmap.PAGESIZE)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_process.py b/lib/psutil/tests/test_process.py
new file mode 100644
index 0000000..26869e9
--- /dev/null
+++ b/lib/psutil/tests/test_process.py
@@ -0,0 +1,1591 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for psutil.Process class."""
+
+import collections
+import errno
+import getpass
+import itertools
+import os
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import textwrap
+import time
+import types
+import unittest
+
+import psutil
+from psutil import AIX
+from psutil import BSD
+from psutil import LINUX
+from psutil import MACOS
+from psutil import NETBSD
+from psutil import OPENBSD
+from psutil import OSX
+from psutil import POSIX
+from psutil import SUNOS
+from psutil import WINDOWS
+from psutil._common import open_text
+from psutil._compat import PY3
+from psutil._compat import FileNotFoundError
+from psutil._compat import long
+from psutil._compat import super
+from psutil.tests import APPVEYOR
+from psutil.tests import CI_TESTING
+from psutil.tests import GITHUB_ACTIONS
+from psutil.tests import GLOBAL_TIMEOUT
+from psutil.tests import HAS_CPU_AFFINITY
+from psutil.tests import HAS_ENVIRON
+from psutil.tests import HAS_IONICE
+from psutil.tests import HAS_MEMORY_MAPS
+from psutil.tests import HAS_PROC_CPU_NUM
+from psutil.tests import HAS_PROC_IO_COUNTERS
+from psutil.tests import HAS_RLIMIT
+from psutil.tests import HAS_THREADS
+from psutil.tests import MACOS_11PLUS
+from psutil.tests import PYPY
+from psutil.tests import PYTHON_EXE
+from psutil.tests import PsutilTestCase
+from psutil.tests import ThreadTask
+from psutil.tests import call_until
+from psutil.tests import copyload_shared_lib
+from psutil.tests import create_exe
+from psutil.tests import mock
+from psutil.tests import process_namespace
+from psutil.tests import reap_children
+from psutil.tests import retry_on_failure
+from psutil.tests import sh
+from psutil.tests import skip_on_access_denied
+from psutil.tests import skip_on_not_implemented
+from psutil.tests import wait_for_pid
+
+
+# ===================================================================
+# --- psutil.Process class tests
+# ===================================================================
+
+
+class TestProcess(PsutilTestCase):
+ """Tests for psutil.Process class."""
+
+ def spawn_psproc(self, *args, **kwargs):
+ sproc = self.spawn_testproc(*args, **kwargs)
+ return psutil.Process(sproc.pid)
+
+ # ---
+
+ def test_pid(self):
+ p = psutil.Process()
+ self.assertEqual(p.pid, os.getpid())
+ with self.assertRaises(AttributeError):
+ p.pid = 33
+
+ def test_kill(self):
+ p = self.spawn_psproc()
+ p.kill()
+ code = p.wait()
+ if WINDOWS:
+ self.assertEqual(code, signal.SIGTERM)
+ else:
+ self.assertEqual(code, -signal.SIGKILL)
+ self.assertProcessGone(p)
+
+ def test_terminate(self):
+ p = self.spawn_psproc()
+ p.terminate()
+ code = p.wait()
+ if WINDOWS:
+ self.assertEqual(code, signal.SIGTERM)
+ else:
+ self.assertEqual(code, -signal.SIGTERM)
+ self.assertProcessGone(p)
+
+ def test_send_signal(self):
+ sig = signal.SIGKILL if POSIX else signal.SIGTERM
+ p = self.spawn_psproc()
+ p.send_signal(sig)
+ code = p.wait()
+ if WINDOWS:
+ self.assertEqual(code, sig)
+ else:
+ self.assertEqual(code, -sig)
+ self.assertProcessGone(p)
+
+ @unittest.skipIf(not POSIX, "not POSIX")
+ def test_send_signal_mocked(self):
+ sig = signal.SIGTERM
+ p = self.spawn_psproc()
+ with mock.patch('psutil.os.kill',
+ side_effect=OSError(errno.ESRCH, "")):
+ self.assertRaises(psutil.NoSuchProcess, p.send_signal, sig)
+
+ p = self.spawn_psproc()
+ with mock.patch('psutil.os.kill',
+ side_effect=OSError(errno.EPERM, "")):
+ self.assertRaises(psutil.AccessDenied, p.send_signal, sig)
+
+ def test_wait_exited(self):
+ # Test waitpid() + WIFEXITED -> WEXITSTATUS.
+ # normal return, same as exit(0)
+ cmd = [PYTHON_EXE, "-c", "pass"]
+ p = self.spawn_psproc(cmd)
+ code = p.wait()
+ self.assertEqual(code, 0)
+ self.assertProcessGone(p)
+ # exit(1), implicit in case of error
+ cmd = [PYTHON_EXE, "-c", "1 / 0"]
+ p = self.spawn_psproc(cmd, stderr=subprocess.PIPE)
+ code = p.wait()
+ self.assertEqual(code, 1)
+ self.assertProcessGone(p)
+ # via sys.exit()
+ cmd = [PYTHON_EXE, "-c", "import sys; sys.exit(5);"]
+ p = self.spawn_psproc(cmd)
+ code = p.wait()
+ self.assertEqual(code, 5)
+ self.assertProcessGone(p)
+ # via os._exit()
+ cmd = [PYTHON_EXE, "-c", "import os; os._exit(5);"]
+ p = self.spawn_psproc(cmd)
+ code = p.wait()
+ self.assertEqual(code, 5)
+ self.assertProcessGone(p)
+
+ def test_wait_stopped(self):
+ p = self.spawn_psproc()
+ if POSIX:
+ # Test waitpid() + WIFSTOPPED and WIFCONTINUED.
+ # Note: if a process is stopped it ignores SIGTERM.
+ p.send_signal(signal.SIGSTOP)
+ self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
+ p.send_signal(signal.SIGCONT)
+ self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
+ p.send_signal(signal.SIGTERM)
+ self.assertEqual(p.wait(), -signal.SIGTERM)
+ self.assertEqual(p.wait(), -signal.SIGTERM)
+ else:
+ p.suspend()
+ self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
+ p.resume()
+ self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
+ p.terminate()
+ self.assertEqual(p.wait(), signal.SIGTERM)
+ self.assertEqual(p.wait(), signal.SIGTERM)
+
+ def test_wait_non_children(self):
+ # Test wait() against a process which is not our direct
+ # child.
+ child, grandchild = self.spawn_children_pair()
+ self.assertRaises(psutil.TimeoutExpired, child.wait, 0.01)
+ self.assertRaises(psutil.TimeoutExpired, grandchild.wait, 0.01)
+ # We also terminate the direct child otherwise the
+ # grandchild will hang until the parent is gone.
+ child.terminate()
+ grandchild.terminate()
+ child_ret = child.wait()
+ grandchild_ret = grandchild.wait()
+ if POSIX:
+ self.assertEqual(child_ret, -signal.SIGTERM)
+ # For processes which are not our children we're supposed
+ # to get None.
+ self.assertEqual(grandchild_ret, None)
+ else:
+ self.assertEqual(child_ret, signal.SIGTERM)
+ self.assertEqual(grandchild_ret, signal.SIGTERM)
+
+ def test_wait_timeout(self):
+ p = self.spawn_psproc()
+ p.name()
+ self.assertRaises(psutil.TimeoutExpired, p.wait, 0.01)
+ self.assertRaises(psutil.TimeoutExpired, p.wait, 0)
+ self.assertRaises(ValueError, p.wait, -1)
+
+ def test_wait_timeout_nonblocking(self):
+ p = self.spawn_psproc()
+ self.assertRaises(psutil.TimeoutExpired, p.wait, 0)
+ p.kill()
+ stop_at = time.time() + GLOBAL_TIMEOUT
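+ # Poll wait(0) (non-blocking) until the exit code becomes
+ # available or GLOBAL_TIMEOUT expires.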
+ while time.time() < stop_at:
+ try:
+ code = p.wait(0)
+ break
+ except psutil.TimeoutExpired:
+ pass
+ else:
+ raise self.fail('timeout')
+ if POSIX:
+ self.assertEqual(code, -signal.SIGKILL)
+ else:
+ self.assertEqual(code, signal.SIGTERM)
+ self.assertProcessGone(p)
+
+ def test_cpu_percent(self):
+ p = psutil.Process()
+ p.cpu_percent(interval=0.001)
+ p.cpu_percent(interval=0.001)
+ for x in range(100):
+ percent = p.cpu_percent(interval=None)
+ self.assertIsInstance(percent, float)
+ self.assertGreaterEqual(percent, 0.0)
+ with self.assertRaises(ValueError):
+ p.cpu_percent(interval=-1)
+
+ def test_cpu_percent_numcpus_none(self):
+ # See: https://github.com/giampaolo/psutil/issues/1087
+ with mock.patch('psutil.cpu_count', return_value=None) as m:
+ psutil.Process().cpu_percent()
+ assert m.called
+
+ def test_cpu_times(self):
+ times = psutil.Process().cpu_times()
+ assert (times.user > 0.0) or (times.system > 0.0), times
+ assert (times.children_user >= 0.0), times
+ assert (times.children_system >= 0.0), times
+ if LINUX:
+ assert times.iowait >= 0.0, times
+ # make sure returned values can be pretty printed with strftime
+ for name in times._fields:
+ time.strftime("%H:%M:%S", time.localtime(getattr(times, name)))
+
+ def test_cpu_times_2(self):
+ user_time, kernel_time = psutil.Process().cpu_times()[:2]
+ utime, ktime = os.times()[:2]
+
+ # Use os.times()[:2] as base values to compare our results
+ # using a tolerance of +/- 0.1 seconds.
+ # It will fail if the difference between the values is > 0.1s.
+ if (max([user_time, utime]) - min([user_time, utime])) > 0.1:
+ raise self.fail("expected: %s, found: %s" % (utime, user_time))
+
+ if (max([kernel_time, ktime]) - min([kernel_time, ktime])) > 0.1:
+ raise self.fail("expected: %s, found: %s" % (ktime, kernel_time))
+
+ @unittest.skipIf(not HAS_PROC_CPU_NUM, "not supported")
+ def test_cpu_num(self):
+ p = psutil.Process()
+ num = p.cpu_num()
+ self.assertGreaterEqual(num, 0)
+ if psutil.cpu_count() == 1:
+ self.assertEqual(num, 0)
+ self.assertIn(p.cpu_num(), range(psutil.cpu_count()))
+
+ def test_create_time(self):
+ p = self.spawn_psproc()
+ now = time.time()
+ create_time = p.create_time()
+
+ # Use time.time() as base value to compare our result using a
+ # tolerance of +/- 2 seconds.
+ # It will fail if the difference between the values is > 2s.
+ difference = abs(create_time - now)
+ if difference > 2:
+ raise self.fail("expected: %s, found: %s, difference: %s"
+ % (now, create_time, difference))
+
+ # make sure returned value can be pretty printed with strftime
+ time.strftime("%Y %m %d %H:%M:%S", time.localtime(p.create_time()))
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_terminal(self):
+ terminal = psutil.Process().terminal()
+ if terminal is not None:
+ tty = os.path.realpath(sh('tty'))
+ self.assertEqual(terminal, tty)
+
+ @unittest.skipIf(not HAS_PROC_IO_COUNTERS, 'not supported')
+ @skip_on_not_implemented(only_if=LINUX)
+ def test_io_counters(self):
+ p = psutil.Process()
+ # test reads
+ io1 = p.io_counters()
+ with open(PYTHON_EXE, 'rb') as f:
+ f.read()
+ io2 = p.io_counters()
+ if not BSD and not AIX:
+ self.assertGreater(io2.read_count, io1.read_count)
+ self.assertEqual(io2.write_count, io1.write_count)
+ if LINUX:
+ self.assertGreater(io2.read_chars, io1.read_chars)
+ self.assertEqual(io2.write_chars, io1.write_chars)
+ else:
+ self.assertGreaterEqual(io2.read_bytes, io1.read_bytes)
+ self.assertGreaterEqual(io2.write_bytes, io1.write_bytes)
+
+ # test writes
+ io1 = p.io_counters()
+ with open(self.get_testfn(), 'wb') as f:
+ if PY3:
+ f.write(bytes("x" * 1000000, 'ascii'))
+ else:
+ f.write("x" * 1000000)
+ io2 = p.io_counters()
+ self.assertGreaterEqual(io2.write_count, io1.write_count)
+ self.assertGreaterEqual(io2.write_bytes, io1.write_bytes)
+ self.assertGreaterEqual(io2.read_count, io1.read_count)
+ self.assertGreaterEqual(io2.read_bytes, io1.read_bytes)
+ if LINUX:
+ self.assertGreater(io2.write_chars, io1.write_chars)
+ self.assertGreaterEqual(io2.read_chars, io1.read_chars)
+
+ # sanity check
+ for i in range(len(io2)):
+ if BSD and i >= 2:
+ # On BSD read_bytes and write_bytes are always set to -1.
+ continue
+ self.assertGreaterEqual(io2[i], 0)
+ self.assertGreaterEqual(io1[i], 0)
+
+ @unittest.skipIf(not HAS_IONICE, "not supported")
+ @unittest.skipIf(not LINUX, "linux only")
+ def test_ionice_linux(self):
+ p = psutil.Process()
+ if not CI_TESTING:
+ self.assertEqual(p.ionice()[0], psutil.IOPRIO_CLASS_NONE)
+ self.assertEqual(psutil.IOPRIO_CLASS_NONE, 0)
+ self.assertEqual(psutil.IOPRIO_CLASS_RT, 1) # high
+ self.assertEqual(psutil.IOPRIO_CLASS_BE, 2) # normal
+ self.assertEqual(psutil.IOPRIO_CLASS_IDLE, 3) # low
+ init = p.ionice()
+ try:
+ # low
+ p.ionice(psutil.IOPRIO_CLASS_IDLE)
+ self.assertEqual(tuple(p.ionice()), (psutil.IOPRIO_CLASS_IDLE, 0))
+ with self.assertRaises(ValueError): # accepts no value
+ p.ionice(psutil.IOPRIO_CLASS_IDLE, value=7)
+ # normal
+ p.ionice(psutil.IOPRIO_CLASS_BE)
+ self.assertEqual(tuple(p.ionice()), (psutil.IOPRIO_CLASS_BE, 0))
+ p.ionice(psutil.IOPRIO_CLASS_BE, value=7)
+ self.assertEqual(tuple(p.ionice()), (psutil.IOPRIO_CLASS_BE, 7))
+ with self.assertRaises(ValueError):
+ p.ionice(psutil.IOPRIO_CLASS_BE, value=8)
+ try:
+ p.ionice(psutil.IOPRIO_CLASS_RT, value=7)
+ except psutil.AccessDenied:
+ pass
+ # errs
+ self.assertRaisesRegex(
+ ValueError, "ioclass accepts no value",
+ p.ionice, psutil.IOPRIO_CLASS_NONE, 1)
+ self.assertRaisesRegex(
+ ValueError, "ioclass accepts no value",
+ p.ionice, psutil.IOPRIO_CLASS_IDLE, 1)
+ self.assertRaisesRegex(
+ ValueError, "'ioclass' argument must be specified",
+ p.ionice, value=1)
+ finally:
+ ioclass, value = init
+ if ioclass == psutil.IOPRIO_CLASS_NONE:
+ value = 0
+ p.ionice(ioclass, value)
+
+ @unittest.skipIf(not HAS_IONICE, "not supported")
+ @unittest.skipIf(not WINDOWS, 'not supported on this win version')
+ def test_ionice_win(self):
+ p = psutil.Process()
+ if not CI_TESTING:
+ self.assertEqual(p.ionice(), psutil.IOPRIO_NORMAL)
+ init = p.ionice()
+ try:
+ # base
+ p.ionice(psutil.IOPRIO_VERYLOW)
+ self.assertEqual(p.ionice(), psutil.IOPRIO_VERYLOW)
+ p.ionice(psutil.IOPRIO_LOW)
+ self.assertEqual(p.ionice(), psutil.IOPRIO_LOW)
+ try:
+ p.ionice(psutil.IOPRIO_HIGH)
+ except psutil.AccessDenied:
+ pass
+ else:
+ self.assertEqual(p.ionice(), psutil.IOPRIO_HIGH)
+ # errs
+ self.assertRaisesRegex(
+ TypeError, "value argument not accepted on Windows",
+ p.ionice, psutil.IOPRIO_NORMAL, value=1)
+ self.assertRaisesRegex(
+ ValueError, "is not a valid priority",
+ p.ionice, psutil.IOPRIO_HIGH + 1)
+ finally:
+ p.ionice(init)
+
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit_get(self):
+ import resource
+ p = psutil.Process(os.getpid())
+ names = [x for x in dir(psutil) if x.startswith('RLIMIT')]
+ assert names, names
+ for name in names:
+ value = getattr(psutil, name)
+ self.assertGreaterEqual(value, 0)
+ if name in dir(resource):
+ self.assertEqual(value, getattr(resource, name))
+ # XXX - On PyPy RLIMIT_INFINITY returned by
+ # resource.getrlimit() is reported as a very big long
+ # number instead of -1. It looks like a bug with PyPy.
+ if PYPY:
+ continue
+ self.assertEqual(p.rlimit(value), resource.getrlimit(value))
+ else:
+ ret = p.rlimit(value)
+ self.assertEqual(len(ret), 2)
+ self.assertGreaterEqual(ret[0], -1)
+ self.assertGreaterEqual(ret[1], -1)
+
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit_set(self):
+ p = self.spawn_psproc()
+ p.rlimit(psutil.RLIMIT_NOFILE, (5, 5))
+ self.assertEqual(p.rlimit(psutil.RLIMIT_NOFILE), (5, 5))
+ # If pid is 0 prlimit() applies to the calling process and
+ # we don't want that.
+ if LINUX:
+ with self.assertRaisesRegex(ValueError, "can't use prlimit"):
+ psutil._psplatform.Process(0).rlimit(0)
+ with self.assertRaises(ValueError):
+ p.rlimit(psutil.RLIMIT_NOFILE, (5, 5, 5))
+
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit(self):
+ p = psutil.Process()
+ testfn = self.get_testfn()
+ soft, hard = p.rlimit(psutil.RLIMIT_FSIZE)
+ try:
+ p.rlimit(psutil.RLIMIT_FSIZE, (1024, hard))
+ with open(testfn, "wb") as f:
+ f.write(b"X" * 1024)
+ # write() or flush() doesn't always cause the exception
+ # but close() will.
+ with self.assertRaises(IOError) as exc:
+ with open(testfn, "wb") as f:
+ f.write(b"X" * 1025)
+ self.assertEqual(exc.exception.errno if PY3 else exc.exception[0],
+ errno.EFBIG)
+ finally:
+ p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard))
+ self.assertEqual(p.rlimit(psutil.RLIMIT_FSIZE), (soft, hard))
+
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit_infinity(self):
+ # First set a limit, then re-set it by specifying INFINITY
+ # and check that the previous limit has been overridden.
+ p = psutil.Process()
+ soft, hard = p.rlimit(psutil.RLIMIT_FSIZE)
+ try:
+ p.rlimit(psutil.RLIMIT_FSIZE, (1024, hard))
+ p.rlimit(psutil.RLIMIT_FSIZE, (psutil.RLIM_INFINITY, hard))
+ with open(self.get_testfn(), "wb") as f:
+ f.write(b"X" * 2048)
+ finally:
+ p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard))
+ self.assertEqual(p.rlimit(psutil.RLIMIT_FSIZE), (soft, hard))
+
+ @unittest.skipIf(not HAS_RLIMIT, "not supported")
+ def test_rlimit_infinity_value(self):
+ # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really
+ # big number on a platform with large file support. On these
+ # platforms we need to test that the get/setrlimit functions
+ # properly convert the number to a C long long and that the
+ # conversion doesn't raise an error.
+ p = psutil.Process()
+ soft, hard = p.rlimit(psutil.RLIMIT_FSIZE)
+ self.assertEqual(psutil.RLIM_INFINITY, hard)
+ p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard))
+
+ def test_num_threads(self):
+ # on certain platforms such as Linux we might test for an exact
+ # thread number, since we always start with 1 thread per process,
+ # but this does not apply across all platforms (MACOS, Windows)
+ p = psutil.Process()
+ if OPENBSD:
+ try:
+ step1 = p.num_threads()
+ except psutil.AccessDenied:
+ raise unittest.SkipTest("on OpenBSD this requires root access")
+ else:
+ step1 = p.num_threads()
+
+ with ThreadTask():
+ step2 = p.num_threads()
+ self.assertEqual(step2, step1 + 1)
+
+ @unittest.skipIf(not WINDOWS, 'WINDOWS only')
+ def test_num_handles(self):
+ # a more thorough test is done later in test_windows.py
+ p = psutil.Process()
+ self.assertGreater(p.num_handles(), 0)
+
+ @unittest.skipIf(not HAS_THREADS, 'not supported')
+ def test_threads(self):
+ p = psutil.Process()
+ if OPENBSD:
+ try:
+ step1 = p.threads()
+ except psutil.AccessDenied:
+ raise unittest.SkipTest("on OpenBSD this requires root access")
+ else:
+ step1 = p.threads()
+
+ with ThreadTask():
+ step2 = p.threads()
+ self.assertEqual(len(step2), len(step1) + 1)
+ athread = step2[0]
+ # test named tuple
+ self.assertEqual(athread.id, athread[0])
+ self.assertEqual(athread.user_time, athread[1])
+ self.assertEqual(athread.system_time, athread[2])
+
+ @retry_on_failure()
+ @skip_on_access_denied(only_if=MACOS)
+ @unittest.skipIf(not HAS_THREADS, 'not supported')
+ def test_threads_2(self):
+ p = self.spawn_psproc()
+ if OPENBSD:
+ try:
+ p.threads()
+ except psutil.AccessDenied:
+ raise unittest.SkipTest(
+ "on OpenBSD this requires root access")
+ self.assertAlmostEqual(
+ p.cpu_times().user,
+ sum([x.user_time for x in p.threads()]), delta=0.1)
+ self.assertAlmostEqual(
+ p.cpu_times().system,
+ sum([x.system_time for x in p.threads()]), delta=0.1)
+
+ @retry_on_failure()
+ def test_memory_info(self):
+ p = psutil.Process()
+
+ # step 1 - get a base value to compare our results
+ rss1, vms1 = p.memory_info()[:2]
+ percent1 = p.memory_percent()
+ self.assertGreater(rss1, 0)
+ self.assertGreater(vms1, 0)
+
+ # step 2 - allocate some memory
+ memarr = [None] * 1500000
+
+ rss2, vms2 = p.memory_info()[:2]
+ percent2 = p.memory_percent()
+
+ # step 3 - make sure that the memory usage bumped up
+ self.assertGreater(rss2, rss1)
+ self.assertGreaterEqual(vms2, vms1) # vms might be equal
+ self.assertGreater(percent2, percent1)
+ del memarr
+
+ if WINDOWS:
+ mem = p.memory_info()
+ self.assertEqual(mem.rss, mem.wset)
+ self.assertEqual(mem.vms, mem.pagefile)
+
+ mem = p.memory_info()
+ for name in mem._fields:
+ self.assertGreaterEqual(getattr(mem, name), 0)
+
+ def test_memory_full_info(self):
+ p = psutil.Process()
+ total = psutil.virtual_memory().total
+ mem = p.memory_full_info()
+ for name in mem._fields:
+ value = getattr(mem, name)
+ self.assertGreaterEqual(value, 0, msg=(name, value))
+ if name == 'vms' and (OSX or LINUX):
+ continue
+ self.assertLessEqual(value, total, msg=(name, value, total))
+ if LINUX or WINDOWS or MACOS:
+ self.assertGreaterEqual(mem.uss, 0)
+ if LINUX:
+ self.assertGreaterEqual(mem.pss, 0)
+ self.assertGreaterEqual(mem.swap, 0)
+
+ @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
+ def test_memory_maps(self):
+ p = psutil.Process()
+ maps = p.memory_maps()
+ paths = [x for x in maps]
+ self.assertEqual(len(paths), len(set(paths)))
+ ext_maps = p.memory_maps(grouped=False)
+
+ for nt in maps:
+ if not nt.path.startswith('['):
+ assert os.path.isabs(nt.path), nt.path
+ if POSIX:
+ try:
+ assert os.path.exists(nt.path) or \
+ os.path.islink(nt.path), nt.path
+ except AssertionError:
+ if not LINUX:
+ raise
+ else:
+ # https://github.com/giampaolo/psutil/issues/759
+ with open_text('/proc/self/smaps') as f:
+ data = f.read()
+ if "%s (deleted)" % nt.path not in data:
+ raise
+ else:
+ # XXX - On Windows we have this strange behavior with
+ # 64 bit dlls: they are visible via explorer but cannot
+ # be accessed via os.stat() (wtf?).
+ if '64' not in os.path.basename(nt.path):
+ try:
+ st = os.stat(nt.path)
+ except FileNotFoundError:
+ pass
+ else:
+ assert stat.S_ISREG(st.st_mode), nt.path
+ for nt in ext_maps:
+ for fname in nt._fields:
+ value = getattr(nt, fname)
+ if fname == 'path':
+ continue
+ elif fname in ('addr', 'perms'):
+ assert value, value
+ else:
+ self.assertIsInstance(value, (int, long))
+ assert value >= 0, value
+
+ @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
+ def test_memory_maps_lists_lib(self):
+ # Make sure a newly loaded shared lib is listed.
+ p = psutil.Process()
+ with copyload_shared_lib() as path:
+ def normpath(p):
+ return os.path.realpath(os.path.normcase(p))
+ libpaths = [normpath(x.path)
+ for x in p.memory_maps()]
+ self.assertIn(normpath(path), libpaths)
+
+ def test_memory_percent(self):
+ p = psutil.Process()
+ p.memory_percent()
+ self.assertRaises(ValueError, p.memory_percent, memtype="?!?")
+ if LINUX or MACOS or WINDOWS:
+ p.memory_percent(memtype='uss')
+
+ def test_is_running(self):
+ p = self.spawn_psproc()
+ assert p.is_running()
+ assert p.is_running()
+ p.kill()
+ p.wait()
+ assert not p.is_running()
+ assert not p.is_running()
+
+ def test_exe(self):
+ p = self.spawn_psproc()
+ exe = p.exe()
+ try:
+ self.assertEqual(exe, PYTHON_EXE)
+ except AssertionError:
+ if WINDOWS and len(exe) == len(PYTHON_EXE):
+ # on Windows we don't care about case sensitivity
+ normcase = os.path.normcase
+ self.assertEqual(normcase(exe), normcase(PYTHON_EXE))
+ else:
+ # certain platforms such as BSD are more accurate returning:
+ # "/usr/local/bin/python2.7"
+ # ...instead of:
+ # "/usr/local/bin/python"
+ # We do not want to consider this difference in accuracy
+ # an error.
+ ver = "%s.%s" % (sys.version_info[0], sys.version_info[1])
+ try:
+ self.assertEqual(exe.replace(ver, ''),
+ PYTHON_EXE.replace(ver, ''))
+ except AssertionError:
+ # Typically MACOS. Really not sure what to do here.
+ pass
+
+ out = sh([exe, "-c", "import os; print('hey')"])
+ self.assertEqual(out, 'hey')
+
+ def test_cmdline(self):
+ cmdline = [PYTHON_EXE, "-c", "import time; time.sleep(60)"]
+ p = self.spawn_psproc(cmdline)
+ # XXX - most of the time the underlying sysctl() call on NetBSD
+ # and OpenBSD returns a truncated string.
+ # Also /proc/pid/cmdline behaves the same, so it looks
+ # like this is a kernel bug.
+ # XXX - AIX truncates long arguments in /proc/pid/cmdline
+ if NETBSD or OPENBSD or AIX:
+ self.assertEqual(p.cmdline()[0], PYTHON_EXE)
+ else:
+ if MACOS and CI_TESTING:
+ pyexe = p.cmdline()[0]
+ if pyexe != PYTHON_EXE:
+ self.assertEqual(' '.join(p.cmdline()[1:]),
+ ' '.join(cmdline[1:]))
+ return
+ self.assertEqual(' '.join(p.cmdline()), ' '.join(cmdline))
+
+ @unittest.skipIf(PYPY, "broken on PYPY")
+ def test_long_cmdline(self):
+ testfn = self.get_testfn()
+ create_exe(testfn)
+ cmdline = [testfn] + (["0123456789"] * 20)
+ p = self.spawn_psproc(cmdline)
+ self.assertEqual(p.cmdline(), cmdline)
+
+ def test_name(self):
+ p = self.spawn_psproc(PYTHON_EXE)
+ name = p.name().lower()
+ pyexe = os.path.basename(os.path.realpath(sys.executable)).lower()
+ assert pyexe.startswith(name), (pyexe, name)
+
+ @unittest.skipIf(PYPY, "unreliable on PYPY")
+ def test_long_name(self):
+ testfn = self.get_testfn(suffix="0123456789" * 2)
+ create_exe(testfn)
+ p = self.spawn_psproc(testfn)
+ self.assertEqual(p.name(), os.path.basename(testfn))
+
+ # XXX
+ @unittest.skipIf(SUNOS, "broken on SUNOS")
+ @unittest.skipIf(AIX, "broken on AIX")
+ @unittest.skipIf(PYPY, "broken on PYPY")
+ def test_prog_w_funky_name(self):
+ # Test that name(), exe() and cmdline() correctly handle programs
+ # with funky chars such as spaces and ")", see:
+ # https://github.com/giampaolo/psutil/issues/628
+ funky_path = self.get_testfn(suffix='foo bar )')
+ create_exe(funky_path)
+ cmdline = [funky_path, "-c",
+ "import time; [time.sleep(0.01) for x in range(3000)];"
+ "arg1", "arg2", "", "arg3", ""]
+ p = self.spawn_psproc(cmdline)
+ self.assertEqual(p.cmdline(), cmdline)
+ self.assertEqual(p.name(), os.path.basename(funky_path))
+ self.assertEqual(os.path.normcase(p.exe()),
+ os.path.normcase(funky_path))
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_uids(self):
+ p = psutil.Process()
+ real, effective, saved = p.uids()
+ # os.getuid() refers to "real" uid
+ self.assertEqual(real, os.getuid())
+ # os.geteuid() refers to "effective" uid
+ self.assertEqual(effective, os.geteuid())
+ # No such thing as os.getsuid() ("saved" uid), but starting
+ # from python 2.7 we have os.getresuid() which returns all
+ # of them.
+ if hasattr(os, "getresuid"):
+ self.assertEqual(os.getresuid(), p.uids())
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_gids(self):
+ p = psutil.Process()
+ real, effective, saved = p.gids()
+ # os.getgid() refers to the "real" gid
+ self.assertEqual(real, os.getgid())
+ # os.getegid() refers to the "effective" gid
+ self.assertEqual(effective, os.getegid())
+ # No such thing as os.getsgid() ("saved" gid), but starting
+ # from python 2.7 we have os.getresgid() which returns all
+ # of them.
+ if hasattr(os, "getresgid"):
+ self.assertEqual(os.getresgid(), p.gids())
+
+ def test_nice(self):
+ p = psutil.Process()
+ self.assertRaises(TypeError, p.nice, "str")
+ init = p.nice()
+ try:
+ if WINDOWS:
+ # A CI runner may limit our maximum priority, which will break
+ # this test. Instead, we test in order of increasing priority,
+ # and match either the expected value or the highest so far.
+ highest_prio = None
+ for prio in [psutil.IDLE_PRIORITY_CLASS,
+ psutil.BELOW_NORMAL_PRIORITY_CLASS,
+ psutil.NORMAL_PRIORITY_CLASS,
+ psutil.ABOVE_NORMAL_PRIORITY_CLASS,
+ psutil.HIGH_PRIORITY_CLASS,
+ psutil.REALTIME_PRIORITY_CLASS]:
+ with self.subTest(prio=prio):
+ try:
+ p.nice(prio)
+ except psutil.AccessDenied:
+ pass
+ else:
+ new_prio = p.nice()
+ if CI_TESTING:
+ if new_prio == prio or highest_prio is None:
+ highest_prio = prio
+ self.assertEqual(new_prio, highest_prio)
+ else:
+ self.assertEqual(new_prio, prio)
+ else:
+ try:
+ if hasattr(os, "getpriority"):
+ self.assertEqual(
+ os.getpriority(os.PRIO_PROCESS, os.getpid()),
+ p.nice())
+ p.nice(1)
+ self.assertEqual(p.nice(), 1)
+ if hasattr(os, "getpriority"):
+ self.assertEqual(
+ os.getpriority(os.PRIO_PROCESS, os.getpid()),
+ p.nice())
+ # XXX - going back to previous nice value raises
+ # AccessDenied on MACOS
+ if not MACOS:
+ p.nice(0)
+ self.assertEqual(p.nice(), 0)
+ except psutil.AccessDenied:
+ pass
+ finally:
+ try:
+ p.nice(init)
+ except psutil.AccessDenied:
+ pass
+
+ def test_status(self):
+ p = psutil.Process()
+ self.assertEqual(p.status(), psutil.STATUS_RUNNING)
+
+ def test_username(self):
+ p = self.spawn_psproc()
+ username = p.username()
+ if WINDOWS:
+ domain, username = username.split('\\')
+ getpass_user = getpass.getuser()
+ if getpass_user.endswith('$'):
+ # When running as a service account (most likely to be
+ # NetworkService), these user name calculations don't produce
+ # the same result, causing the test to fail.
+ raise unittest.SkipTest('running as service account')
+ self.assertEqual(username, getpass_user)
+ if 'USERDOMAIN' in os.environ:
+ self.assertEqual(domain, os.environ['USERDOMAIN'])
+ else:
+ self.assertEqual(username, getpass.getuser())
+
+ def test_cwd(self):
+ p = self.spawn_psproc()
+ self.assertEqual(p.cwd(), os.getcwd())
+
+ def test_cwd_2(self):
+ cmd = [PYTHON_EXE, "-c",
+ "import os, time; os.chdir('..'); time.sleep(60)"]
+ p = self.spawn_psproc(cmd)
+ call_until(p.cwd, "ret == os.path.dirname(os.getcwd())")
+
+ @unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported')
+ def test_cpu_affinity(self):
+ p = psutil.Process()
+ initial = p.cpu_affinity()
+ assert initial, initial
+ self.addCleanup(p.cpu_affinity, initial)
+
+ if hasattr(os, "sched_getaffinity"):
+ self.assertEqual(initial, list(os.sched_getaffinity(p.pid)))
+ self.assertEqual(len(initial), len(set(initial)))
+
+ all_cpus = list(range(len(psutil.cpu_percent(percpu=True))))
+ for n in all_cpus:
+ p.cpu_affinity([n])
+ self.assertEqual(p.cpu_affinity(), [n])
+ if hasattr(os, "sched_getaffinity"):
+ self.assertEqual(p.cpu_affinity(),
+ list(os.sched_getaffinity(p.pid)))
+ # also test num_cpu()
+ if hasattr(p, "num_cpu"):
+ self.assertEqual(p.cpu_affinity()[0], p.num_cpu())
+
+ # [] is an alias for "all eligible CPUs"; on Linux this may
+ # not be equal to all available CPUs, see:
+ # https://github.com/giampaolo/psutil/issues/956
+ p.cpu_affinity([])
+ if LINUX:
+ self.assertEqual(p.cpu_affinity(), p._proc._get_eligible_cpus())
+ else:
+ self.assertEqual(p.cpu_affinity(), all_cpus)
+ if hasattr(os, "sched_getaffinity"):
+ self.assertEqual(p.cpu_affinity(),
+ list(os.sched_getaffinity(p.pid)))
+ #
+ self.assertRaises(TypeError, p.cpu_affinity, 1)
+ p.cpu_affinity(initial)
+ # it should work with all iterables, not only lists
+ p.cpu_affinity(set(all_cpus))
+ p.cpu_affinity(tuple(all_cpus))
+
+ @unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported')
+ def test_cpu_affinity_errs(self):
+ p = self.spawn_psproc()
+ invalid_cpu = [len(psutil.cpu_times(percpu=True)) + 10]
+ self.assertRaises(ValueError, p.cpu_affinity, invalid_cpu)
+ self.assertRaises(ValueError, p.cpu_affinity, range(10000, 11000))
+ self.assertRaises(TypeError, p.cpu_affinity, [0, "1"])
+ self.assertRaises(ValueError, p.cpu_affinity, [0, -1])
+
+ @unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported')
+ def test_cpu_affinity_all_combinations(self):
+ p = psutil.Process()
+ initial = p.cpu_affinity()
+ assert initial, initial
+ self.addCleanup(p.cpu_affinity, initial)
+
+ # All possible CPU set combinations.
+ if len(initial) > 12:
+ initial = initial[:12] # ...otherwise it will take forever
+ combos = []
+ for i in range(0, len(initial) + 1):
+ for subset in itertools.combinations(initial, i):
+ if subset:
+ combos.append(list(subset))
+
+ for combo in combos:
+ p.cpu_affinity(combo)
+ self.assertEqual(sorted(p.cpu_affinity()), sorted(combo))
+
+ # TODO: #595
+ @unittest.skipIf(BSD, "broken on BSD")
+ # can't find any process file on Appveyor
+ @unittest.skipIf(APPVEYOR, "unreliable on APPVEYOR")
+ def test_open_files(self):
+ p = psutil.Process()
+ testfn = self.get_testfn()
+ files = p.open_files()
+ self.assertNotIn(testfn, files)
+ with open(testfn, 'wb') as f:
+ f.write(b'x' * 1024)
+ f.flush()
+ # give the kernel some time to see the new file
+ files = call_until(p.open_files, "len(ret) != %i" % len(files))
+ filenames = [os.path.normcase(x.path) for x in files]
+ self.assertIn(os.path.normcase(testfn), filenames)
+ if LINUX:
+ for file in files:
+ if file.path == testfn:
+ self.assertEqual(file.position, 1024)
+ for file in files:
+ assert os.path.isfile(file.path), file
+
+ # another process
+ cmdline = "import time; f = open(r'%s', 'r'); time.sleep(60);" % testfn
+ p = self.spawn_psproc([PYTHON_EXE, "-c", cmdline])
+
+ for x in range(100):
+ filenames = [os.path.normcase(x.path) for x in p.open_files()]
+ if testfn in filenames:
+ break
+ time.sleep(.01)
+ else:
+ self.assertIn(os.path.normcase(testfn), filenames)
+ for file in filenames:
+ assert os.path.isfile(file), file
+
+ # TODO: #595
+ @unittest.skipIf(BSD, "broken on BSD")
+ # can't find any process file on Appveyor
+ @unittest.skipIf(APPVEYOR, "unreliable on APPVEYOR")
+ def test_open_files_2(self):
+ # test fd and path fields
+ p = psutil.Process()
+ normcase = os.path.normcase
+ testfn = self.get_testfn()
+ with open(testfn, 'w') as fileobj:
+ for file in p.open_files():
+ if normcase(file.path) == normcase(fileobj.name) or \
+ file.fd == fileobj.fileno():
+ break
+ else:
+ raise self.fail("no file found; files=%s" % (
+ repr(p.open_files())))
+ self.assertEqual(normcase(file.path), normcase(fileobj.name))
+ if WINDOWS:
+ self.assertEqual(file.fd, -1)
+ else:
+ self.assertEqual(file.fd, fileobj.fileno())
+ # test positions
+ ntuple = p.open_files()[0]
+ self.assertEqual(ntuple[0], ntuple.path)
+ self.assertEqual(ntuple[1], ntuple.fd)
+ # test file is gone
+ self.assertNotIn(fileobj.name, p.open_files())
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_num_fds(self):
+ p = psutil.Process()
+ testfn = self.get_testfn()
+ start = p.num_fds()
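+ # Opening a file and then a socket should bump the fd count
+ # by one each; closing them should bring it back to the start.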
+ file = open(testfn, 'w')
+ self.addCleanup(file.close)
+ self.assertEqual(p.num_fds(), start + 1)
+ sock = socket.socket()
+ self.addCleanup(sock.close)
+ self.assertEqual(p.num_fds(), start + 2)
+ file.close()
+ sock.close()
+ self.assertEqual(p.num_fds(), start)
+
+ @skip_on_not_implemented(only_if=LINUX)
+ @unittest.skipIf(OPENBSD or NETBSD, "not reliable on OPENBSD & NETBSD")
+ def test_num_ctx_switches(self):
+ p = psutil.Process()
+ before = sum(p.num_ctx_switches())
+ for x in range(500000):
+ after = sum(p.num_ctx_switches())
+ if after > before:
+ return
+ raise self.fail(
+ "num ctx switches still the same after 500,000 iterations")
+
+ def test_ppid(self):
+ p = psutil.Process()
+ if hasattr(os, 'getppid'):
+ self.assertEqual(p.ppid(), os.getppid())
+ p = self.spawn_psproc()
+ self.assertEqual(p.ppid(), os.getpid())
+ if APPVEYOR:
+ # Occasional failures, see:
+ # https://ci.appveyor.com/project/giampaolo/psutil/build/
+ # job/0hs623nenj7w4m33
+ return
+
+ def test_parent(self):
+ p = self.spawn_psproc()
+ self.assertEqual(p.parent().pid, os.getpid())
+
+ lowest_pid = psutil.pids()[0]
+ self.assertIsNone(psutil.Process(lowest_pid).parent())
+
+ def test_parent_multi(self):
+ parent = psutil.Process()
+ child, grandchild = self.spawn_children_pair()
+ self.assertEqual(grandchild.parent(), child)
+ self.assertEqual(child.parent(), parent)
+
+ def test_parent_disappeared(self):
+ # Emulate a case where the parent process disappeared.
+ p = self.spawn_psproc()
+ with mock.patch("psutil.Process",
+ side_effect=psutil.NoSuchProcess(0, 'foo')):
+ self.assertIsNone(p.parent())
+
+ @retry_on_failure()
+ def test_parents(self):
+ parent = psutil.Process()
+ assert parent.parents()
+ child, grandchild = self.spawn_children_pair()
+ self.assertEqual(child.parents()[0], parent)
+ self.assertEqual(grandchild.parents()[0], child)
+ self.assertEqual(grandchild.parents()[1], parent)
+
+ def test_children(self):
+ parent = psutil.Process()
+ self.assertEqual(parent.children(), [])
+ self.assertEqual(parent.children(recursive=True), [])
+ # On Windows we set the flag to 0 in order to cancel out the
+ # CREATE_NO_WINDOW flag (enabled by default) which creates
+ # an extra "conhost.exe" child.
+ child = self.spawn_psproc(creationflags=0)
+ children1 = parent.children()
+ children2 = parent.children(recursive=True)
+ for children in (children1, children2):
+ self.assertEqual(len(children), 1)
+ self.assertEqual(children[0].pid, child.pid)
+ self.assertEqual(children[0].ppid(), parent.pid)
+
+ def test_children_recursive(self):
+ # Test children() against two sub processes, p1 and p2, where
+ # p1 (our child) spawned p2 (our grandchild).
+ parent = psutil.Process()
+ child, grandchild = self.spawn_children_pair()
+ self.assertEqual(parent.children(), [child])
+ self.assertEqual(parent.children(recursive=True), [child, grandchild])
+ # If the intermediate process is gone there's no way for
+ # children() to recursively find it.
+ child.terminate()
+ child.wait()
+ self.assertEqual(parent.children(recursive=True), [])
+
+ def test_children_duplicates(self):
+ # find the process which has the highest number of children
+ table = collections.defaultdict(int)
+ for p in psutil.process_iter():
+ try:
+ table[p.ppid()] += 1
+ except psutil.Error:
+ pass
+ # this is the one, now let's make sure there are no duplicates
+ pid = sorted(table.items(), key=lambda x: x[1])[-1][0]
+ if LINUX and pid == 0:
+ raise self.skipTest("PID 0")
+ p = psutil.Process(pid)
+ try:
+ c = p.children(recursive=True)
+ except psutil.AccessDenied: # windows
+ pass
+ else:
+ self.assertEqual(len(c), len(set(c)))
+
+ def test_parents_and_children(self):
+ parent = psutil.Process()
+ child, grandchild = self.spawn_children_pair()
+ # forward
+ children = parent.children(recursive=True)
+ self.assertEqual(len(children), 2)
+ self.assertEqual(children[0], child)
+ self.assertEqual(children[1], grandchild)
+ # backward
+ parents = grandchild.parents()
+ self.assertEqual(parents[0], child)
+ self.assertEqual(parents[1], parent)
+
+ def test_suspend_resume(self):
+ p = self.spawn_psproc()
+ p.suspend()
+ for x in range(100):
+ if p.status() == psutil.STATUS_STOPPED:
+ break
+ time.sleep(0.01)
+ p.resume()
+ self.assertNotEqual(p.status(), psutil.STATUS_STOPPED)
+
+ def test_invalid_pid(self):
+ self.assertRaises(TypeError, psutil.Process, "1")
+ self.assertRaises(ValueError, psutil.Process, -1)
+
+ def test_as_dict(self):
+ p = psutil.Process()
+ d = p.as_dict(attrs=['exe', 'name'])
+ self.assertEqual(sorted(d.keys()), ['exe', 'name'])
+
+ p = psutil.Process(min(psutil.pids()))
+ d = p.as_dict(attrs=['connections'], ad_value='foo')
+ if not isinstance(d['connections'], list):
+ self.assertEqual(d['connections'], 'foo')
+
+ # Test ad_value is set on AccessDenied.
+ with mock.patch('psutil.Process.nice', create=True,
+ side_effect=psutil.AccessDenied):
+ self.assertEqual(
+ p.as_dict(attrs=["nice"], ad_value=1), {"nice": 1})
+
+ # Test that NoSuchProcess bubbles up.
+ with mock.patch('psutil.Process.nice', create=True,
+ side_effect=psutil.NoSuchProcess(p.pid, "name")):
+ self.assertRaises(
+ psutil.NoSuchProcess, p.as_dict, attrs=["nice"])
+
+ # Test that ZombieProcess is swallowed.
+ with mock.patch('psutil.Process.nice', create=True,
+ side_effect=psutil.ZombieProcess(p.pid, "name")):
+ self.assertEqual(
+ p.as_dict(attrs=["nice"], ad_value="foo"), {"nice": "foo"})
+
+ # By default APIs raising NotImplementedError are
+ # supposed to be skipped.
+ with mock.patch('psutil.Process.nice', create=True,
+ side_effect=NotImplementedError):
+ d = p.as_dict()
+ self.assertNotIn('nice', list(d.keys()))
+ # ...unless the user explicitly asked for some attr.
+ with self.assertRaises(NotImplementedError):
+ p.as_dict(attrs=["nice"])
+
+ # errors
+ with self.assertRaises(TypeError):
+ p.as_dict('name')
+ with self.assertRaises(ValueError):
+ p.as_dict(['foo'])
+ with self.assertRaises(ValueError):
+ p.as_dict(['foo', 'bar'])
+
+ def test_oneshot(self):
+ p = psutil.Process()
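+ # Inside oneshot() the first call fills the per-instance cache,
+ # so the platform implementation is expected to be hit only once;
+ # outside the context manager every call goes through.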
+ with mock.patch("psutil._psplatform.Process.cpu_times") as m:
+ with p.oneshot():
+ p.cpu_times()
+ p.cpu_times()
+ self.assertEqual(m.call_count, 1)
+
+ with mock.patch("psutil._psplatform.Process.cpu_times") as m:
+ p.cpu_times()
+ p.cpu_times()
+ self.assertEqual(m.call_count, 2)
+
+ def test_oneshot_twice(self):
+ # Test the case where the ctx manager is __enter__ed twice.
+ # The second __enter__ is supposed to result in a NOOP.
+ p = psutil.Process()
+ with mock.patch("psutil._psplatform.Process.cpu_times") as m1:
+ with mock.patch("psutil._psplatform.Process.oneshot_enter") as m2:
+ with p.oneshot():
+ p.cpu_times()
+ p.cpu_times()
+ with p.oneshot():
+ p.cpu_times()
+ p.cpu_times()
+ self.assertEqual(m1.call_count, 1)
+ self.assertEqual(m2.call_count, 1)
+
+ with mock.patch("psutil._psplatform.Process.cpu_times") as m:
+ p.cpu_times()
+ p.cpu_times()
+ self.assertEqual(m.call_count, 2)
+
+ def test_oneshot_cache(self):
+ # Make sure oneshot() cache is nonglobal. Instead it's
+ # supposed to be bound to the Process instance, see:
+ # https://github.com/giampaolo/psutil/issues/1373
+ p1, p2 = self.spawn_children_pair()
+ p1_ppid = p1.ppid()
+ p2_ppid = p2.ppid()
+ self.assertNotEqual(p1_ppid, p2_ppid)
+ with p1.oneshot():
+ self.assertEqual(p1.ppid(), p1_ppid)
+ self.assertEqual(p2.ppid(), p2_ppid)
+ with p2.oneshot():
+ self.assertEqual(p1.ppid(), p1_ppid)
+ self.assertEqual(p2.ppid(), p2_ppid)
+
+ def test_halfway_terminated_process(self):
+ # Test that NoSuchProcess exception gets raised in case the
+ # process dies after we create the Process object.
+ # Example:
+ # >>> proc = Process(1234)
+ # >>> time.sleep(2) # time-consuming task, process dies in meantime
+ # >>> proc.name()
+ # Refers to Issue #15
+ def assert_raises_nsp(fun, fun_name):
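+ # Call fun() and expect NoSuchProcess, tolerating the few
+ # platform-specific exceptions handled below.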
+ try:
+ ret = fun()
+ except psutil.ZombieProcess: # differentiate from NSP
+ raise
+ except psutil.NoSuchProcess:
+ pass
+ except psutil.AccessDenied:
+ if OPENBSD and fun_name in ('threads', 'num_threads'):
+ return
+ raise
+ else:
+ # NtQuerySystemInformation succeeds even if process is gone.
+ if WINDOWS and fun_name in ('exe', 'name'):
+ return
+ raise self.fail("%r didn't raise NSP and returned %r "
+ "instead" % (fun, ret))
+
+ p = self.spawn_psproc()
+ p.terminate()
+ p.wait()
+ if WINDOWS: # XXX
+ call_until(psutil.pids, "%s not in ret" % p.pid)
+ self.assertProcessGone(p)
+
+ ns = process_namespace(p)
+ for fun, name in ns.iter(ns.all):
+ assert_raises_nsp(fun, name)
+
+ # NtQuerySystemInformation succeeds even if process is gone.
+ if WINDOWS and not GITHUB_ACTIONS:
+ normcase = os.path.normcase
+ self.assertEqual(normcase(p.exe()), normcase(PYTHON_EXE))
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_zombie_process(self):
+ def succeed_or_zombie_p_exc(fun):
+ try:
+ return fun()
+ except (psutil.ZombieProcess, psutil.AccessDenied):
+ pass
+
+ parent, zombie = self.spawn_zombie()
+ # A zombie process should always be instantiable
+ zproc = psutil.Process(zombie.pid)
+ # ...and at least its status should always be queryable
+ self.assertEqual(zproc.status(), psutil.STATUS_ZOMBIE)
+ # ...and it should be considered 'running'
+ assert zproc.is_running()
+ # ...and as_dict() shouldn't crash
+ zproc.as_dict()
+ # ...its parent should 'see' it (edit: not true on BSD and MACOS)
+ # descendants = [x.pid for x in psutil.Process().children(
+ # recursive=True)]
+ # self.assertIn(zpid, descendants)
+ # XXX should we also assume ppid to be usable? Note: this
+ # would be an important use case as the only way to get
+ # rid of a zombie is to kill its parent.
+ # self.assertEqual(zpid.ppid(), os.getpid())
+ # ...and all other APIs should be able to deal with it
+
+ ns = process_namespace(zproc)
+ for fun, name in ns.iter(ns.all):
+ succeed_or_zombie_p_exc(fun)
+
+ assert psutil.pid_exists(zproc.pid)
+ self.assertIn(zproc.pid, psutil.pids())
+ self.assertIn(zproc.pid, [x.pid for x in psutil.process_iter()])
+ psutil._pmap = {}
+ self.assertIn(zproc.pid, [x.pid for x in psutil.process_iter()])
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_zombie_process_is_running_w_exc(self):
+ # Emulate a case where internally is_running() raises
+ # ZombieProcess.
+ p = psutil.Process()
+ with mock.patch("psutil.Process",
+ side_effect=psutil.ZombieProcess(0)) as m:
+ assert p.is_running()
+ assert m.called
+
+ @unittest.skipIf(not POSIX, 'POSIX only')
+ def test_zombie_process_status_w_exc(self):
+ # Emulate a case where internally status() raises
+ # ZombieProcess.
+ p = psutil.Process()
+ with mock.patch("psutil._psplatform.Process.status",
+ side_effect=psutil.ZombieProcess(0)) as m:
+ self.assertEqual(p.status(), psutil.STATUS_ZOMBIE)
+ assert m.called
+
+ def test_reused_pid(self):
+ # Emulate a case where PID has been reused by another process.
+ subp = self.spawn_testproc()
+ p = psutil.Process(subp.pid)
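+ # Fake a different creation time so that (pid, create_time) no
+ # longer matches the running process, as if the PID had been
+ # reused by another process.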
+ p._ident = (p.pid, p.create_time() + 100)
+ assert not p.is_running()
+ assert p != psutil.Process(subp.pid)
+ msg = "process no longer exists and its PID has been reused"
+ self.assertRaisesRegex(psutil.NoSuchProcess, msg, p.suspend)
+ self.assertRaisesRegex(psutil.NoSuchProcess, msg, p.resume)
+ self.assertRaisesRegex(psutil.NoSuchProcess, msg, p.terminate)
+ self.assertRaisesRegex(psutil.NoSuchProcess, msg, p.kill)
+ self.assertRaisesRegex(psutil.NoSuchProcess, msg, p.children)
+
+ def test_pid_0(self):
+ # Process(0) is supposed to work on all platforms except Linux
+ if 0 not in psutil.pids():
+ self.assertRaises(psutil.NoSuchProcess, psutil.Process, 0)
+ # These 2 are a contradiction, but "ps" says PID 1's parent
+ # is PID 0.
+ assert not psutil.pid_exists(0)
+ self.assertEqual(psutil.Process(1).ppid(), 0)
+ return
+
+ p = psutil.Process(0)
+ exc = psutil.AccessDenied if WINDOWS else ValueError
+ self.assertRaises(exc, p.wait)
+ self.assertRaises(exc, p.terminate)
+ self.assertRaises(exc, p.suspend)
+ self.assertRaises(exc, p.resume)
+ self.assertRaises(exc, p.kill)
+ self.assertRaises(exc, p.send_signal, signal.SIGTERM)
+
+ # test all methods
+ ns = process_namespace(p)
+ for fun, name in ns.iter(ns.getters + ns.setters):
+ try:
+ ret = fun()
+ except psutil.AccessDenied:
+ pass
+ else:
+ if name in ("uids", "gids"):
+ self.assertEqual(ret.real, 0)
+ elif name == "username":
+ user = 'NT AUTHORITY\\SYSTEM' if WINDOWS else 'root'
+ self.assertEqual(p.username(), user)
+ elif name == "name":
+ assert ret, ret
+
+ if not OPENBSD:
+ self.assertIn(0, psutil.pids())
+ assert psutil.pid_exists(0)
+
+ @unittest.skipIf(not HAS_ENVIRON, "not supported")
+ def test_environ(self):
+ def clean_dict(d):
+ # Most of these are problematic on Travis.
+ d.pop("PLAT", None)
+ d.pop("HOME", None)
+ if MACOS:
+ d.pop("__CF_USER_TEXT_ENCODING", None)
+ d.pop("VERSIONER_PYTHON_PREFER_32_BIT", None)
+ d.pop("VERSIONER_PYTHON_VERSION", None)
+ return dict(
+ [(k.replace("\r", "").replace("\n", ""),
+ v.replace("\r", "").replace("\n", ""))
+ for k, v in d.items()])
+
+ self.maxDiff = None
+ p = psutil.Process()
+ d1 = clean_dict(p.environ())
+ d2 = clean_dict(os.environ.copy())
+ if not OSX and GITHUB_ACTIONS:
+ self.assertEqual(d1, d2)
+
+ @unittest.skipIf(not HAS_ENVIRON, "not supported")
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @unittest.skipIf(
+ MACOS_11PLUS,
+ "macOS 11+ can't get another process environment, issue #2084"
+ )
+ def test_weird_environ(self):
+ # environment variables can contain values without an equals sign
+ code = textwrap.dedent("""
+ #include <unistd.h>
+ #include <fcntl.h>
+
+ char * const argv[] = {"cat", 0};
+ char * const envp[] = {"A=1", "X", "C=3", 0};
+
+ int main(void) {
+ // Close stderr on exec so parent can wait for the
+ // execve to finish.
+ if (fcntl(2, F_SETFD, FD_CLOEXEC) != 0)
+ return 0;
+ return execve("/bin/cat", argv, envp);
+ }
+ """)
+ path = self.get_testfn()
+ create_exe(path, c_code=code)
+ sproc = self.spawn_testproc(
+ [path], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = psutil.Process(sproc.pid)
+ wait_for_pid(p.pid)
+ assert p.is_running()
+ # Wait for process to exec or exit.
+ self.assertEqual(sproc.stderr.read(), b"")
+ if MACOS and CI_TESTING:
+ try:
+ env = p.environ()
+ except psutil.AccessDenied:
+ # XXX: fails sometimes with:
+ # PermissionError from 'sysctl(KERN_PROCARGS2) -> EIO'
+ return
+ else:
+ env = p.environ()
+ self.assertEqual(env, {"A": "1", "C": "3"})
+ sproc.communicate()
+ self.assertEqual(sproc.returncode, 0)
+
+
+# ===================================================================
+# --- Limited user tests
+# ===================================================================
+
+
+if POSIX and os.getuid() == 0:
+
+ class LimitedUserTestCase(TestProcess):
+ """Repeat the previous tests by using a limited user.
+ Executed only on UNIX and only if the user who runs the test script
+ is root.
+ """
+ # the uid/gid the test suite runs under
+ if hasattr(os, 'getuid'):
+ PROCESS_UID = os.getuid()
+ PROCESS_GID = os.getgid()
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # re-define all existing test methods in order to
+ # ignore AccessDenied exceptions
+ for attr in [x for x in dir(self) if x.startswith('test')]:
+ meth = getattr(self, attr)
+
+ # bind `meth` at definition time, otherwise every wrapper
+ # would end up calling the last method of the loop
+ def test_(self, meth=meth):
+ try:
+ meth() # noqa
+ except psutil.AccessDenied:
+ pass
+ setattr(self, attr, types.MethodType(test_, self))
+
+ def setUp(self):
+ super().setUp()
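+ # Drop root privileges to an unprivileged uid/gid (1000 is
+ # assumed to exist on the machine running the tests).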
+ os.setegid(1000)
+ os.seteuid(1000)
+
+ def tearDown(self):
+ os.setegid(self.PROCESS_GID)
+ os.seteuid(self.PROCESS_UID)
+ super().tearDown()
+
+ def test_nice(self):
+ try:
+ psutil.Process().nice(-1)
+ except psutil.AccessDenied:
+ pass
+ else:
+ raise self.fail("exception not raised")
+
+ @unittest.skipIf(1, "causes problem as root")
+ def test_zombie_process(self):
+ pass
+
+
+# ===================================================================
+# --- psutil.Popen tests
+# ===================================================================
+
+
+class TestPopen(PsutilTestCase):
+ """Tests for psutil.Popen class."""
+
+ @classmethod
+ def tearDownClass(cls):
+ reap_children()
+
+ def test_misc(self):
+ # XXX this test causes a ResourceWarning on Python 3 because
+ # psutil.__subproc instance doesn't get properly freed.
+ # Not sure what to do though.
+ cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"]
+ with psutil.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) as proc:
+ proc.name()
+ proc.cpu_times()
+ proc.stdin
+ self.assertTrue(dir(proc))
+ self.assertRaises(AttributeError, getattr, proc, 'foo')
+ proc.terminate()
+ if POSIX:
+ self.assertEqual(proc.wait(5), -signal.SIGTERM)
+ else:
+ self.assertEqual(proc.wait(5), signal.SIGTERM)
+
+ def test_ctx_manager(self):
+ with psutil.Popen([PYTHON_EXE, "-V"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE) as proc:
+ proc.communicate()
+ assert proc.stdout.closed
+ assert proc.stderr.closed
+ assert proc.stdin.closed
+ self.assertEqual(proc.returncode, 0)
+
+ def test_kill_terminate(self):
+ # subprocess.Popen()'s terminate(), kill() and send_signal() do
+ # not raise exception after the process is gone. psutil.Popen
+ # diverges from that.
+ cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"]
+ with psutil.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) as proc:
+ proc.terminate()
+ proc.wait()
+ self.assertRaises(psutil.NoSuchProcess, proc.terminate)
+ self.assertRaises(psutil.NoSuchProcess, proc.kill)
+ self.assertRaises(psutil.NoSuchProcess, proc.send_signal,
+ signal.SIGTERM)
+ if WINDOWS:
+ self.assertRaises(psutil.NoSuchProcess, proc.send_signal,
+ signal.CTRL_C_EVENT)
+ self.assertRaises(psutil.NoSuchProcess, proc.send_signal,
+ signal.CTRL_BREAK_EVENT)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_sunos.py b/lib/psutil/tests/test_sunos.py
new file mode 100644
index 0000000..dd74a49
--- /dev/null
+++ b/lib/psutil/tests/test_sunos.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sun OS specific tests."""
+
+import os
+import unittest
+
+import psutil
+from psutil import SUNOS
+from psutil.tests import PsutilTestCase
+from psutil.tests import sh
+
+
+@unittest.skipIf(not SUNOS, "SUNOS only")
+class SunOSSpecificTestCase(PsutilTestCase):
+
+ def test_swap_memory(self):
+ out = sh('env PATH=/usr/sbin:/sbin:%s swap -l' % os.environ['PATH'])
+ lines = out.strip().split('\n')[1:]
+ if not lines:
+ raise ValueError('no swap device(s) configured')
+ total = free = 0
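+ # "swap -l" reports sizes in 512-byte blocks, hence the * 512 below.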
+ for line in lines:
+ line = line.split()
+ t, f = line[-2:]
+ total += int(int(t) * 512)
+ free += int(int(f) * 512)
+ used = total - free
+
+ psutil_swap = psutil.swap_memory()
+ self.assertEqual(psutil_swap.total, total)
+ self.assertEqual(psutil_swap.used, used)
+ self.assertEqual(psutil_swap.free, free)
+
+ def test_cpu_count(self):
+ out = sh("/usr/sbin/psrinfo")
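+ # psrinfo prints one line per logical CPU, so the line count
+ # should match psutil.cpu_count().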
+ self.assertEqual(psutil.cpu_count(), len(out.split('\n')))
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_system.py b/lib/psutil/tests/test_system.py
new file mode 100644
index 0000000..1722b51
--- /dev/null
+++ b/lib/psutil/tests/test_system.py
@@ -0,0 +1,892 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for system APIS."""
+
+import contextlib
+import datetime
+import errno
+import os
+import platform
+import pprint
+import shutil
+import signal
+import socket
+import sys
+import time
+import unittest
+
+import psutil
+from psutil import AIX
+from psutil import BSD
+from psutil import FREEBSD
+from psutil import LINUX
+from psutil import MACOS
+from psutil import NETBSD
+from psutil import OPENBSD
+from psutil import POSIX
+from psutil import SUNOS
+from psutil import WINDOWS
+from psutil._compat import FileNotFoundError
+from psutil._compat import long
+from psutil.tests import ASCII_FS
+from psutil.tests import CI_TESTING
+from psutil.tests import DEVNULL
+from psutil.tests import GITHUB_ACTIONS
+from psutil.tests import GLOBAL_TIMEOUT
+from psutil.tests import HAS_BATTERY
+from psutil.tests import HAS_CPU_FREQ
+from psutil.tests import HAS_GETLOADAVG
+from psutil.tests import HAS_NET_IO_COUNTERS
+from psutil.tests import HAS_SENSORS_BATTERY
+from psutil.tests import HAS_SENSORS_FANS
+from psutil.tests import HAS_SENSORS_TEMPERATURES
+from psutil.tests import IS_64BIT
+from psutil.tests import MACOS_12PLUS
+from psutil.tests import PYPY
+from psutil.tests import UNICODE_SUFFIX
+from psutil.tests import PsutilTestCase
+from psutil.tests import check_net_address
+from psutil.tests import enum
+from psutil.tests import mock
+from psutil.tests import retry_on_failure
+
+
+# ===================================================================
+# --- System-related API tests
+# ===================================================================
+
+
+class TestProcessAPIs(PsutilTestCase):
+
+ def test_process_iter(self):
+ self.assertIn(os.getpid(), [x.pid for x in psutil.process_iter()])
+ sproc = self.spawn_testproc()
+ self.assertIn(sproc.pid, [x.pid for x in psutil.process_iter()])
+ p = psutil.Process(sproc.pid)
+ p.kill()
+ p.wait()
+ self.assertNotIn(sproc.pid, [x.pid for x in psutil.process_iter()])
+
+ with mock.patch('psutil.Process',
+ side_effect=psutil.NoSuchProcess(os.getpid())):
+ self.assertEqual(list(psutil.process_iter()), [])
+ with mock.patch('psutil.Process',
+ side_effect=psutil.AccessDenied(os.getpid())):
+ with self.assertRaises(psutil.AccessDenied):
+ list(psutil.process_iter())
+
+ def test_process_iter_w_attrs(self):
+ for p in psutil.process_iter(attrs=['pid']):
+ self.assertEqual(list(p.info.keys()), ['pid'])
+ with self.assertRaises(ValueError):
+ list(psutil.process_iter(attrs=['foo']))
+ with mock.patch("psutil._psplatform.Process.cpu_times",
+ side_effect=psutil.AccessDenied(0, "")) as m:
+ for p in psutil.process_iter(attrs=["pid", "cpu_times"]):
+ self.assertIsNone(p.info['cpu_times'])
+ self.assertGreaterEqual(p.info['pid'], 0)
+ assert m.called
+ with mock.patch("psutil._psplatform.Process.cpu_times",
+ side_effect=psutil.AccessDenied(0, "")) as m:
+ flag = object()
+ for p in psutil.process_iter(
+ attrs=["pid", "cpu_times"], ad_value=flag):
+ self.assertIs(p.info['cpu_times'], flag)
+ self.assertGreaterEqual(p.info['pid'], 0)
+ assert m.called
+
+ @unittest.skipIf(PYPY and WINDOWS,
+ "spawn_testproc() unreliable on PYPY + WINDOWS")
+ def test_wait_procs(self):
+ def callback(p):
+ pids.append(p.pid)
+
+ pids = []
+ sproc1 = self.spawn_testproc()
+ sproc2 = self.spawn_testproc()
+ sproc3 = self.spawn_testproc()
+ procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)]
+ self.assertRaises(ValueError, psutil.wait_procs, procs, timeout=-1)
+ self.assertRaises(TypeError, psutil.wait_procs, procs, callback=1)
+ t = time.time()
+ gone, alive = psutil.wait_procs(procs, timeout=0.01, callback=callback)
+
+ self.assertLess(time.time() - t, 0.5)
+ self.assertEqual(gone, [])
+ self.assertEqual(len(alive), 3)
+ self.assertEqual(pids, [])
+ for p in alive:
+ self.assertFalse(hasattr(p, 'returncode'))
+
+ @retry_on_failure(30)
+ def test(procs, callback):
+ gone, alive = psutil.wait_procs(procs, timeout=0.03,
+ callback=callback)
+ self.assertEqual(len(gone), 1)
+ self.assertEqual(len(alive), 2)
+ return gone, alive
+
+ sproc3.terminate()
+ gone, alive = test(procs, callback)
+ self.assertIn(sproc3.pid, [x.pid for x in gone])
+ if POSIX:
+ self.assertEqual(gone.pop().returncode, -signal.SIGTERM)
+ else:
+ self.assertEqual(gone.pop().returncode, 1)
+ self.assertEqual(pids, [sproc3.pid])
+ for p in alive:
+ self.assertFalse(hasattr(p, 'returncode'))
+
+ @retry_on_failure(30)
+ def test(procs, callback):
+ gone, alive = psutil.wait_procs(procs, timeout=0.03,
+ callback=callback)
+ self.assertEqual(len(gone), 3)
+ self.assertEqual(len(alive), 0)
+ return gone, alive
+
+ sproc1.terminate()
+ sproc2.terminate()
+ gone, alive = test(procs, callback)
+ self.assertEqual(set(pids), set([sproc1.pid, sproc2.pid, sproc3.pid]))
+ for p in gone:
+ self.assertTrue(hasattr(p, 'returncode'))
+
+ @unittest.skipIf(PYPY and WINDOWS,
+ "spawn_testproc() unreliable on PYPY + WINDOWS")
+ def test_wait_procs_no_timeout(self):
+ sproc1 = self.spawn_testproc()
+ sproc2 = self.spawn_testproc()
+ sproc3 = self.spawn_testproc()
+ procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)]
+ for p in procs:
+ p.terminate()
+ gone, alive = psutil.wait_procs(procs)
+
+ def test_pid_exists(self):
+ sproc = self.spawn_testproc()
+ self.assertTrue(psutil.pid_exists(sproc.pid))
+ p = psutil.Process(sproc.pid)
+ p.kill()
+ p.wait()
+ self.assertFalse(psutil.pid_exists(sproc.pid))
+ self.assertFalse(psutil.pid_exists(-1))
+ self.assertEqual(psutil.pid_exists(0), 0 in psutil.pids())
+
+ def test_pid_exists_2(self):
+ pids = psutil.pids()
+ for pid in pids:
+ try:
+ assert psutil.pid_exists(pid)
+ except AssertionError:
+ # in case the process disappeared in the meantime, fail only
+ # if it is no longer in psutil.pids()
+ time.sleep(.1)
+ self.assertNotIn(pid, psutil.pids())
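+ # a range of PIDs which is very unlikely to be in use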
+ pids = range(max(pids) + 5000, max(pids) + 6000)
+ for pid in pids:
+ self.assertFalse(psutil.pid_exists(pid), msg=pid)
+
+
+class TestMiscAPIs(PsutilTestCase):
+
+ def test_boot_time(self):
+ bt = psutil.boot_time()
+ self.assertIsInstance(bt, float)
+ self.assertGreater(bt, 0)
+ self.assertLess(bt, time.time())
+
+ @unittest.skipIf(CI_TESTING and not psutil.users(), "unreliable on CI")
+ def test_users(self):
+ users = psutil.users()
+ self.assertNotEqual(users, [])
+ for user in users:
+ assert user.name, user
+ self.assertIsInstance(user.name, str)
+ self.assertIsInstance(user.terminal, (str, type(None)))
+ if user.host is not None:
+ self.assertIsInstance(user.host, (str, type(None)))
+ user.terminal
+ user.host
+ assert user.started > 0.0, user
+ datetime.datetime.fromtimestamp(user.started)
+ if WINDOWS or OPENBSD:
+ self.assertIsNone(user.pid)
+ else:
+ psutil.Process(user.pid)
+
+ def test_test(self):
+ # test for psutil.test() function
+ stdout = sys.stdout
+ sys.stdout = DEVNULL
+ try:
+ psutil.test()
+ finally:
+ sys.stdout = stdout
+
+ def test_os_constants(self):
+ names = ["POSIX", "WINDOWS", "LINUX", "MACOS", "FREEBSD", "OPENBSD",
+ "NETBSD", "BSD", "SUNOS"]
+ for name in names:
+ self.assertIsInstance(getattr(psutil, name), bool, msg=name)
+
+ if os.name == 'posix':
+ assert psutil.POSIX
+ assert not psutil.WINDOWS
+ names.remove("POSIX")
+ if "linux" in sys.platform.lower():
+ assert psutil.LINUX
+ names.remove("LINUX")
+ elif "bsd" in sys.platform.lower():
+ assert psutil.BSD
+ self.assertEqual([psutil.FREEBSD, psutil.OPENBSD,
+ psutil.NETBSD].count(True), 1)
+ names.remove("BSD")
+ names.remove("FREEBSD")
+ names.remove("OPENBSD")
+ names.remove("NETBSD")
+ elif "sunos" in sys.platform.lower() or \
+ "solaris" in sys.platform.lower():
+ assert psutil.SUNOS
+ names.remove("SUNOS")
+ elif "darwin" in sys.platform.lower():
+ assert psutil.MACOS
+ names.remove("MACOS")
+ else:
+ assert psutil.WINDOWS
+ assert not psutil.POSIX
+ names.remove("WINDOWS")
+
+ # assert all other constants are set to False
+ for name in names:
+ self.assertIs(getattr(psutil, name), False, msg=name)
+
+
+class TestMemoryAPIs(PsutilTestCase):
+
+ def test_virtual_memory(self):
+ mem = psutil.virtual_memory()
+ assert mem.total > 0, mem
+ assert mem.available > 0, mem
+ assert 0 <= mem.percent <= 100, mem
+ assert mem.used > 0, mem
+ assert mem.free >= 0, mem
+ for name in mem._fields:
+ value = getattr(mem, name)
+ if name != 'percent':
+ self.assertIsInstance(value, (int, long))
+ if name != 'total':
+ if not value >= 0:
+ raise self.fail("%r < 0 (%s)" % (name, value))
+ if value > mem.total:
+ raise self.fail("%r > total (total=%s, %s=%s)"
+ % (name, mem.total, name, value))
+
+ def test_swap_memory(self):
+ mem = psutil.swap_memory()
+ self.assertEqual(
+ mem._fields, ('total', 'used', 'free', 'percent', 'sin', 'sout'))
+
+ assert mem.total >= 0, mem
+ assert mem.used >= 0, mem
+ if mem.total > 0:
+ assert mem.free > 0, mem
+ else:
+ # likely a system with no swap partition
+ assert mem.free == 0, mem
+ assert 0 <= mem.percent <= 100, mem
+ assert mem.sin >= 0, mem
+ assert mem.sout >= 0, mem
+
+
+class TestCpuAPIs(PsutilTestCase):
+
+ def test_cpu_count_logical(self):
+ logical = psutil.cpu_count()
+ self.assertIsNotNone(logical)
+ self.assertEqual(logical, len(psutil.cpu_times(percpu=True)))
+ self.assertGreaterEqual(logical, 1)
+ #
+ if os.path.exists("/proc/cpuinfo"):
+ with open("/proc/cpuinfo") as fd:
+ cpuinfo_data = fd.read()
+ if "physical id" not in cpuinfo_data:
+ raise unittest.SkipTest("cpuinfo doesn't include physical id")
+
+ def test_cpu_count_cores(self):
+ logical = psutil.cpu_count()
+ cores = psutil.cpu_count(logical=False)
+ if cores is None:
+ raise self.skipTest("cpu_count_cores() is None")
+ if WINDOWS and sys.getwindowsversion()[:2] <= (6, 1): # <= Windows 7
+ self.assertIsNone(cores)
+ else:
+ self.assertGreaterEqual(cores, 1)
+ self.assertGreaterEqual(logical, cores)
+
+ def test_cpu_count_none(self):
+ # https://github.com/giampaolo/psutil/issues/1085
+ for val in (-1, 0, None):
+ with mock.patch('psutil._psplatform.cpu_count_logical',
+ return_value=val) as m:
+ self.assertIsNone(psutil.cpu_count())
+ assert m.called
+ with mock.patch('psutil._psplatform.cpu_count_cores',
+ return_value=val) as m:
+ self.assertIsNone(psutil.cpu_count(logical=False))
+ assert m.called
+
+ def test_cpu_times(self):
+ # Check type, value >= 0, str().
+ total = 0
+ times = psutil.cpu_times()
+ sum(times)
+ for cp_time in times:
+ self.assertIsInstance(cp_time, float)
+ self.assertGreaterEqual(cp_time, 0.0)
+ total += cp_time
+ self.assertEqual(total, sum(times))
+ str(times)
+ # CPU times are always supposed to increase over time
+ # or at least remain the same and that's because time
+ # cannot go backwards.
+ # Surprisingly sometimes this might not be the case (at
+ # least on Windows and Linux), see:
+ # https://github.com/giampaolo/psutil/issues/392
+ # https://github.com/giampaolo/psutil/issues/645
+ # if not WINDOWS:
+ # last = psutil.cpu_times()
+ # for x in range(100):
+ # new = psutil.cpu_times()
+ # for field in new._fields:
+ # new_t = getattr(new, field)
+ # last_t = getattr(last, field)
+ # self.assertGreaterEqual(new_t, last_t,
+ # msg="%s %s" % (new_t, last_t))
+ # last = new
+
+ def test_cpu_times_time_increases(self):
+ # Make sure time increases between calls.
+ t1 = sum(psutil.cpu_times())
+ stop_at = time.time() + GLOBAL_TIMEOUT
+ while time.time() < stop_at:
+ t2 = sum(psutil.cpu_times())
+ if t2 > t1:
+ return
+ raise self.fail("time remained the same")
+
+ def test_per_cpu_times(self):
+ # Check type, value >= 0, str().
+ for times in psutil.cpu_times(percpu=True):
+ total = 0
+ sum(times)
+ for cp_time in times:
+ self.assertIsInstance(cp_time, float)
+ self.assertGreaterEqual(cp_time, 0.0)
+ total += cp_time
+ self.assertEqual(total, sum(times))
+ str(times)
+ self.assertEqual(len(psutil.cpu_times(percpu=True)[0]),
+ len(psutil.cpu_times(percpu=False)))
+
+ # Note: in theory CPU times are always supposed to increase over
+ # time or remain the same but never go backwards. In practice
+ # sometimes this is not the case.
+        # This issue seemed to afflict Windows:
+        # https://github.com/giampaolo/psutil/issues/392
+        # ...but it turns out Linux (rarely) behaves the same way.
+ # last = psutil.cpu_times(percpu=True)
+ # for x in range(100):
+ # new = psutil.cpu_times(percpu=True)
+ # for index in range(len(new)):
+ # newcpu = new[index]
+ # lastcpu = last[index]
+ # for field in newcpu._fields:
+ # new_t = getattr(newcpu, field)
+ # last_t = getattr(lastcpu, field)
+ # self.assertGreaterEqual(
+ # new_t, last_t, msg="%s %s" % (lastcpu, newcpu))
+ # last = new
+
+ def test_per_cpu_times_2(self):
+        # Simulate some workload, then make sure times have increased
+        # between calls.
+ tot1 = psutil.cpu_times(percpu=True)
+ giveup_at = time.time() + GLOBAL_TIMEOUT
+ while True:
+ if time.time() >= giveup_at:
+ return self.fail("timeout")
+ tot2 = psutil.cpu_times(percpu=True)
+ for t1, t2 in zip(tot1, tot2):
+ t1, t2 = psutil._cpu_busy_time(t1), psutil._cpu_busy_time(t2)
+ difference = t2 - t1
+ if difference >= 0.05:
+ return
+
+ def test_cpu_times_comparison(self):
+ # Make sure the sum of all per cpu times is almost equal to
+ # base "one cpu" times.
+ base = psutil.cpu_times()
+ per_cpu = psutil.cpu_times(percpu=True)
+ summed_values = base._make([sum(num) for num in zip(*per_cpu)])
+ for field in base._fields:
+ self.assertAlmostEqual(
+ getattr(base, field), getattr(summed_values, field), delta=1)
+
+ def _test_cpu_percent(self, percent, last_ret, new_ret):
+ try:
+ self.assertIsInstance(percent, float)
+ self.assertGreaterEqual(percent, 0.0)
+ self.assertIsNot(percent, -0.0)
+ self.assertLessEqual(percent, 100.0 * psutil.cpu_count())
+ except AssertionError as err:
+ raise AssertionError("\n%s\nlast=%s\nnew=%s" % (
+ err, pprint.pformat(last_ret), pprint.pformat(new_ret)))
+
+ def test_cpu_percent(self):
+ last = psutil.cpu_percent(interval=0.001)
+ for x in range(100):
+ new = psutil.cpu_percent(interval=None)
+ self._test_cpu_percent(new, last, new)
+ last = new
+ with self.assertRaises(ValueError):
+ psutil.cpu_percent(interval=-1)
+
+ def test_per_cpu_percent(self):
+ last = psutil.cpu_percent(interval=0.001, percpu=True)
+ self.assertEqual(len(last), psutil.cpu_count())
+ for x in range(100):
+ new = psutil.cpu_percent(interval=None, percpu=True)
+ for percent in new:
+ self._test_cpu_percent(percent, last, new)
+ last = new
+ with self.assertRaises(ValueError):
+ psutil.cpu_percent(interval=-1, percpu=True)
+
+ def test_cpu_times_percent(self):
+ last = psutil.cpu_times_percent(interval=0.001)
+ for x in range(100):
+ new = psutil.cpu_times_percent(interval=None)
+ for percent in new:
+ self._test_cpu_percent(percent, last, new)
+ self._test_cpu_percent(sum(new), last, new)
+ last = new
+ with self.assertRaises(ValueError):
+ psutil.cpu_times_percent(interval=-1)
+
+ def test_per_cpu_times_percent(self):
+ last = psutil.cpu_times_percent(interval=0.001, percpu=True)
+ self.assertEqual(len(last), psutil.cpu_count())
+ for x in range(100):
+ new = psutil.cpu_times_percent(interval=None, percpu=True)
+ for cpu in new:
+ for percent in cpu:
+ self._test_cpu_percent(percent, last, new)
+ self._test_cpu_percent(sum(cpu), last, new)
+ last = new
+
+ def test_per_cpu_times_percent_negative(self):
+ # see: https://github.com/giampaolo/psutil/issues/645
+ psutil.cpu_times_percent(percpu=True)
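+        # Build one all-zero cpu_times namedtuple per CPU: feeding these
+        # to the patched cpu_times() forces negative time deltas, which
+        # should still be reported as percentages >= 0.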
+ zero_times = [x._make([0 for x in range(len(x._fields))])
+ for x in psutil.cpu_times(percpu=True)]
+ with mock.patch('psutil.cpu_times', return_value=zero_times):
+ for cpu in psutil.cpu_times_percent(percpu=True):
+ for percent in cpu:
+ self._test_cpu_percent(percent, None, None)
+
+ def test_cpu_stats(self):
+ # Tested more extensively in per-platform test modules.
+ infos = psutil.cpu_stats()
+ self.assertEqual(
+ infos._fields,
+ ('ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls'))
+ for name in infos._fields:
+ value = getattr(infos, name)
+ self.assertGreaterEqual(value, 0)
+ # on AIX, ctx_switches is always 0
+ if not AIX and name in ('ctx_switches', 'interrupts'):
+ self.assertGreater(value, 0)
+
+ # TODO: remove this once 1892 is fixed
+ @unittest.skipIf(MACOS and platform.machine() == 'arm64',
+ "skipped due to #1892")
+ @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
+ def test_cpu_freq(self):
+ def check_ls(ls):
+ for nt in ls:
+ self.assertEqual(nt._fields, ('current', 'min', 'max'))
+ if nt.max != 0.0:
+ self.assertLessEqual(nt.current, nt.max)
+ for name in nt._fields:
+ value = getattr(nt, name)
+ self.assertIsInstance(value, (int, long, float))
+ self.assertGreaterEqual(value, 0)
+
+ ls = psutil.cpu_freq(percpu=True)
+ if FREEBSD and not ls:
+ raise self.skipTest("returns empty list on FreeBSD")
+
+ assert ls, ls
+ check_ls([psutil.cpu_freq(percpu=False)])
+
+ if LINUX:
+ self.assertEqual(len(ls), psutil.cpu_count())
+
+ @unittest.skipIf(not HAS_GETLOADAVG, "not supported")
+ def test_getloadavg(self):
+ loadavg = psutil.getloadavg()
+ self.assertEqual(len(loadavg), 3)
+ for load in loadavg:
+ self.assertIsInstance(load, float)
+ self.assertGreaterEqual(load, 0.0)
+
+
+class TestDiskAPIs(PsutilTestCase):
+
+ @unittest.skipIf(PYPY and not IS_64BIT, "unreliable on PYPY32 + 32BIT")
+ def test_disk_usage(self):
+ usage = psutil.disk_usage(os.getcwd())
+ self.assertEqual(usage._fields, ('total', 'used', 'free', 'percent'))
+
+ assert usage.total > 0, usage
+ assert usage.used > 0, usage
+ assert usage.free > 0, usage
+ assert usage.total > usage.used, usage
+ assert usage.total > usage.free, usage
+ assert 0 <= usage.percent <= 100, usage.percent
+ if hasattr(shutil, 'disk_usage'):
+ # py >= 3.3, see: http://bugs.python.org/issue12442
+ shutil_usage = shutil.disk_usage(os.getcwd())
+ tolerance = 5 * 1024 * 1024 # 5MB
+ self.assertEqual(usage.total, shutil_usage.total)
+ self.assertAlmostEqual(usage.free, shutil_usage.free,
+ delta=tolerance)
+ if not MACOS_12PLUS:
+ # see https://github.com/giampaolo/psutil/issues/2147
+ self.assertAlmostEqual(usage.used, shutil_usage.used,
+ delta=tolerance)
+
+ # if path does not exist OSError ENOENT is expected across
+ # all platforms
+ fname = self.get_testfn()
+ with self.assertRaises(FileNotFoundError):
+ psutil.disk_usage(fname)
+
+ @unittest.skipIf(not ASCII_FS, "not an ASCII fs")
+ def test_disk_usage_unicode(self):
+ # See: https://github.com/giampaolo/psutil/issues/416
+ with self.assertRaises(UnicodeEncodeError):
+ psutil.disk_usage(UNICODE_SUFFIX)
+
+ def test_disk_usage_bytes(self):
+ psutil.disk_usage(b'.')
+
+ def test_disk_partitions(self):
+ def check_ntuple(nt):
+ self.assertIsInstance(nt.device, str)
+ self.assertIsInstance(nt.mountpoint, str)
+ self.assertIsInstance(nt.fstype, str)
+ self.assertIsInstance(nt.opts, str)
+ self.assertIsInstance(nt.maxfile, (int, type(None)))
+ self.assertIsInstance(nt.maxpath, (int, type(None)))
+ if nt.maxfile is not None and not GITHUB_ACTIONS:
+ self.assertGreater(nt.maxfile, 0)
+ if nt.maxpath is not None:
+ self.assertGreater(nt.maxpath, 0)
+
+ # all = False
+ ls = psutil.disk_partitions(all=False)
+ self.assertTrue(ls, msg=ls)
+ for disk in ls:
+ check_ntuple(disk)
+ if WINDOWS and 'cdrom' in disk.opts:
+ continue
+ if not POSIX:
+ assert os.path.exists(disk.device), disk
+ else:
+ # we cannot make any assumption about this, see:
+ # http://goo.gl/p9c43
+ disk.device
+ # on modern systems mount points can also be files
+ assert os.path.exists(disk.mountpoint), disk
+ assert disk.fstype, disk
+
+ # all = True
+ ls = psutil.disk_partitions(all=True)
+ self.assertTrue(ls, msg=ls)
+ for disk in psutil.disk_partitions(all=True):
+ check_ntuple(disk)
+ if not WINDOWS and disk.mountpoint:
+ try:
+ os.stat(disk.mountpoint)
+ except OSError as err:
+ if GITHUB_ACTIONS and MACOS and err.errno == errno.EIO:
+ continue
+ # http://mail.python.org/pipermail/python-dev/
+ # 2012-June/120787.html
+ if err.errno not in (errno.EPERM, errno.EACCES):
+ raise
+ else:
+ assert os.path.exists(disk.mountpoint), disk
+
+ # ---
+
+ def find_mount_point(path):
+ path = os.path.abspath(path)
+ while not os.path.ismount(path):
+ path = os.path.dirname(path)
+ return path.lower()
+
+ mount = find_mount_point(__file__)
+ mounts = [x.mountpoint.lower() for x in
+ psutil.disk_partitions(all=True) if x.mountpoint]
+ self.assertIn(mount, mounts)
+
+ @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
+ '/proc/diskstats not available on this linux version')
+ @unittest.skipIf(CI_TESTING and not psutil.disk_io_counters(),
+ "unreliable on CI") # no visible disks
+ def test_disk_io_counters(self):
+ def check_ntuple(nt):
+ self.assertEqual(nt[0], nt.read_count)
+ self.assertEqual(nt[1], nt.write_count)
+ self.assertEqual(nt[2], nt.read_bytes)
+ self.assertEqual(nt[3], nt.write_bytes)
+ if not (OPENBSD or NETBSD):
+ self.assertEqual(nt[4], nt.read_time)
+ self.assertEqual(nt[5], nt.write_time)
+ if LINUX:
+ self.assertEqual(nt[6], nt.read_merged_count)
+ self.assertEqual(nt[7], nt.write_merged_count)
+ self.assertEqual(nt[8], nt.busy_time)
+ elif FREEBSD:
+ self.assertEqual(nt[6], nt.busy_time)
+ for name in nt._fields:
+ assert getattr(nt, name) >= 0, nt
+
+ ret = psutil.disk_io_counters(perdisk=False)
+ assert ret is not None, "no disks on this system?"
+ check_ntuple(ret)
+ ret = psutil.disk_io_counters(perdisk=True)
+ # make sure there are no duplicates
+ self.assertEqual(len(ret), len(set(ret)))
+ for key in ret:
+ assert key, key
+ check_ntuple(ret[key])
+
+ def test_disk_io_counters_no_disks(self):
+ # Emulate a case where no disks are installed, see:
+ # https://github.com/giampaolo/psutil/issues/1062
+ with mock.patch('psutil._psplatform.disk_io_counters',
+ return_value={}) as m:
+ self.assertIsNone(psutil.disk_io_counters(perdisk=False))
+ self.assertEqual(psutil.disk_io_counters(perdisk=True), {})
+ assert m.called
+
+
+class TestNetAPIs(PsutilTestCase):
+
+ @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
+ def test_net_io_counters(self):
+ def check_ntuple(nt):
+ self.assertEqual(nt[0], nt.bytes_sent)
+ self.assertEqual(nt[1], nt.bytes_recv)
+ self.assertEqual(nt[2], nt.packets_sent)
+ self.assertEqual(nt[3], nt.packets_recv)
+ self.assertEqual(nt[4], nt.errin)
+ self.assertEqual(nt[5], nt.errout)
+ self.assertEqual(nt[6], nt.dropin)
+ self.assertEqual(nt[7], nt.dropout)
+ assert nt.bytes_sent >= 0, nt
+ assert nt.bytes_recv >= 0, nt
+ assert nt.packets_sent >= 0, nt
+ assert nt.packets_recv >= 0, nt
+ assert nt.errin >= 0, nt
+ assert nt.errout >= 0, nt
+ assert nt.dropin >= 0, nt
+ assert nt.dropout >= 0, nt
+
+ ret = psutil.net_io_counters(pernic=False)
+ check_ntuple(ret)
+ ret = psutil.net_io_counters(pernic=True)
+ self.assertNotEqual(ret, [])
+ for key in ret:
+ self.assertTrue(key)
+ self.assertIsInstance(key, str)
+ check_ntuple(ret[key])
+
+ @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
+ def test_net_io_counters_no_nics(self):
+ # Emulate a case where no NICs are installed, see:
+ # https://github.com/giampaolo/psutil/issues/1062
+ with mock.patch('psutil._psplatform.net_io_counters',
+ return_value={}) as m:
+ self.assertIsNone(psutil.net_io_counters(pernic=False))
+ self.assertEqual(psutil.net_io_counters(pernic=True), {})
+ assert m.called
+
+ def test_net_if_addrs(self):
+ nics = psutil.net_if_addrs()
+ assert nics, nics
+
+ nic_stats = psutil.net_if_stats()
+
+ # Not reliable on all platforms (net_if_addrs() reports more
+ # interfaces).
+ # self.assertEqual(sorted(nics.keys()),
+ # sorted(psutil.net_io_counters(pernic=True).keys()))
+
+ families = set([socket.AF_INET, socket.AF_INET6, psutil.AF_LINK])
+ for nic, addrs in nics.items():
+ self.assertIsInstance(nic, str)
+ self.assertEqual(len(set(addrs)), len(addrs))
+ for addr in addrs:
+ self.assertIsInstance(addr.family, int)
+ self.assertIsInstance(addr.address, str)
+ self.assertIsInstance(addr.netmask, (str, type(None)))
+ self.assertIsInstance(addr.broadcast, (str, type(None)))
+ self.assertIn(addr.family, families)
+ if sys.version_info >= (3, 4) and not PYPY:
+ self.assertIsInstance(addr.family, enum.IntEnum)
+ if nic_stats[nic].isup:
+ # Do not test binding to addresses of interfaces
+ # that are down
+ if addr.family == socket.AF_INET:
+ s = socket.socket(addr.family)
+ with contextlib.closing(s):
+ s.bind((addr.address, 0))
+ elif addr.family == socket.AF_INET6:
+ info = socket.getaddrinfo(
+ addr.address, 0, socket.AF_INET6,
+ socket.SOCK_STREAM, 0, socket.AI_PASSIVE)[0]
+ af, socktype, proto, canonname, sa = info
+ s = socket.socket(af, socktype, proto)
+ with contextlib.closing(s):
+ s.bind(sa)
+ for ip in (addr.address, addr.netmask, addr.broadcast,
+ addr.ptp):
+ if ip is not None:
+ # TODO: skip AF_INET6 for now because I get:
+ # AddressValueError: Only hex digits permitted in
+ # u'c6f3%lxcbr0' in u'fe80::c8e0:fff:fe54:c6f3%lxcbr0'
+ if addr.family != socket.AF_INET6:
+ check_net_address(ip, addr.family)
+ # broadcast and ptp addresses are mutually exclusive
+ if addr.broadcast:
+ self.assertIsNone(addr.ptp)
+ elif addr.ptp:
+ self.assertIsNone(addr.broadcast)
+
+ if BSD or MACOS or SUNOS:
+ if hasattr(socket, "AF_LINK"):
+ self.assertEqual(psutil.AF_LINK, socket.AF_LINK)
+ elif LINUX:
+ self.assertEqual(psutil.AF_LINK, socket.AF_PACKET)
+ elif WINDOWS:
+ self.assertEqual(psutil.AF_LINK, -1)
+
+ def test_net_if_addrs_mac_null_bytes(self):
+ # Simulate that the underlying C function returns an incomplete
+ # MAC address. psutil is supposed to fill it with null bytes.
+ # https://github.com/giampaolo/psutil/issues/786
+ if POSIX:
+ ret = [('em1', psutil.AF_LINK, '06:3d:29', None, None, None)]
+ else:
+ ret = [('em1', -1, '06-3d-29', None, None, None)]
+ with mock.patch('psutil._psplatform.net_if_addrs',
+ return_value=ret) as m:
+ addr = psutil.net_if_addrs()['em1'][0]
+ assert m.called
+ if POSIX:
+ self.assertEqual(addr.address, '06:3d:29:00:00:00')
+ else:
+ self.assertEqual(addr.address, '06-3d-29-00-00-00')
+
+ def test_net_if_stats(self):
+ nics = psutil.net_if_stats()
+ assert nics, nics
+ all_duplexes = (psutil.NIC_DUPLEX_FULL,
+ psutil.NIC_DUPLEX_HALF,
+ psutil.NIC_DUPLEX_UNKNOWN)
+ for name, stats in nics.items():
+ self.assertIsInstance(name, str)
+ isup, duplex, speed, mtu, flags = stats
+ self.assertIsInstance(isup, bool)
+ self.assertIn(duplex, all_duplexes)
+ self.assertGreaterEqual(speed, 0)
+ self.assertGreaterEqual(mtu, 0)
+ self.assertIsInstance(flags, str)
+
+ @unittest.skipIf(not (LINUX or BSD or MACOS),
+ "LINUX or BSD or MACOS specific")
+ def test_net_if_stats_enodev(self):
+ # See: https://github.com/giampaolo/psutil/issues/1279
+ with mock.patch('psutil._psutil_posix.net_if_mtu',
+ side_effect=OSError(errno.ENODEV, "")) as m:
+ ret = psutil.net_if_stats()
+ self.assertEqual(ret, {})
+ assert m.called
+
+
+class TestSensorsAPIs(PsutilTestCase):
+
+ @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
+ def test_sensors_temperatures(self):
+ temps = psutil.sensors_temperatures()
+ for name, entries in temps.items():
+ self.assertIsInstance(name, str)
+ for entry in entries:
+ self.assertIsInstance(entry.label, str)
+ if entry.current is not None:
+ self.assertGreaterEqual(entry.current, 0)
+ if entry.high is not None:
+ self.assertGreaterEqual(entry.high, 0)
+ if entry.critical is not None:
+ self.assertGreaterEqual(entry.critical, 0)
+
+ @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
+ def test_sensors_temperatures_fahreneit(self):
+ d = {'coretemp': [('label', 50.0, 60.0, 70.0)]}
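+        # Expected conversion is F = C * 9 / 5 + 32, hence
+        # 50.0 -> 122.0, 60.0 -> 140.0, 70.0 -> 158.0.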
+ with mock.patch("psutil._psplatform.sensors_temperatures",
+ return_value=d) as m:
+ temps = psutil.sensors_temperatures(
+ fahrenheit=True)['coretemp'][0]
+ assert m.called
+ self.assertEqual(temps.current, 122.0)
+ self.assertEqual(temps.high, 140.0)
+ self.assertEqual(temps.critical, 158.0)
+
+ @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_sensors_battery(self):
+ ret = psutil.sensors_battery()
+ self.assertGreaterEqual(ret.percent, 0)
+ self.assertLessEqual(ret.percent, 100)
+ if ret.secsleft not in (psutil.POWER_TIME_UNKNOWN,
+ psutil.POWER_TIME_UNLIMITED):
+ self.assertGreaterEqual(ret.secsleft, 0)
+ else:
+ if ret.secsleft == psutil.POWER_TIME_UNLIMITED:
+ self.assertTrue(ret.power_plugged)
+ self.assertIsInstance(ret.power_plugged, bool)
+
+ @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
+ def test_sensors_fans(self):
+ fans = psutil.sensors_fans()
+ for name, entries in fans.items():
+ self.assertIsInstance(name, str)
+ for entry in entries:
+ self.assertIsInstance(entry.label, str)
+ self.assertIsInstance(entry.current, (int, long))
+ self.assertGreaterEqual(entry.current, 0)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_testutils.py b/lib/psutil/tests/test_testutils.py
new file mode 100644
index 0000000..dd98538
--- /dev/null
+++ b/lib/psutil/tests/test_testutils.py
@@ -0,0 +1,441 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests for testing utils (psutil.tests namespace).
+"""
+
+import collections
+import contextlib
+import errno
+import os
+import socket
+import stat
+import subprocess
+import unittest
+
+import psutil
+import psutil.tests
+from psutil import FREEBSD
+from psutil import NETBSD
+from psutil import POSIX
+from psutil._common import open_binary
+from psutil._common import open_text
+from psutil._common import supports_ipv6
+from psutil.tests import CI_TESTING
+from psutil.tests import HAS_CONNECTIONS_UNIX
+from psutil.tests import PYTHON_EXE
+from psutil.tests import PsutilTestCase
+from psutil.tests import TestMemoryLeak
+from psutil.tests import bind_socket
+from psutil.tests import bind_unix_socket
+from psutil.tests import call_until
+from psutil.tests import chdir
+from psutil.tests import create_sockets
+from psutil.tests import get_free_port
+from psutil.tests import is_namedtuple
+from psutil.tests import mock
+from psutil.tests import process_namespace
+from psutil.tests import reap_children
+from psutil.tests import retry
+from psutil.tests import retry_on_failure
+from psutil.tests import safe_mkdir
+from psutil.tests import safe_rmpath
+from psutil.tests import serialrun
+from psutil.tests import system_namespace
+from psutil.tests import tcp_socketpair
+from psutil.tests import terminate
+from psutil.tests import unix_socketpair
+from psutil.tests import wait_for_file
+from psutil.tests import wait_for_pid
+
+
+# ===================================================================
+# --- Unit tests for test utilities.
+# ===================================================================
+
+
+class TestRetryDecorator(PsutilTestCase):
+
+ @mock.patch('time.sleep')
+ def test_retry_success(self, sleep):
+ # Fail 3 times out of 5; make sure the decorated fun returns.
+
+ @retry(retries=5, interval=1, logfun=None)
+ def foo():
+ while queue:
+ queue.pop()
+ 1 / 0
+ return 1
+
+ queue = list(range(3))
+ self.assertEqual(foo(), 1)
+ self.assertEqual(sleep.call_count, 3)
+
+ @mock.patch('time.sleep')
+ def test_retry_failure(self, sleep):
+        # Fail 6 times out of 5; the function is supposed to raise exc.
+ @retry(retries=5, interval=1, logfun=None)
+ def foo():
+ while queue:
+ queue.pop()
+ 1 / 0
+ return 1
+
+ queue = list(range(6))
+ self.assertRaises(ZeroDivisionError, foo)
+ self.assertEqual(sleep.call_count, 5)
+
+ @mock.patch('time.sleep')
+ def test_exception_arg(self, sleep):
+ @retry(exception=ValueError, interval=1)
+ def foo():
+ raise TypeError
+
+ self.assertRaises(TypeError, foo)
+ self.assertEqual(sleep.call_count, 0)
+
+ @mock.patch('time.sleep')
+ def test_no_interval_arg(self, sleep):
+ # if interval is not specified sleep is not supposed to be called
+
+ @retry(retries=5, interval=None, logfun=None)
+ def foo():
+ 1 / 0
+
+ self.assertRaises(ZeroDivisionError, foo)
+ self.assertEqual(sleep.call_count, 0)
+
+ @mock.patch('time.sleep')
+ def test_retries_arg(self, sleep):
+
+ @retry(retries=5, interval=1, logfun=None)
+ def foo():
+ 1 / 0
+
+ self.assertRaises(ZeroDivisionError, foo)
+ self.assertEqual(sleep.call_count, 5)
+
+ @mock.patch('time.sleep')
+ def test_retries_and_timeout_args(self, sleep):
+ self.assertRaises(ValueError, retry, retries=5, timeout=1)
+
+
+class TestSyncTestUtils(PsutilTestCase):
+
+ def test_wait_for_pid(self):
+ wait_for_pid(os.getpid())
+ nopid = max(psutil.pids()) + 99999
+ with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])):
+ self.assertRaises(psutil.NoSuchProcess, wait_for_pid, nopid)
+
+ def test_wait_for_file(self):
+ testfn = self.get_testfn()
+ with open(testfn, 'w') as f:
+ f.write('foo')
+ wait_for_file(testfn)
+ assert not os.path.exists(testfn)
+
+ def test_wait_for_file_empty(self):
+ testfn = self.get_testfn()
+ with open(testfn, 'w'):
+ pass
+ wait_for_file(testfn, empty=True)
+ assert not os.path.exists(testfn)
+
+ def test_wait_for_file_no_file(self):
+ testfn = self.get_testfn()
+ with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])):
+ self.assertRaises(IOError, wait_for_file, testfn)
+
+ def test_wait_for_file_no_delete(self):
+ testfn = self.get_testfn()
+ with open(testfn, 'w') as f:
+ f.write('foo')
+ wait_for_file(testfn, delete=False)
+ assert os.path.exists(testfn)
+
+ def test_call_until(self):
+ ret = call_until(lambda: 1, "ret == 1")
+ self.assertEqual(ret, 1)
+
+
+class TestFSTestUtils(PsutilTestCase):
+
+ def test_open_text(self):
+ with open_text(__file__) as f:
+ self.assertEqual(f.mode, 'rt')
+
+ def test_open_binary(self):
+ with open_binary(__file__) as f:
+ self.assertEqual(f.mode, 'rb')
+
+ def test_safe_mkdir(self):
+ testfn = self.get_testfn()
+ safe_mkdir(testfn)
+ assert os.path.isdir(testfn)
+ safe_mkdir(testfn)
+ assert os.path.isdir(testfn)
+
+ def test_safe_rmpath(self):
+ # test file is removed
+ testfn = self.get_testfn()
+ open(testfn, 'w').close()
+ safe_rmpath(testfn)
+ assert not os.path.exists(testfn)
+ # test no exception if path does not exist
+ safe_rmpath(testfn)
+ # test dir is removed
+ os.mkdir(testfn)
+ safe_rmpath(testfn)
+ assert not os.path.exists(testfn)
+ # test other exceptions are raised
+ with mock.patch('psutil.tests.os.stat',
+ side_effect=OSError(errno.EINVAL, "")) as m:
+ with self.assertRaises(OSError):
+ safe_rmpath(testfn)
+ assert m.called
+
+ def test_chdir(self):
+ testfn = self.get_testfn()
+ base = os.getcwd()
+ os.mkdir(testfn)
+ with chdir(testfn):
+ self.assertEqual(os.getcwd(), os.path.join(base, testfn))
+ self.assertEqual(os.getcwd(), base)
+
+
+class TestProcessUtils(PsutilTestCase):
+
+ def test_reap_children(self):
+ subp = self.spawn_testproc()
+ p = psutil.Process(subp.pid)
+ assert p.is_running()
+ reap_children()
+ assert not p.is_running()
+ assert not psutil.tests._pids_started
+ assert not psutil.tests._subprocesses_started
+
+ def test_spawn_children_pair(self):
+ child, grandchild = self.spawn_children_pair()
+ self.assertNotEqual(child.pid, grandchild.pid)
+ assert child.is_running()
+ assert grandchild.is_running()
+ children = psutil.Process().children()
+ self.assertEqual(children, [child])
+ children = psutil.Process().children(recursive=True)
+ self.assertEqual(len(children), 2)
+ self.assertIn(child, children)
+ self.assertIn(grandchild, children)
+ self.assertEqual(child.ppid(), os.getpid())
+ self.assertEqual(grandchild.ppid(), child.pid)
+
+ terminate(child)
+ assert not child.is_running()
+ assert grandchild.is_running()
+
+ terminate(grandchild)
+ assert not grandchild.is_running()
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ def test_spawn_zombie(self):
+ parent, zombie = self.spawn_zombie()
+ self.assertEqual(zombie.status(), psutil.STATUS_ZOMBIE)
+
+ def test_terminate(self):
+ # by subprocess.Popen
+ p = self.spawn_testproc()
+ terminate(p)
+ self.assertProcessGone(p)
+ terminate(p)
+ # by psutil.Process
+ p = psutil.Process(self.spawn_testproc().pid)
+ terminate(p)
+ self.assertProcessGone(p)
+ terminate(p)
+ # by psutil.Popen
+ cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"]
+ p = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ terminate(p)
+ self.assertProcessGone(p)
+ terminate(p)
+ # by PID
+ pid = self.spawn_testproc().pid
+ terminate(pid)
+        assert not psutil.pid_exists(pid)
+ terminate(pid)
+ # zombie
+ if POSIX:
+ parent, zombie = self.spawn_zombie()
+ terminate(parent)
+ terminate(zombie)
+ self.assertProcessGone(parent)
+ self.assertProcessGone(zombie)
+
+
+class TestNetUtils(PsutilTestCase):
+
+ def bind_socket(self):
+ port = get_free_port()
+ with contextlib.closing(bind_socket(addr=('', port))) as s:
+ self.assertEqual(s.getsockname()[1], port)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ def test_bind_unix_socket(self):
+ name = self.get_testfn()
+ sock = bind_unix_socket(name)
+ with contextlib.closing(sock):
+ self.assertEqual(sock.family, socket.AF_UNIX)
+ self.assertEqual(sock.type, socket.SOCK_STREAM)
+ self.assertEqual(sock.getsockname(), name)
+ assert os.path.exists(name)
+ assert stat.S_ISSOCK(os.stat(name).st_mode)
+ # UDP
+ name = self.get_testfn()
+ sock = bind_unix_socket(name, type=socket.SOCK_DGRAM)
+ with contextlib.closing(sock):
+ self.assertEqual(sock.type, socket.SOCK_DGRAM)
+
+ def tcp_tcp_socketpair(self):
+ addr = ("127.0.0.1", get_free_port())
+ server, client = tcp_socketpair(socket.AF_INET, addr=addr)
+ with contextlib.closing(server):
+ with contextlib.closing(client):
+ # Ensure they are connected and the positions are
+ # correct.
+ self.assertEqual(server.getsockname(), addr)
+ self.assertEqual(client.getpeername(), addr)
+ self.assertNotEqual(client.getsockname(), addr)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @unittest.skipIf(NETBSD or FREEBSD,
+ "/var/run/log UNIX socket opened by default")
+ def test_unix_socketpair(self):
+ p = psutil.Process()
+ num_fds = p.num_fds()
+ assert not p.connections(kind='unix')
+ name = self.get_testfn()
+ server, client = unix_socketpair(name)
+ try:
+ assert os.path.exists(name)
+ assert stat.S_ISSOCK(os.stat(name).st_mode)
+ self.assertEqual(p.num_fds() - num_fds, 2)
+ self.assertEqual(len(p.connections(kind='unix')), 2)
+ self.assertEqual(server.getsockname(), name)
+ self.assertEqual(client.getpeername(), name)
+ finally:
+ client.close()
+ server.close()
+
+ def test_create_sockets(self):
+ with create_sockets() as socks:
+ fams = collections.defaultdict(int)
+ types = collections.defaultdict(int)
+ for s in socks:
+ fams[s.family] += 1
+ # work around http://bugs.python.org/issue30204
+ types[s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)] += 1
+ self.assertGreaterEqual(fams[socket.AF_INET], 2)
+ if supports_ipv6():
+ self.assertGreaterEqual(fams[socket.AF_INET6], 2)
+ if POSIX and HAS_CONNECTIONS_UNIX:
+ self.assertGreaterEqual(fams[socket.AF_UNIX], 2)
+ self.assertGreaterEqual(types[socket.SOCK_STREAM], 2)
+ self.assertGreaterEqual(types[socket.SOCK_DGRAM], 2)
+
+
+@serialrun
+class TestMemLeakClass(TestMemoryLeak):
+
+ @retry_on_failure()
+ def test_times(self):
+ def fun():
+ cnt['cnt'] += 1
+ cnt = {'cnt': 0}
+ self.execute(fun, times=10, warmup_times=15)
+ self.assertEqual(cnt['cnt'], 26)
+
+ def test_param_err(self):
+ self.assertRaises(ValueError, self.execute, lambda: 0, times=0)
+ self.assertRaises(ValueError, self.execute, lambda: 0, times=-1)
+ self.assertRaises(ValueError, self.execute, lambda: 0, warmup_times=-1)
+ self.assertRaises(ValueError, self.execute, lambda: 0, tolerance=-1)
+ self.assertRaises(ValueError, self.execute, lambda: 0, retries=-1)
+
+ @retry_on_failure()
+ @unittest.skipIf(CI_TESTING, "skipped on CI")
+ def test_leak_mem(self):
+ ls = []
+
+ def fun(ls=ls):
+ ls.append("x" * 24 * 1024)
+
+ try:
+ # will consume around 3M in total
+ self.assertRaisesRegex(AssertionError, "extra-mem",
+ self.execute, fun, times=50)
+ finally:
+ del ls
+
+ def test_unclosed_files(self):
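+        # Deliberately leak one open file per call; TestMemoryLeak is
+        # expected to flag the unclosed fd (or handle on Windows).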
+ def fun():
+ f = open(__file__)
+ self.addCleanup(f.close)
+ box.append(f)
+
+ box = []
+ kind = "fd" if POSIX else "handle"
+ self.assertRaisesRegex(AssertionError, "unclosed " + kind,
+ self.execute, fun)
+
+ def test_tolerance(self):
+ def fun():
+ ls.append("x" * 24 * 1024)
+ ls = []
+ times = 100
+ self.execute(fun, times=times, warmup_times=0,
+ tolerance=200 * 1024 * 1024)
+ self.assertEqual(len(ls), times + 1)
+
+ def test_execute_w_exc(self):
+ def fun():
+ 1 / 0
+ self.execute_w_exc(ZeroDivisionError, fun)
+ with self.assertRaises(ZeroDivisionError):
+ self.execute_w_exc(OSError, fun)
+
+ def fun():
+ pass
+ with self.assertRaises(AssertionError):
+ self.execute_w_exc(ZeroDivisionError, fun)
+
+
+class TestTestingUtils(PsutilTestCase):
+
+ def test_process_namespace(self):
+ p = psutil.Process()
+ ns = process_namespace(p)
+ ns.test()
+ fun = [x for x in ns.iter(ns.getters) if x[1] == 'ppid'][0][0]
+ self.assertEqual(fun(), p.ppid())
+
+ def test_system_namespace(self):
+ ns = system_namespace()
+ fun = [x for x in ns.iter(ns.getters) if x[1] == 'net_if_addrs'][0][0]
+ self.assertEqual(fun(), psutil.net_if_addrs())
+
+
+class TestOtherUtils(PsutilTestCase):
+
+ def test_is_namedtuple(self):
+ assert is_namedtuple(collections.namedtuple('foo', 'a b c')(1, 2, 3))
+ assert not is_namedtuple(tuple())
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_unicode.py b/lib/psutil/tests/test_unicode.py
new file mode 100644
index 0000000..3fa3f01
--- /dev/null
+++ b/lib/psutil/tests/test_unicode.py
@@ -0,0 +1,355 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Notes about unicode handling in psutil
+======================================
+
+Starting from version 5.3.0 psutil adds unicode support, see:
+https://github.com/giampaolo/psutil/issues/1040
+The notes below apply to *any* API returning a string such as
+process exe(), cwd() or username():
+
+* all strings are encoded by using the OS filesystem encoding
+ (sys.getfilesystemencoding()) which varies depending on the platform
+ (e.g. "UTF-8" on macOS, "mbcs" on Win)
+* no API call is supposed to crash with UnicodeDecodeError
+* instead, in case of badly encoded data returned by the OS, the
+ following error handlers are used to replace the corrupted characters in
+ the string:
+ * Python 3: sys.getfilesystemencodeerrors() (PY 3.6+) or
+ "surrogatescape" on POSIX and "replace" on Windows
+ * Python 2: "replace"
+* on Python 2 all APIs return bytes (str type), never unicode
+* on Python 2, you can go back to unicode by doing:
+
+ >>> unicode(p.exe(), sys.getdefaultencoding(), errors="replace")
+
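+* on Python 3, if the raw bytes are needed, a rough sketch (not part of
+  the psutil API) is to round-trip through the filesystem encoding via
+  the stdlib:
+
+    >>> os.fsencode(p.exe())
+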
+For a detailed explanation of how psutil handles unicode see #1040.
+
+Tests
+=====
+
+List of APIs returning or dealing with a string:
+('not tested' means it is not verified that they handle non-ASCII strings):
+
+* Process.cmdline()
+* Process.connections('unix')
+* Process.cwd()
+* Process.environ()
+* Process.exe()
+* Process.memory_maps()
+* Process.name()
+* Process.open_files()
+* Process.username() (not tested)
+
+* disk_io_counters() (not tested)
+* disk_partitions() (not tested)
+* disk_usage(str)
+* net_connections('unix')
+* net_if_addrs() (not tested)
+* net_if_stats() (not tested)
+* net_io_counters() (not tested)
+* sensors_fans() (not tested)
+* sensors_temperatures() (not tested)
+* users() (not tested)
+
+* WindowsService.binpath() (not tested)
+* WindowsService.description() (not tested)
+* WindowsService.display_name() (not tested)
+* WindowsService.name() (not tested)
+* WindowsService.status() (not tested)
+* WindowsService.username() (not tested)
+
+Here we create a unicode path with a funky non-ASCII name and (where
+possible) have psutil return it back (e.g. on name(), exe(), open_files(),
+etc.) and make sure that:
+
+* psutil never crashes with UnicodeDecodeError
+* the returned path matches
+"""
+
+import os
+import shutil
+import traceback
+import unittest
+import warnings
+from contextlib import closing
+
+import psutil
+from psutil import BSD
+from psutil import OPENBSD
+from psutil import POSIX
+from psutil import WINDOWS
+from psutil._compat import PY3
+from psutil._compat import u
+from psutil.tests import APPVEYOR
+from psutil.tests import ASCII_FS
+from psutil.tests import CI_TESTING
+from psutil.tests import HAS_CONNECTIONS_UNIX
+from psutil.tests import HAS_ENVIRON
+from psutil.tests import HAS_MEMORY_MAPS
+from psutil.tests import INVALID_UNICODE_SUFFIX
+from psutil.tests import PYPY
+from psutil.tests import TESTFN_PREFIX
+from psutil.tests import UNICODE_SUFFIX
+from psutil.tests import PsutilTestCase
+from psutil.tests import bind_unix_socket
+from psutil.tests import chdir
+from psutil.tests import copyload_shared_lib
+from psutil.tests import create_exe
+from psutil.tests import get_testfn
+from psutil.tests import safe_mkdir
+from psutil.tests import safe_rmpath
+from psutil.tests import serialrun
+from psutil.tests import skip_on_access_denied
+from psutil.tests import spawn_testproc
+from psutil.tests import terminate
+
+
+if APPVEYOR:
+ def safe_rmpath(path): # NOQA
+        # TODO - this is quite random and I'm not sure why it happens,
+        # nor can I reproduce it locally:
+ # https://ci.appveyor.com/project/giampaolo/psutil/build/job/
+ # jiq2cgd6stsbtn60
+ # safe_rmpath() happens after reap_children() so this is weird
+ # Perhaps wait_procs() on Windows is broken? Maybe because
+ # of STILL_ACTIVE?
+ # https://github.com/giampaolo/psutil/blob/
+ # 68c7a70728a31d8b8b58f4be6c4c0baa2f449eda/psutil/arch/
+ # windows/process_info.c#L146
+ from psutil.tests import safe_rmpath as rm
+ try:
+ return rm(path)
+ except WindowsError:
+ traceback.print_exc()
+
+
+def try_unicode(suffix):
+ """Return True if both the fs and the subprocess module can
+ deal with a unicode file name.
+ """
+ sproc = None
+ testfn = get_testfn(suffix=suffix)
+ try:
+ safe_rmpath(testfn)
+ create_exe(testfn)
+ sproc = spawn_testproc(cmd=[testfn])
+ shutil.copyfile(testfn, testfn + '-2')
+ safe_rmpath(testfn + '-2')
+ except (UnicodeEncodeError, IOError):
+ return False
+ else:
+ return True
+ finally:
+ if sproc is not None:
+ terminate(sproc)
+ safe_rmpath(testfn)
+
+
+# ===================================================================
+# FS APIs
+# ===================================================================
+
+
+class BaseUnicodeTest(PsutilTestCase):
+ funky_suffix = None
+
+ def setUp(self):
+ if self.funky_suffix is not None:
+ if not try_unicode(self.funky_suffix):
+ raise self.skipTest("can't handle unicode str")
+
+
+@serialrun
+@unittest.skipIf(ASCII_FS, "ASCII fs")
+@unittest.skipIf(PYPY and not PY3, "too much trouble on PYPY2")
+class TestFSAPIs(BaseUnicodeTest):
+ """Test FS APIs with a funky, valid, UTF8 path name."""
+
+ funky_suffix = UNICODE_SUFFIX
+
+ @classmethod
+ def setUpClass(cls):
+ cls.funky_name = get_testfn(suffix=cls.funky_suffix)
+ create_exe(cls.funky_name)
+
+ @classmethod
+ def tearDownClass(cls):
+ safe_rmpath(cls.funky_name)
+
+ def expect_exact_path_match(self):
+ # Do not expect psutil to correctly handle unicode paths on
+        # Python 2 if os.listdir() is not able to either.
+ here = '.' if isinstance(self.funky_name, str) else u('.')
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ return self.funky_name in os.listdir(here)
+
+ # ---
+
+ def test_proc_exe(self):
+ subp = self.spawn_testproc(cmd=[self.funky_name])
+ p = psutil.Process(subp.pid)
+ exe = p.exe()
+ self.assertIsInstance(exe, str)
+ if self.expect_exact_path_match():
+ self.assertEqual(os.path.normcase(exe),
+ os.path.normcase(self.funky_name))
+
+ def test_proc_name(self):
+ subp = self.spawn_testproc(cmd=[self.funky_name])
+ name = psutil.Process(subp.pid).name()
+ self.assertIsInstance(name, str)
+ if self.expect_exact_path_match():
+ self.assertEqual(name, os.path.basename(self.funky_name))
+
+ def test_proc_cmdline(self):
+ subp = self.spawn_testproc(cmd=[self.funky_name])
+ p = psutil.Process(subp.pid)
+ cmdline = p.cmdline()
+ for part in cmdline:
+ self.assertIsInstance(part, str)
+ if self.expect_exact_path_match():
+ self.assertEqual(cmdline, [self.funky_name])
+
+ def test_proc_cwd(self):
+ dname = self.funky_name + "2"
+ self.addCleanup(safe_rmpath, dname)
+ safe_mkdir(dname)
+ with chdir(dname):
+ p = psutil.Process()
+ cwd = p.cwd()
+ self.assertIsInstance(p.cwd(), str)
+ if self.expect_exact_path_match():
+ self.assertEqual(cwd, dname)
+
+ @unittest.skipIf(PYPY and WINDOWS, "fails on PYPY + WINDOWS")
+ def test_proc_open_files(self):
+ p = psutil.Process()
+ start = set(p.open_files())
+ with open(self.funky_name, 'rb'):
+ new = set(p.open_files())
+ path = (new - start).pop().path
+ self.assertIsInstance(path, str)
+ if BSD and not path:
+ # XXX - see https://github.com/giampaolo/psutil/issues/595
+ return self.skipTest("open_files on BSD is broken")
+ if self.expect_exact_path_match():
+ self.assertEqual(os.path.normcase(path),
+ os.path.normcase(self.funky_name))
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ def test_proc_connections(self):
+ name = self.get_testfn(suffix=self.funky_suffix)
+ try:
+ sock = bind_unix_socket(name)
+ except UnicodeEncodeError:
+ if PY3:
+ raise
+ else:
+ raise unittest.SkipTest("not supported")
+ with closing(sock):
+ conn = psutil.Process().connections('unix')[0]
+ self.assertIsInstance(conn.laddr, str)
+ # AF_UNIX addr not set on OpenBSD
+ if not OPENBSD: # XXX
+ self.assertEqual(conn.laddr, name)
+
+ @unittest.skipIf(not POSIX, "POSIX only")
+ @unittest.skipIf(not HAS_CONNECTIONS_UNIX, "can't list UNIX sockets")
+ @skip_on_access_denied()
+ def test_net_connections(self):
+ def find_sock(cons):
+ for conn in cons:
+ if os.path.basename(conn.laddr).startswith(TESTFN_PREFIX):
+ return conn
+ raise ValueError("connection not found")
+
+ name = self.get_testfn(suffix=self.funky_suffix)
+ try:
+ sock = bind_unix_socket(name)
+ except UnicodeEncodeError:
+ if PY3:
+ raise
+ else:
+ raise unittest.SkipTest("not supported")
+ with closing(sock):
+ cons = psutil.net_connections(kind='unix')
+ # AF_UNIX addr not set on OpenBSD
+ if not OPENBSD:
+ conn = find_sock(cons)
+ self.assertIsInstance(conn.laddr, str)
+ self.assertEqual(conn.laddr, name)
+
+ def test_disk_usage(self):
+ dname = self.funky_name + "2"
+ self.addCleanup(safe_rmpath, dname)
+ safe_mkdir(dname)
+ psutil.disk_usage(dname)
+
+ @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
+ @unittest.skipIf(not PY3, "ctypes does not support unicode on PY2")
+ @unittest.skipIf(PYPY, "unstable on PYPY")
+ def test_memory_maps(self):
+ # XXX: on Python 2, using ctypes.CDLL with a unicode path
+ # opens a message box which blocks the test run.
+ with copyload_shared_lib(suffix=self.funky_suffix) as funky_path:
+ def normpath(p):
+ return os.path.realpath(os.path.normcase(p))
+ libpaths = [normpath(x.path)
+ for x in psutil.Process().memory_maps()]
+ # ...just to have a clearer msg in case of failure
+ libpaths = [x for x in libpaths if TESTFN_PREFIX in x]
+ self.assertIn(normpath(funky_path), libpaths)
+ for path in libpaths:
+ self.assertIsInstance(path, str)
+
+
+@unittest.skipIf(CI_TESTING, "unreliable on CI")
+class TestFSAPIsWithInvalidPath(TestFSAPIs):
+ """Test FS APIs with a funky, invalid path name."""
+ funky_suffix = INVALID_UNICODE_SUFFIX
+
+ @classmethod
+ def expect_exact_path_match(cls):
+ # Invalid unicode names are supposed to work on Python 2.
+ return True
+
+
+# ===================================================================
+# Non fs APIs
+# ===================================================================
+
+
+class TestNonFSAPIS(BaseUnicodeTest):
+ """Unicode tests for non fs-related APIs."""
+ funky_suffix = UNICODE_SUFFIX if PY3 else 'è'
+
+ @unittest.skipIf(not HAS_ENVIRON, "not supported")
+ @unittest.skipIf(PYPY and WINDOWS, "segfaults on PYPY + WINDOWS")
+ def test_proc_environ(self):
+        # Note: unlike the others, this test does not deal with fs
+        # paths. On Python 2 the subprocess module is broken, as it's
+        # not able to handle non-ASCII env vars, so we use "è", which
+        # is part of the extended ASCII table (unicode code point <= 255).
+ env = os.environ.copy()
+ env['FUNNY_ARG'] = self.funky_suffix
+ sproc = self.spawn_testproc(env=env)
+ p = psutil.Process(sproc.pid)
+ env = p.environ()
+ for k, v in env.items():
+ self.assertIsInstance(k, str)
+ self.assertIsInstance(v, str)
+ self.assertEqual(env['FUNNY_ARG'], self.funky_suffix)
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)
diff --git a/lib/psutil/tests/test_windows.py b/lib/psutil/tests/test_windows.py
new file mode 100644
index 0000000..55e6731
--- /dev/null
+++ b/lib/psutil/tests/test_windows.py
@@ -0,0 +1,898 @@
+#!/usr/bin/env python3
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Windows specific tests."""
+
+import datetime
+import errno
+import glob
+import os
+import platform
+import re
+import signal
+import subprocess
+import sys
+import time
+import unittest
+import warnings
+
+import psutil
+from psutil import WINDOWS
+from psutil._compat import FileNotFoundError
+from psutil._compat import which
+from psutil._compat import super
+from psutil.tests import APPVEYOR
+from psutil.tests import GITHUB_ACTIONS
+from psutil.tests import HAS_BATTERY
+from psutil.tests import IS_64BIT
+from psutil.tests import PY3
+from psutil.tests import PYPY
+from psutil.tests import TOLERANCE_DISK_USAGE
+from psutil.tests import TOLERANCE_SYS_MEM
+from psutil.tests import PsutilTestCase
+from psutil.tests import mock
+from psutil.tests import retry_on_failure
+from psutil.tests import sh
+from psutil.tests import spawn_testproc
+from psutil.tests import terminate
+
+
+if WINDOWS and not PYPY:
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ import win32api # requires "pip install pywin32"
+ import win32con
+ import win32process
+ import wmi # requires "pip install wmi" / "make setup-dev-env"
+
+if WINDOWS:
+ from psutil._pswindows import convert_oserror
+
+
+cext = psutil._psplatform.cext
+
+
+@unittest.skipIf(not WINDOWS, "WINDOWS only")
+@unittest.skipIf(PYPY, "pywin32 not available on PYPY")
+# https://github.com/giampaolo/psutil/pull/1762#issuecomment-632892692
+@unittest.skipIf(GITHUB_ACTIONS and not PY3, "pywin32 broken on GITHUB + PY2")
+class WindowsTestCase(PsutilTestCase):
+ pass
+
+
+def powershell(cmd):
+ """Currently not used, but avalable just in case. Usage:
+
+ >>> powershell(
+ "Get-CIMInstance Win32_PageFileUsage | Select AllocatedBaseSize")
+ """
+ if not which("powershell.exe"):
+ raise unittest.SkipTest("powershell.exe not available")
+ cmdline = \
+ 'powershell.exe -ExecutionPolicy Bypass -NoLogo -NonInteractive ' + \
+ '-NoProfile -WindowStyle Hidden -Command "%s"' % cmd
+ return sh(cmdline)
+
+
+def wmic(path, what, converter=int):
+ """Currently not used, but avalable just in case. Usage:
+
+ >>> wmic("Win32_OperatingSystem", "FreePhysicalMemory")
+ 2134124534
+ """
+ out = sh("wmic path %s get %s" % (path, what)).strip()
+ data = "".join(out.splitlines()[1:]).strip() # get rid of the header
+ if converter is not None:
+ if "," in what:
+ return tuple([converter(x) for x in data.split()])
+ else:
+ return converter(data)
+ else:
+ return data
+
+
+# ===================================================================
+# System APIs
+# ===================================================================
+
+
+class TestCpuAPIs(WindowsTestCase):
+
+ @unittest.skipIf('NUMBER_OF_PROCESSORS' not in os.environ,
+ 'NUMBER_OF_PROCESSORS env var is not available')
+ def test_cpu_count_vs_NUMBER_OF_PROCESSORS(self):
+ # Will likely fail on many-cores systems:
+ # https://stackoverflow.com/questions/31209256
+ num_cpus = int(os.environ['NUMBER_OF_PROCESSORS'])
+ self.assertEqual(num_cpus, psutil.cpu_count())
+
+ def test_cpu_count_vs_GetSystemInfo(self):
+ # Will likely fail on many-cores systems:
+ # https://stackoverflow.com/questions/31209256
+ sys_value = win32api.GetSystemInfo()[5]
+ psutil_value = psutil.cpu_count()
+ self.assertEqual(sys_value, psutil_value)
+
+ def test_cpu_count_logical_vs_wmi(self):
+ w = wmi.WMI()
+ procs = sum(proc.NumberOfLogicalProcessors
+ for proc in w.Win32_Processor())
+ self.assertEqual(psutil.cpu_count(), procs)
+
+ def test_cpu_count_cores_vs_wmi(self):
+ w = wmi.WMI()
+ cores = sum(proc.NumberOfCores for proc in w.Win32_Processor())
+ self.assertEqual(psutil.cpu_count(logical=False), cores)
+
+ def test_cpu_count_vs_cpu_times(self):
+ self.assertEqual(psutil.cpu_count(),
+ len(psutil.cpu_times(percpu=True)))
+
+ def test_cpu_freq(self):
+ w = wmi.WMI()
+ proc = w.Win32_Processor()[0]
+ self.assertEqual(proc.CurrentClockSpeed, psutil.cpu_freq().current)
+ self.assertEqual(proc.MaxClockSpeed, psutil.cpu_freq().max)
+
+
+class TestSystemAPIs(WindowsTestCase):
+
+ def test_nic_names(self):
+ out = sh('ipconfig /all')
+ nics = psutil.net_io_counters(pernic=True).keys()
+ for nic in nics:
+ if "pseudo-interface" in nic.replace(' ', '-').lower():
+ continue
+ if nic not in out:
+ raise self.fail(
+ "%r nic wasn't found in 'ipconfig /all' output" % nic)
+
+ def test_total_phymem(self):
+ w = wmi.WMI().Win32_ComputerSystem()[0]
+ self.assertEqual(int(w.TotalPhysicalMemory),
+ psutil.virtual_memory().total)
+
+ def test_free_phymem(self):
+ w = wmi.WMI().Win32_PerfRawData_PerfOS_Memory()[0]
+ self.assertAlmostEqual(
+ int(w.AvailableBytes), psutil.virtual_memory().free,
+ delta=TOLERANCE_SYS_MEM)
+
+ def test_total_swapmem(self):
+ w = wmi.WMI().Win32_PerfRawData_PerfOS_Memory()[0]
+ self.assertEqual(int(w.CommitLimit) - psutil.virtual_memory().total,
+ psutil.swap_memory().total)
+ if (psutil.swap_memory().total == 0):
+ self.assertEqual(0, psutil.swap_memory().free)
+ self.assertEqual(0, psutil.swap_memory().used)
+
+ def test_percent_swapmem(self):
+ if (psutil.swap_memory().total > 0):
+ w = wmi.WMI().Win32_PerfRawData_PerfOS_PagingFile(
+ Name="_Total")[0]
+            # convert the raw WMI perf counters into a usage percentage
+ percentSwap = int(w.PercentUsage) * 100 / int(w.PercentUsage_Base)
+ # exact percent may change but should be reasonable
+ # assert within +/- 5% and between 0 and 100%
+ self.assertGreaterEqual(psutil.swap_memory().percent, 0)
+ self.assertAlmostEqual(psutil.swap_memory().percent, percentSwap,
+ delta=5)
+ self.assertLessEqual(psutil.swap_memory().percent, 100)
+
+ # @unittest.skipIf(wmi is None, "wmi module is not installed")
+ # def test__UPTIME(self):
+ # # _UPTIME constant is not public but it is used internally
+ # # as value to return for pid 0 creation time.
+ # # WMI behaves the same.
+ # w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ # p = psutil.Process(0)
+ # wmic_create = str(w.CreationDate.split('.')[0])
+ # psutil_create = time.strftime("%Y%m%d%H%M%S",
+ # time.localtime(p.create_time()))
+
+ # Note: this test is not very reliable
+ @unittest.skipIf(APPVEYOR, "test not relieable on appveyor")
+ @retry_on_failure()
+ def test_pids(self):
+ # Note: this test might fail if the OS is starting/killing
+ # other processes in the meantime
+ w = wmi.WMI().Win32_Process()
+ wmi_pids = set([x.ProcessId for x in w])
+ psutil_pids = set(psutil.pids())
+ self.assertEqual(wmi_pids, psutil_pids)
+
+ @retry_on_failure()
+ def test_disks(self):
+ ps_parts = psutil.disk_partitions(all=True)
+ wmi_parts = wmi.WMI().Win32_LogicalDisk()
+ for ps_part in ps_parts:
+ for wmi_part in wmi_parts:
+ if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
+ if not ps_part.mountpoint:
+ # this is usually a CD-ROM with no disk inserted
+ break
+ if 'cdrom' in ps_part.opts:
+ break
+ if ps_part.mountpoint.startswith('A:'):
+ break # floppy
+ try:
+ usage = psutil.disk_usage(ps_part.mountpoint)
+ except FileNotFoundError:
+ # usually this is the floppy
+ break
+ self.assertEqual(usage.total, int(wmi_part.Size))
+ wmi_free = int(wmi_part.FreeSpace)
+ self.assertEqual(usage.free, wmi_free)
+ # 10 MB tolerance
+ if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
+ raise self.fail("psutil=%s, wmi=%s" % (
+ usage.free, wmi_free))
+ break
+ else:
+ raise self.fail("can't find partition %s" % repr(ps_part))
+
+ @retry_on_failure()
+ def test_disk_usage(self):
+ for disk in psutil.disk_partitions():
+ if 'cdrom' in disk.opts:
+ continue
+ sys_value = win32api.GetDiskFreeSpaceEx(disk.mountpoint)
+ psutil_value = psutil.disk_usage(disk.mountpoint)
+ self.assertAlmostEqual(sys_value[0], psutil_value.free,
+ delta=TOLERANCE_DISK_USAGE)
+ self.assertAlmostEqual(sys_value[1], psutil_value.total,
+ delta=TOLERANCE_DISK_USAGE)
+ self.assertEqual(psutil_value.used,
+ psutil_value.total - psutil_value.free)
+
+ def test_disk_partitions(self):
+ sys_value = [
+ x + '\\' for x in win32api.GetLogicalDriveStrings().split("\\\x00")
+ if x and not x.startswith('A:')]
+ psutil_value = [x.mountpoint for x in psutil.disk_partitions(all=True)
+ if not x.mountpoint.startswith('A:')]
+ self.assertEqual(sys_value, psutil_value)
+
+ def test_net_if_stats(self):
+ ps_names = set(cext.net_if_stats())
+ wmi_adapters = wmi.WMI().Win32_NetworkAdapter()
+ wmi_names = set()
+ for wmi_adapter in wmi_adapters:
+ wmi_names.add(wmi_adapter.Name)
+ wmi_names.add(wmi_adapter.NetConnectionID)
+ self.assertTrue(ps_names & wmi_names,
+ "no common entries in %s, %s" % (ps_names, wmi_names))
+
+ def test_boot_time(self):
+ wmi_os = wmi.WMI().Win32_OperatingSystem()
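+        # LastBootUpTime is a CIM datetime string such as
+        # "20221022103000.500000+120"; the part before the "." matches
+        # the "%Y%m%d%H%M%S" format parsed below.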
+ wmi_btime_str = wmi_os[0].LastBootUpTime.split('.')[0]
+ wmi_btime_dt = datetime.datetime.strptime(
+ wmi_btime_str, "%Y%m%d%H%M%S")
+ psutil_dt = datetime.datetime.fromtimestamp(psutil.boot_time())
+ diff = abs((wmi_btime_dt - psutil_dt).total_seconds())
+ self.assertLessEqual(diff, 5)
+
+ def test_boot_time_fluctuation(self):
+ # https://github.com/giampaolo/psutil/issues/1007
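+        # psutil is expected to ignore small fluctuations (here +/- 1 sec)
+        # in the value returned by the C layer, and to only pick up a
+        # genuinely different boot time (333).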
+ with mock.patch('psutil._pswindows.cext.boot_time', return_value=5):
+ self.assertEqual(psutil.boot_time(), 5)
+ with mock.patch('psutil._pswindows.cext.boot_time', return_value=4):
+ self.assertEqual(psutil.boot_time(), 5)
+ with mock.patch('psutil._pswindows.cext.boot_time', return_value=6):
+ self.assertEqual(psutil.boot_time(), 5)
+ with mock.patch('psutil._pswindows.cext.boot_time', return_value=333):
+ self.assertEqual(psutil.boot_time(), 333)
+
+
+# ===================================================================
+# sensors_battery()
+# ===================================================================
+
+
+class TestSensorsBattery(WindowsTestCase):
+
+ def test_has_battery(self):
+ if win32api.GetPwrCapabilities()['SystemBatteriesPresent']:
+ self.assertIsNotNone(psutil.sensors_battery())
+ else:
+ self.assertIsNone(psutil.sensors_battery())
+
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_percent(self):
+ w = wmi.WMI()
+ battery_wmi = w.query('select * from Win32_Battery')[0]
+ battery_psutil = psutil.sensors_battery()
+ self.assertAlmostEqual(
+ battery_psutil.percent, battery_wmi.EstimatedChargeRemaining,
+ delta=1)
+
+ @unittest.skipIf(not HAS_BATTERY, "no battery")
+ def test_power_plugged(self):
+ w = wmi.WMI()
+ battery_wmi = w.query('select * from Win32_Battery')[0]
+ battery_psutil = psutil.sensors_battery()
+ # Status codes:
+ # https://msdn.microsoft.com/en-us/library/aa394074(v=vs.85).aspx
+ self.assertEqual(battery_psutil.power_plugged,
+ battery_wmi.BatteryStatus == 2)
+
+ def test_emulate_no_battery(self):
+ with mock.patch("psutil._pswindows.cext.sensors_battery",
+ return_value=(0, 128, 0, 0)) as m:
+ self.assertIsNone(psutil.sensors_battery())
+ assert m.called
+
+ def test_emulate_power_connected(self):
+ with mock.patch("psutil._pswindows.cext.sensors_battery",
+ return_value=(1, 0, 0, 0)) as m:
+ self.assertEqual(psutil.sensors_battery().secsleft,
+ psutil.POWER_TIME_UNLIMITED)
+ assert m.called
+
+ def test_emulate_power_charging(self):
+ with mock.patch("psutil._pswindows.cext.sensors_battery",
+ return_value=(0, 8, 0, 0)) as m:
+ self.assertEqual(psutil.sensors_battery().secsleft,
+ psutil.POWER_TIME_UNLIMITED)
+ assert m.called
+
+ def test_emulate_secs_left_unknown(self):
+ with mock.patch("psutil._pswindows.cext.sensors_battery",
+ return_value=(0, 0, 0, -1)) as m:
+ self.assertEqual(psutil.sensors_battery().secsleft,
+ psutil.POWER_TIME_UNKNOWN)
+ assert m.called
+
+
+# ===================================================================
+# Process APIs
+# ===================================================================
+
+
+class TestProcess(WindowsTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc().pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ def test_issue_24(self):
+ p = psutil.Process(0)
+ self.assertRaises(psutil.AccessDenied, p.kill)
+
+ def test_special_pid(self):
+ p = psutil.Process(4)
+ self.assertEqual(p.name(), 'System')
+ # use __str__ to access all common Process properties to check
+ # that nothing strange happens
+ str(p)
+ p.username()
+ self.assertTrue(p.create_time() >= 0.0)
+ try:
+ rss, vms = p.memory_info()[:2]
+ except psutil.AccessDenied:
+ # expected on Windows Vista and Windows 7
+            if platform.uname()[1] not in ('vista', 'win-7', 'win7'):
+ raise
+ else:
+ self.assertTrue(rss > 0)
+
+ def test_send_signal(self):
+ p = psutil.Process(self.pid)
+ self.assertRaises(ValueError, p.send_signal, signal.SIGINT)
+
+ def test_num_handles_increment(self):
+ p = psutil.Process(os.getpid())
+ before = p.num_handles()
+ handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ win32con.FALSE, os.getpid())
+ after = p.num_handles()
+ self.assertEqual(after, before + 1)
+ win32api.CloseHandle(handle)
+ self.assertEqual(p.num_handles(), before)
+
+ def test_ctrl_signals(self):
+ p = psutil.Process(self.spawn_testproc().pid)
+ p.send_signal(signal.CTRL_C_EVENT)
+ p.send_signal(signal.CTRL_BREAK_EVENT)
+ p.kill()
+ p.wait()
+ self.assertRaises(psutil.NoSuchProcess,
+ p.send_signal, signal.CTRL_C_EVENT)
+ self.assertRaises(psutil.NoSuchProcess,
+ p.send_signal, signal.CTRL_BREAK_EVENT)
+
+ def test_username(self):
+ name = win32api.GetUserNameEx(win32con.NameSamCompatible)
+ if name.endswith('$'):
+ # When running as a service account (most likely to be
+ # NetworkService), these user name calculations don't produce the
+ # same result, causing the test to fail.
+ raise unittest.SkipTest('running as service account')
+ self.assertEqual(psutil.Process().username(), name)
+
+ def test_cmdline(self):
+ sys_value = re.sub('[ ]+', ' ', win32api.GetCommandLine()).strip()
+ psutil_value = ' '.join(psutil.Process().cmdline())
+ if sys_value[0] == '"' != psutil_value[0]:
+ # The PyWin32 command line may retain quotes around argv[0] if they
+ # were used unnecessarily, while psutil will omit them. So remove
+ # the first 2 quotes from sys_value if not in psutil_value.
+ # A path to an executable will not contain quotes, so this is safe.
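+            # e.g. a (hypothetical) '"C:\Python39\python.exe" -m x'
+            # becomes 'C:\Python39\python.exe -m x'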
+ sys_value = sys_value.replace('"', '', 2)
+ self.assertEqual(sys_value, psutil_value)
+
+ # XXX - occasional failures
+
+ # def test_cpu_times(self):
+ # handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ # win32con.FALSE, os.getpid())
+ # self.addCleanup(win32api.CloseHandle, handle)
+ # sys_value = win32process.GetProcessTimes(handle)
+ # psutil_value = psutil.Process().cpu_times()
+ # self.assertAlmostEqual(
+ # psutil_value.user, sys_value['UserTime'] / 10000000.0,
+ # delta=0.2)
+ # self.assertAlmostEqual(
+ # psutil_value.user, sys_value['KernelTime'] / 10000000.0,
+ # delta=0.2)
+
+ def test_nice(self):
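+ # On Windows, nice() maps to the Win32 process priority class, so it
+ # can be cross-checked against win32process.GetPriorityClass().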
+ handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ win32con.FALSE, os.getpid())
+ self.addCleanup(win32api.CloseHandle, handle)
+ sys_value = win32process.GetPriorityClass(handle)
+ psutil_value = psutil.Process().nice()
+ self.assertEqual(psutil_value, sys_value)
+
+ def test_memory_info(self):
+ handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ win32con.FALSE, self.pid)
+ self.addCleanup(win32api.CloseHandle, handle)
+ sys_value = win32process.GetProcessMemoryInfo(handle)
+ psutil_value = psutil.Process(self.pid).memory_info()
+ self.assertEqual(
+ sys_value['PeakWorkingSetSize'], psutil_value.peak_wset)
+ self.assertEqual(
+ sys_value['WorkingSetSize'], psutil_value.wset)
+ self.assertEqual(
+ sys_value['QuotaPeakPagedPoolUsage'], psutil_value.peak_paged_pool)
+ self.assertEqual(
+ sys_value['QuotaPagedPoolUsage'], psutil_value.paged_pool)
+ self.assertEqual(
+ sys_value['QuotaPeakNonPagedPoolUsage'],
+ psutil_value.peak_nonpaged_pool)
+ self.assertEqual(
+ sys_value['QuotaNonPagedPoolUsage'], psutil_value.nonpaged_pool)
+ self.assertEqual(
+ sys_value['PagefileUsage'], psutil_value.pagefile)
+ self.assertEqual(
+ sys_value['PeakPagefileUsage'], psutil_value.peak_pagefile)
+
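+ # On Windows psutil aliases rss to wset and vms to pagefile.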
+ self.assertEqual(psutil_value.rss, psutil_value.wset)
+ self.assertEqual(psutil_value.vms, psutil_value.pagefile)
+
+ def test_wait(self):
+ handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ win32con.FALSE, self.pid)
+ self.addCleanup(win32api.CloseHandle, handle)
+ p = psutil.Process(self.pid)
+ p.terminate()
+ psutil_value = p.wait()
+ sys_value = win32process.GetExitCodeProcess(handle)
+ self.assertEqual(psutil_value, sys_value)
+
+ def test_cpu_affinity(self):
+ def from_bitmask(x):
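+ # e.g. an affinity mask of 0b1011 maps to CPUs [0, 1, 3]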
+ return [i for i in range(64) if (1 << i) & x]
+
+ handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ win32con.FALSE, self.pid)
+ self.addCleanup(win32api.CloseHandle, handle)
+ sys_value = from_bitmask(
+ win32process.GetProcessAffinityMask(handle)[0])
+ psutil_value = psutil.Process(self.pid).cpu_affinity()
+ self.assertEqual(psutil_value, sys_value)
+
+ def test_io_counters(self):
+ handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
+ win32con.FALSE, os.getpid())
+ self.addCleanup(win32api.CloseHandle, handle)
+ sys_value = win32process.GetProcessIoCounters(handle)
+ psutil_value = psutil.Process().io_counters()
+ self.assertEqual(
+ psutil_value.read_count, sys_value['ReadOperationCount'])
+ self.assertEqual(
+ psutil_value.write_count, sys_value['WriteOperationCount'])
+ self.assertEqual(
+ psutil_value.read_bytes, sys_value['ReadTransferCount'])
+ self.assertEqual(
+ psutil_value.write_bytes, sys_value['WriteTransferCount'])
+ self.assertEqual(
+ psutil_value.other_count, sys_value['OtherOperationCount'])
+ self.assertEqual(
+ psutil_value.other_bytes, sys_value['OtherTransferCount'])
+
+ def test_num_handles(self):
+ import ctypes
+ import ctypes.wintypes
+ PROCESS_QUERY_INFORMATION = 0x400
+ handle = ctypes.windll.kernel32.OpenProcess(
+ PROCESS_QUERY_INFORMATION, 0, self.pid)
+ self.addCleanup(ctypes.windll.kernel32.CloseHandle, handle)
+
+ hndcnt = ctypes.wintypes.DWORD()
+ ctypes.windll.kernel32.GetProcessHandleCount(
+ handle, ctypes.byref(hndcnt))
+ sys_value = hndcnt.value
+ psutil_value = psutil.Process(self.pid).num_handles()
+ self.assertEqual(psutil_value, sys_value)
+
+ def test_error_partial_copy(self):
+ # https://github.com/giampaolo/psutil/issues/875
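+ # winerror 299 is ERROR_PARTIAL_COPY; psutil is expected to retry
+ # the call a few times (hence the mocked time.sleep) and eventually
+ # give up with AccessDenied.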
+ exc = WindowsError()
+ exc.winerror = 299
+ with mock.patch("psutil._psplatform.cext.proc_cwd", side_effect=exc):
+ with mock.patch("time.sleep") as m:
+ p = psutil.Process()
+ self.assertRaises(psutil.AccessDenied, p.cwd)
+ self.assertGreaterEqual(m.call_count, 5)
+
+ def test_exe(self):
+ # NtQuerySystemInformation succeeds even if the process is gone. Make
+ # sure exe() raises NoSuchProcess for a non-existent pid.
+ pid = psutil.pids()[-1] + 99999
+ proc = psutil._psplatform.Process(pid)
+ self.assertRaises(psutil.NoSuchProcess, proc.exe)
+
+
+class TestProcessWMI(WindowsTestCase):
+ """Compare Process API results with WMI."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc().pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ def test_name(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ self.assertEqual(p.name(), w.Caption)
+
+ # This fails on GitHub Actions because a virtualenv is used for the
+ # test environment.
+ @unittest.skipIf(GITHUB_ACTIONS, "unreliable path on GITHUB_ACTIONS")
+ def test_exe(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ # Note: WMI reports the exe path as a lowercase string.
+ # Since Windows paths are case-insensitive, compare lowercased values.
+ self.assertEqual(p.exe().lower(), w.ExecutablePath.lower())
+
+ def test_cmdline(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ self.assertEqual(' '.join(p.cmdline()),
+ w.CommandLine.replace('"', ''))
+
+ def test_username(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ domain, _, username = w.GetOwner()
+ username = "%s\\%s" % (domain, username)
+ self.assertEqual(p.username(), username)
+
+ @retry_on_failure()
+ def test_memory_rss(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ rss = p.memory_info().rss
+ self.assertEqual(rss, int(w.WorkingSetSize))
+
+ @retry_on_failure()
+ def test_memory_vms(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ vms = p.memory_info().vms
+ # http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx
+ # ...claims that PageFileUsage is expressed in kilobytes, but on
+ # certain platforms it is returned in bytes instead, so accept
+ # either unit.
+ wmi_usage = int(w.PageFileUsage)
+ if (vms != wmi_usage) and (vms != wmi_usage * 1024):
+ self.fail("wmi=%s, psutil=%s" % (wmi_usage, vms))
+
+ def test_create_time(self):
+ w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
+ p = psutil.Process(self.pid)
+ wmic_create = str(w.CreationDate.split('.')[0])
+ psutil_create = time.strftime("%Y%m%d%H%M%S",
+ time.localtime(p.create_time()))
+ self.assertEqual(wmic_create, psutil_create)
+
+
+# ---
+
+
+@unittest.skipIf(not WINDOWS, "WINDOWS only")
+class TestDualProcessImplementation(PsutilTestCase):
+ """
+ Certain APIs on Windows have 2 internal implementations, one
+ based on documented Windows APIs, another one based
+ NtQuerySystemInformation() which gets called as fallback in
+ case the first fails because of limited permission error.
+ Here we test that the two methods return the exact same value,
+ see:
+ https://github.com/giampaolo/psutil/issues/304
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.pid = spawn_testproc().pid
+
+ @classmethod
+ def tearDownClass(cls):
+ terminate(cls.pid)
+
+ def test_memory_info(self):
+ mem_1 = psutil.Process(self.pid).memory_info()
+ with mock.patch("psutil._psplatform.cext.proc_memory_info",
+ side_effect=OSError(errno.EPERM, "msg")) as fun:
+ mem_2 = psutil.Process(self.pid).memory_info()
+ self.assertEqual(len(mem_1), len(mem_2))
+ for i in range(len(mem_1)):
+ self.assertGreaterEqual(mem_1[i], 0)
+ self.assertGreaterEqual(mem_2[i], 0)
+ self.assertAlmostEqual(mem_1[i], mem_2[i], delta=512)
+ assert fun.called
+
+ def test_create_time(self):
+ ctime = psutil.Process(self.pid).create_time()
+ with mock.patch("psutil._psplatform.cext.proc_times",
+ side_effect=OSError(errno.EPERM, "msg")) as fun:
+ self.assertEqual(psutil.Process(self.pid).create_time(), ctime)
+ assert fun.called
+
+ def test_cpu_times(self):
+ cpu_times_1 = psutil.Process(self.pid).cpu_times()
+ with mock.patch("psutil._psplatform.cext.proc_times",
+ side_effect=OSError(errno.EPERM, "msg")) as fun:
+ cpu_times_2 = psutil.Process(self.pid).cpu_times()
+ assert fun.called
+ self.assertAlmostEqual(
+ cpu_times_1.user, cpu_times_2.user, delta=0.01)
+ self.assertAlmostEqual(
+ cpu_times_1.system, cpu_times_2.system, delta=0.01)
+
+ def test_io_counters(self):
+ io_counters_1 = psutil.Process(self.pid).io_counters()
+ with mock.patch("psutil._psplatform.cext.proc_io_counters",
+ side_effect=OSError(errno.EPERM, "msg")) as fun:
+ io_counters_2 = psutil.Process(self.pid).io_counters()
+ for i in range(len(io_counters_1)):
+ self.assertAlmostEqual(
+ io_counters_1[i], io_counters_2[i], delta=5)
+ assert fun.called
+
+ def test_num_handles(self):
+ num_handles = psutil.Process(self.pid).num_handles()
+ with mock.patch("psutil._psplatform.cext.proc_num_handles",
+ side_effect=OSError(errno.EPERM, "msg")) as fun:
+ self.assertEqual(psutil.Process(self.pid).num_handles(),
+ num_handles)
+ assert fun.called
+
+ def test_cmdline(self):
+ for pid in psutil.pids():
+ try:
+ a = cext.proc_cmdline(pid, use_peb=True)
+ b = cext.proc_cmdline(pid, use_peb=False)
+ except OSError as err:
+ err = convert_oserror(err)
+ if not isinstance(err, (psutil.AccessDenied,
+ psutil.NoSuchProcess)):
+ raise
+ else:
+ self.assertEqual(a, b)
+
+
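+# A minimal, purely illustrative sketch (not part of psutil) of the fallback
+# pattern exercised by TestDualProcessImplementation above: try the code path
+# based on documented Windows APIs first and, on a permission error, fall back
+# to the NtQuerySystemInformation()-based one. "proc_memory_info_fallback" is
+# a hypothetical placeholder name, not a real cext function.
+def _memory_info_with_fallback(pid):
+    try:
+        # documented Windows APIs; may fail with EPERM/EACCES for
+        # privileged processes
+        return cext.proc_memory_info(pid)
+    except OSError as err:
+        if err.errno in (errno.EPERM, errno.EACCES):
+            # limited permissions: use the NtQuerySystemInformation() path
+            return cext.proc_memory_info_fallback(pid)  # hypothetical
+        raise
+
+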
+@unittest.skipIf(not WINDOWS, "WINDOWS only")
+class RemoteProcessTestCase(PsutilTestCase):
+ """Certain functions require calling ReadProcessMemory.
+ This trivially works when called on the current process.
+ Check that this works on other processes, especially when they
+ have a different bitness.
+ """
+
+ @staticmethod
+ def find_other_interpreter():
+ # find a python interpreter that is of the opposite bitness from us
+ code = "import sys; sys.stdout.write(str(sys.maxsize > 2**32))"
+
+ # XXX: a different and probably more stable approach might be to
+ # access the registry, but reading 64-bit registry paths from a
+ # 32-bit process is not straightforward.
+ for filename in glob.glob(r"C:\Python*\python.exe"):
+ proc = subprocess.Popen(args=[filename, "-c", code],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output, _ = proc.communicate()
+ proc.wait()
+ # communicate() returns bytes on Python 3; decode before comparing.
+ if output.decode().strip() == str(not IS_64BIT):
+ return filename
+
+ test_args = ["-c", "import sys; sys.stdin.read()"]
+
+ def setUp(self):
+ super().setUp()
+
+ other_python = self.find_other_interpreter()
+ if other_python is None:
+ raise unittest.SkipTest(
+ "could not find interpreter with opposite bitness")
+ if IS_64BIT:
+ self.python64 = sys.executable
+ self.python32 = other_python
+ else:
+ self.python64 = other_python
+ self.python32 = sys.executable
+
+ env = os.environ.copy()
+ env["THINK_OF_A_NUMBER"] = str(os.getpid())
+ self.proc32 = self.spawn_testproc(
+ [self.python32] + self.test_args,
+ env=env,
+ stdin=subprocess.PIPE)
+ self.proc64 = self.spawn_testproc(
+ [self.python64] + self.test_args,
+ env=env,
+ stdin=subprocess.PIPE)
+
+ def tearDown(self):
+ super().tearDown()
+ self.proc32.communicate()
+ self.proc64.communicate()
+
+ def test_cmdline_32(self):
+ p = psutil.Process(self.proc32.pid)
+ self.assertEqual(len(p.cmdline()), 3)
+ self.assertEqual(p.cmdline()[1:], self.test_args)
+
+ def test_cmdline_64(self):
+ p = psutil.Process(self.proc64.pid)
+ self.assertEqual(len(p.cmdline()), 3)
+ self.assertEqual(p.cmdline()[1:], self.test_args)
+
+ def test_cwd_32(self):
+ p = psutil.Process(self.proc32.pid)
+ self.assertEqual(p.cwd(), os.getcwd())
+
+ def test_cwd_64(self):
+ p = psutil.Process(self.proc64.pid)
+ self.assertEqual(p.cwd(), os.getcwd())
+
+ def test_environ_32(self):
+ p = psutil.Process(self.proc32.pid)
+ e = p.environ()
+ self.assertIn("THINK_OF_A_NUMBER", e)
+ self.assertEqual(e["THINK_OF_A_NUMBER"], str(os.getpid()))
+
+ def test_environ_64(self):
+ p = psutil.Process(self.proc64.pid)
+ try:
+ p.environ()
+ except psutil.AccessDenied:
+ pass
+
+
+# ===================================================================
+# Windows services
+# ===================================================================
+
+
+@unittest.skipIf(not WINDOWS, "WINDOWS only")
+class TestServices(PsutilTestCase):
+
+ def test_win_service_iter(self):
+ valid_start_types = set([
+ "automatic",
+ "manual",
+ "disabled",
+ ])
+ valid_statuses = set([
+ "running",
+ "paused",
+ "start_pending",
+ "pause_pending",
+ "continue_pending",
+ "stop_pending",
+ "stopped",
+ ])
+ for serv in psutil.win_service_iter():
+ data = serv.as_dict()
+ self.assertIsInstance(data['name'], str)
+ self.assertNotEqual(data['name'].strip(), "")
+ self.assertIsInstance(data['display_name'], str)
+ self.assertIsInstance(data['username'], str)
+ self.assertIn(data['status'], valid_statuses)
+ if data['pid'] is not None:
+ psutil.Process(data['pid'])
+ self.assertIsInstance(data['binpath'], str)
+ self.assertIsInstance(data['start_type'], str)
+ self.assertIn(data['start_type'], valid_start_types)
+ self.assertIsInstance(data['description'], str)
+ pid = serv.pid()
+ if pid is not None:
+ p = psutil.Process(pid)
+ self.assertTrue(p.is_running())
+ # win_service_get
+ s = psutil.win_service_get(serv.name())
+ # test __eq__
+ self.assertEqual(serv, s)
+
+ def test_win_service_get(self):
+ ERROR_SERVICE_DOES_NOT_EXIST = \
+ psutil._psplatform.cext.ERROR_SERVICE_DOES_NOT_EXIST
+ ERROR_ACCESS_DENIED = psutil._psplatform.cext.ERROR_ACCESS_DENIED
+
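+ # test a non-existent service name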
+ name = next(psutil.win_service_iter()).name()
+ with self.assertRaises(psutil.NoSuchProcess) as cm:
+ psutil.win_service_get(name + '???')
+ self.assertEqual(cm.exception.name, name + '???')
+
+ # test NoSuchProcess
+ service = psutil.win_service_get(name)
+ if PY3:
+ args = (0, "msg", 0, ERROR_SERVICE_DOES_NOT_EXIST)
+ else:
+ args = (ERROR_SERVICE_DOES_NOT_EXIST, "msg")
+ exc = WindowsError(*args)
+ with mock.patch("psutil._psplatform.cext.winservice_query_status",
+ side_effect=exc):
+ self.assertRaises(psutil.NoSuchProcess, service.status)
+ with mock.patch("psutil._psplatform.cext.winservice_query_config",
+ side_effect=exc):
+ self.assertRaises(psutil.NoSuchProcess, service.username)
+
+ # test AccessDenied
+ if PY3:
+ args = (0, "msg", 0, ERROR_ACCESS_DENIED)
+ else:
+ args = (ERROR_ACCESS_DENIED, "msg")
+ exc = WindowsError(*args)
+ with mock.patch("psutil._psplatform.cext.winservice_query_status",
+ side_effect=exc):
+ self.assertRaises(psutil.AccessDenied, service.status)
+ with mock.patch("psutil._psplatform.cext.winservice_query_config",
+ side_effect=exc):
+ self.assertRaises(psutil.AccessDenied, service.username)
+
+ # test __str__ and __repr__
+ self.assertIn(service.name(), str(service))
+ self.assertIn(service.display_name(), str(service))
+ self.assertIn(service.name(), repr(service))
+ self.assertIn(service.display_name(), repr(service))
+
+
+if __name__ == '__main__':
+ from psutil.tests.runner import run_from_name
+ run_from_name(__file__)