Diffstat (limited to 'numpy/testing/utils.py')
-rw-r--r--  numpy/testing/utils.py | 106
1 file changed, 61 insertions(+), 45 deletions(-)
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 97408addb..75d974b18 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -118,11 +118,50 @@ def rand(*args):
f[i] = random.random()
return results
-if sys.platform[:5] == 'linux':
+if os.name == 'nt':
+ # Code "stolen" from enthought/debug/memusage.py
+ def GetPerformanceAttributes(object, counter, instance=None,
+ inum=-1, format=None, machine=None):
+ # NOTE: Many counters require 2 samples to give accurate results,
+ # including "% Processor Time" (as by definition, at any instant, a
+ # thread's CPU usage is either 0 or 100). To read counters like this,
+ # you should copy this function, but keep the counter open, and call
+ # CollectQueryData() each time you need to know.
+ # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
+ # My older explanation for this was that the "AddCounter" process forced
+ # the CPU to 100%, but the above makes more sense :)
+ import win32pdh
+ if format is None:
+ format = win32pdh.PDH_FMT_LONG
+ path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
+ hq = win32pdh.OpenQuery()
+ try:
+ hc = win32pdh.AddCounter(hq, path)
+ try:
+ win32pdh.CollectQueryData(hq)
+ type, val = win32pdh.GetFormattedCounterValue(hc, format)
+ return val
+ finally:
+ win32pdh.RemoveCounter(hc)
+ finally:
+ win32pdh.CloseQuery(hq)
+
+ def memusage(processName="python", instance=0):
+ # from win32pdhutil, part of the win32all package
+ import win32pdh
+ return GetPerformanceAttributes("Process", "Virtual Bytes",
+ processName, instance,
+ win32pdh.PDH_FMT_LONG, None)
+elif sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
_load_time=[]):
- """ Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc. """
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
import time
if not _load_time:
_load_time.append(time.time())
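
The comment in GetPerformanceAttributes() above notes that counters such as "% Processor Time" only give meaningful values across two samples, with the query kept open and CollectQueryData() called once per sample. A minimal sketch of that pattern (an illustration, not part of the patch), assuming Windows with the pywin32 win32pdh module available:

    import time
    import win32pdh

    # Total CPU usage counter; by definition it needs two samples.
    path = win32pdh.MakeCounterPath(
        (None, "Processor", "_Total", None, -1, "% Processor Time"))
    hq = win32pdh.OpenQuery()
    try:
        hc = win32pdh.AddCounter(hq, path)
        try:
            win32pdh.CollectQueryData(hq)   # first sample
            time.sleep(1)                   # interval being measured
            win32pdh.CollectQueryData(hq)   # second sample
            _, val = win32pdh.GetFormattedCounterValue(hc, win32pdh.PDH_FMT_LONG)
            print("CPU usage over the last second: %d%%" % val)
        finally:
            win32pdh.RemoveCounter(hc)
    finally:
        win32pdh.CloseQuery(hq)
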
@@ -135,7 +174,9 @@ if sys.platform[:5] == 'linux':
return int(100*(time.time()-_load_time[0]))
def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
- """ Return virtual memory size in bytes of the running python.
+ """
+ Return virtual memory size in bytes of the running python.
+
"""
try:
f = open(_proc_pid_stat, 'r')
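
For reference, the Linux memusage() above works by reading the single-line /proc/<pid>/stat record and returning the vsize field (field 23 in man 5 proc). A standalone sketch of that lookup, assuming a /proc filesystem and a process name without spaces:

    import os

    def vsize_bytes(pid=None):
        # /proc/<pid>/stat is one space-separated line; vsize is field 23
        # (index 22 after split).  Assumes the comm field has no spaces.
        pid = os.getpid() if pid is None else pid
        with open('/proc/%d/stat' % pid) as f:
            fields = f.readline().split()
        return int(fields[22])

    print(vsize_bytes())
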
@@ -149,51 +190,25 @@ else:
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
- """ Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. [Emulation with time.time]. """
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
def memusage():
- """ Return memory usage of running python. [Not implemented]"""
- raise NotImplementedError
+ """
+ Return memory usage of running python. [Not implemented]
-if os.name == 'nt':
- # Code "stolen" from enthought/debug/memusage.py
- def GetPerformanceAttributes(object, counter, instance=None,
- inum=-1, format=None, machine=None):
- # NOTE: Many counters require 2 samples to give accurate results,
- # including "% Processor Time" (as by definition, at any instant, a
- # thread's CPU usage is either 0 or 100). To read counters like this,
- # you should copy this function, but keep the counter open, and call
- # CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
- # My older explanation for this was that the "AddCounter" process forced
- # the CPU to 100%, but the above makes more sense :)
- import win32pdh
- if format is None:
- format = win32pdh.PDH_FMT_LONG
- path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
- hq = win32pdh.OpenQuery()
- try:
- hc = win32pdh.AddCounter(hq, path)
- try:
- win32pdh.CollectQueryData(hq)
- type, val = win32pdh.GetFormattedCounterValue(hc, format)
- return val
- finally:
- win32pdh.RemoveCounter(hc)
- finally:
- win32pdh.CloseQuery(hq)
+ """
+ raise NotImplementedError
- def memusage(processName="python", instance=0):
- # from win32pdhutil, part of the win32all package
- import win32pdh
- return GetPerformanceAttributes("Process", "Virtual Bytes",
- processName, instance,
- win32pdh.PDH_FMT_LONG, None)
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
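
The fallback jiffies() above emulates the counter with wall-clock time, using a mutable default argument as a one-call cache for the start time. A small illustration of the same idiom (the function name here is hypothetical):

    import time

    def elapsed_hundredths(_start=[]):
        # The default list is created once, so the first call's timestamp
        # persists across later calls.
        if not _start:
            _start.append(time.time())
        return int(100 * (time.time() - _start[0]))

    elapsed_hundredths()            # first call records the start time
    time.sleep(0.05)
    print(elapsed_hundredths())     # roughly 5
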
@@ -688,7 +703,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
names=('x', 'y'), precision=precision)
if not cond:
raise AssertionError(msg)
- except ValueError as e:
+ except ValueError:
import traceback
efmt = traceback.format_exc()
header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
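
The hunk above drops the unused `as e` binding; the error text is recovered with traceback.format_exc() instead, so nothing needs to be bound in the except clause. A hedged sketch of that reporting pattern (the helper name is made up for illustration):

    import traceback

    def checked(cond_fn, header='error during assertion:'):
        try:
            ok = cond_fn()
        except ValueError:
            # format_exc() renders the active exception; no binding required.
            raise AssertionError('%s\n\n%s' % (header, traceback.format_exc()))
        if not ok:
            raise AssertionError(header)
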
@@ -1253,23 +1268,24 @@ def measure(code_str,times=1,label=None):
elapsed = jiffies() - elapsed
return 0.01*elapsed
+
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
import numpy as np
- a = np.arange(100 * 100)
+
b = np.arange(100*100).reshape(100, 100)
c = b
-
i = 1
rc = sys.getrefcount(i)
for j in range(15):
d = op(b, c)
-
assert_(sys.getrefcount(i) >= rc)
+ del d # for pyflakes
+
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False,
err_msg='', verbose=True):
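
_assert_valid_refcount above watches sys.getrefcount(1): a ufunc that over-decrefs the cached small-int object would drive the count below its starting value. A minimal illustration of the check, independent of NumPy:

    import sys

    i = 1
    rc = sys.getrefcount(i)
    refs = [i] * 10                 # taking references can only raise the count
    assert sys.getrefcount(i) >= rc
    del refs                        # keep pyflakes quiet, as in the patch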