path: root/benchmarks/benchmark.py
author      Charles Harris <charlesr.harris@gmail.com>    2013-02-28 18:46:35 -0700
committer   Charles Harris <charlesr.harris@gmail.com>    2013-02-28 18:46:35 -0700
commit      d1e6fc3b81bc0557d74771cfffa04af2c62012f7 (patch)
tree        f3a82f46b98cc82bd4142b1bc71f7f9f8f4bac0c /benchmarks/benchmark.py
parent      0934653e151969f6912c911b5113306bd5f450f1 (diff)
download    numpy-d1e6fc3b81bc0557d74771cfffa04af2c62012f7.tar.gz
REM: Remove benchmark files.
The files are very basic, old benchmarks that test numpy against Numeric and numarray. Those competitors are all but defunct and, while benchmarks are valuable, we really need a more polished and complete framework that runs against the current competition. The early results from these benchmarks were posted, maybe even presented, and could probably still be found in a search. The code is so old that it uses a Python 2 tuple parameter in a method signature. Closes #3088.
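For context, the "tuple parameter" mentioned above is a Python 2-only feature: a function signature could unpack a tuple argument in place, and PEP 3113 removed this in Python 3. A minimal sketch of the difference, using a hypothetical TestTable class purely for illustration:

    # Python 2 only, as in the deleted benchmark.py below:
    #     def __setitem__(self, module, (test_str, setup_str)): ...
    class TestTable(dict):
        def __setitem__(self, module, value):
            # Python 3: take a single argument and unpack it in the body.
            test_str, setup_str = value
            super().__setitem__(module, (test_str, setup_str))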
Diffstat (limited to 'benchmarks/benchmark.py')
-rw-r--r--   benchmarks/benchmark.py   42
1 file changed, 0 insertions, 42 deletions
diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
deleted file mode 100644
index 047379451..000000000
--- a/benchmarks/benchmark.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from timeit import Timer
-
-class Benchmark(dict):
- """Benchmark a feature in different modules."""
-
- def __init__(self,modules,title='',runs=3,reps=1000):
- self.module_test = dict((m,'') for m in modules)
- self.runs = runs
- self.reps = reps
- self.title = title
-
- def __setitem__(self,module,(test_str,setup_str)):
- """Set the test code for modules."""
- if module == 'all':
- modules = self.module_test.keys()
- else:
- modules = [module]
-
- for m in modules:
- setup_str = 'import %s; import %s as np; ' % (m,m) \
- + setup_str
- self.module_test[m] = Timer(test_str, setup_str)
-
- def run(self):
- """Run the benchmark on the different modules."""
- module_column_len = max(len(mod) for mod in self.module_test)
-
- if self.title:
- print self.title
- print 'Doing %d runs, each with %d reps.' % (self.runs,self.reps)
- print '-'*79
-
- for mod in sorted(self.module_test):
- modname = mod.ljust(module_column_len)
- try:
- print "%s: %s" % (modname, \
- self.module_test[mod].repeat(self.runs,self.reps))
- except Exception as e:
- print "%s: Failed to benchmark (%s)." % (modname,e)
-
- print '-'*79
- print
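For readers who want to try the removed harness on a modern interpreter, below is a minimal Python 3 sketch of the same class; it is an illustrative port, not part of this commit. The print statements become print() calls, the tuple parameter is unpacked in the method body, and the setup string is rebuilt per module so one module's imports do not leak into the next (the original loop kept appending to setup_str across iterations).

    from timeit import Timer

    class Benchmark(dict):
        """Benchmark a feature across several modules (illustrative Python 3 port)."""

        def __init__(self, modules, title='', runs=3, reps=1000):
            self.module_test = {m: '' for m in modules}
            self.runs = runs
            self.reps = reps
            self.title = title

        def __setitem__(self, module, value):
            """Set the (test_str, setup_str) pair for one module, or 'all'."""
            test_str, setup_str = value  # unpack in the body (PEP 3113)
            modules = self.module_test.keys() if module == 'all' else [module]
            for m in modules:
                # Build the per-module setup string fresh on each iteration.
                mod_setup = 'import %s; import %s as np; ' % (m, m) + setup_str
                self.module_test[m] = Timer(test_str, mod_setup)

        def run(self):
            """Run the benchmark on the different modules."""
            module_column_len = max(len(mod) for mod in self.module_test)
            if self.title:
                print(self.title)
            print('Doing %d runs, each with %d reps.' % (self.runs, self.reps))
            print('-' * 79)
            for mod in sorted(self.module_test):
                modname = mod.ljust(module_column_len)
                try:
                    timings = self.module_test[mod].repeat(self.runs, self.reps)
                    print('%s: %s' % (modname, timings))
                except Exception as e:
                    print('%s: Failed to benchmark (%s).' % (modname, e))
            print('-' * 79)
            print()

    # Hypothetical usage, for illustration only:
    #     bench = Benchmark(['numpy'], title='sum of arange(100)')
    #     bench['all'] = ('np.arange(100).sum()', '')
    #     bench.run()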