author     Sebastian Thiel <byronimo@gmail.com>  2010-11-21 21:47:18 +0100
committer  Sebastian Thiel <byronimo@gmail.com>  2010-11-21 22:00:45 +0100
commit     48a17c87c15b2fa7ce2e84afa09484f354d57a39 (patch)
tree       8664414605c3b8f5176c144c18e5f4b9d0715852 /test/performance/test_utils.py
parent     0b813371f5a8af95152cae109d28c7c97bfaf79f (diff)
parent     6befb28efd86556e45bb0b213bcfbfa866cac379 (diff)
download   gitpython-48a17c87c15b2fa7ce2e84afa09484f354d57a39.tar.gz
-#######->WARNING<-####### Directory structure changed, see commit message
If you use git-python as a submodule of your own project and alter sys.path to import it, you will have to adjust your code to account for the changed directory structure. Previously, you would put the path ./git-python/lib onto your sys.path. All modules moved two levels up, which means that the 'git-python' directory is now a package itself. This implies that the submodule's path must change so that its root directory is called 'git', and your code must now put the directory containing the submodule onto sys.path.

For example, if you previously had the configuration ./ext/git-python/lib/git/__init__.py, you would now change your submodule path to ./ext/git. In the latest revision the directory structure has changed so that the git/__init__.py file is located at ./ext/git/__init__.py; to be able to import git, you need to put ./ext onto your sys.path.
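A minimal sketch of the adjustment described above, assuming the submodule now lives at ./ext/git as in the example (the exact paths and the use of __file__ to locate the project root are illustrative, not prescribed by this commit):

    import os
    import sys

    # Put the directory CONTAINING the 'git' package onto sys.path,
    # not the package directory itself (assumed layout: ./ext/git/__init__.py).
    project_root = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, os.path.join(project_root, 'ext'))

    import git  # now resolves to ./ext/git/__init__.py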
Diffstat (limited to 'test/performance/test_utils.py')
-rw-r--r--  test/performance/test_utils.py  174
1 files changed, 174 insertions, 0 deletions
diff --git a/test/performance/test_utils.py b/test/performance/test_utils.py
new file mode 100644
index 00000000..19c1e84a
--- /dev/null
+++ b/test/performance/test_utils.py
@@ -0,0 +1,174 @@
+"""Performance of utilities"""
+from time import time
+import sys
+import stat
+
+from lib import (
+ TestBigRepoR
+ )
+
+
+class TestUtilPerformance(TestBigRepoR):
+
+ def test_access(self):
+ # compare dict vs. slot access
+ class Slotty(object):
+ __slots__ = "attr"
+ def __init__(self):
+ self.attr = 1
+
+ class Dicty(object):
+ def __init__(self):
+ self.attr = 1
+
+ class BigSlotty(object):
+ __slots__ = ('attr', ) + tuple('abcdefghijk')
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, 1)
+
+ class BigDicty(object):
+ def __init__(self):
+ for attr in BigSlotty.__slots__:
+ setattr(self, attr, 1)
+
+ ni = 1000000
+ for cls in (Slotty, Dicty, BigSlotty, BigDicty):
+ cli = cls()
+ st = time()
+ for i in xrange(ni):
+ cli.attr
+ # END for each access
+ elapsed = time() - st
+ print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (cls.__name__, ni, elapsed, ni / elapsed)
+ # END for each class type
+
+ # check num of sequence-acceses
+ for cls in (list, tuple):
+ x = 10
+ st = time()
+ s = cls(range(x))
+ for i in xrange(ni):
+ s[0]
+ s[1]
+ s[2]
+ # END for
+ elapsed = time() - st
+ na = ni * 3
+ print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (cls.__name__, na, elapsed, na / elapsed)
+ # END for each sequence
+
+ def test_instantiation(self):
+ ni = 100000
+ max_num_items = 4
+ for mni in range(max_num_items+1):
+ for cls in (tuple, list):
+ st = time()
+ for i in xrange(ni):
+ if mni == 0:
+ cls()
+ elif mni == 1:
+ cls((1,))
+ elif mni == 2:
+ cls((1,2))
+ elif mni == 3:
+ cls((1,2,3))
+ elif mni == 4:
+ cls((1,2,3,4))
+ else:
+ cls(x for x in xrange(mni))
+ # END handle empty cls
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i %ss of size %i in %f s ( %f inst / s)" % (ni, cls.__name__, mni, elapsed, ni / elapsed)
+ # END for each type
+ # END for each item count
+
+ # tuple and tuple direct
+ st = time()
+ for i in xrange(ni):
+ t = (1,2,3,4)
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ t = tuple((1,2,3,4))
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
+
+ def test_unpacking_vs_indexing(self):
+ ni = 1000000
+ list_items = [1,2,3,4]
+ tuple_items = (1,2,3,4)
+
+ for sequence in (list_items, tuple_items):
+ st = time()
+ for i in xrange(ni):
+ one, two, three, four = sequence
+ # END for eac iteration
+ elapsed = time() - st
+ print >> sys.stderr, "Unpacked %i %ss of size %i in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ one, two, three, four = sequence[0], sequence[1], sequence[2], sequence[3]
+ # END for eac iteration
+ elapsed = time() - st
+ print >> sys.stderr, "Unpacked %i %ss of size %i individually in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ one, two = sequence[0], sequence[1]
+ # END for eac iteration
+ elapsed = time() - st
+ print >> sys.stderr, "Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+ # END for each sequence
+
+ def test_large_list_vs_iteration(self):
+ # what costs more: alloc/realloc of lists, or the cpu strain of iterators ?
+ def slow_iter(ni):
+ for i in xrange(ni):
+ yield i
+ # END slow iter - be closer to the real world
+
+ # alloc doesn't play a role here it seems
+ for ni in (500, 1000, 10000, 20000, 40000):
+ st = time()
+ for i in list(xrange(ni)):
+ i
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Iterated %i items from list in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
+
+ st = time()
+ for i in slow_iter(ni):
+ i
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Iterated %i items from iterator in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
+ # END for each number of iterations
+
+ def test_type_vs_inst_class(self):
+ class NewType(object):
+ pass
+
+ # lets see which way is faster
+ inst = NewType()
+
+ ni = 1000000
+ st = time()
+ for i in xrange(ni):
+ inst.__class__()
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i items using inst.__class__ in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ type(inst)()
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i items using type(inst)() in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)