Diffstat (limited to 'git/test/performance/db')
-rw-r--r--  git/test/performance/db/looseodb_impl.py          | 210
-rw-r--r--  git/test/performance/db/odb_impl.py               | 122
-rw-r--r--  git/test/performance/db/packedodb_impl.py         | 178
-rw-r--r--  git/test/performance/db/test_looseodb_cmd.py      |  10
-rw-r--r--  git/test/performance/db/test_looseodb_dulwich.py  |  10
-rw-r--r--  git/test/performance/db/test_looseodb_pure.py     |   4
-rw-r--r--  git/test/performance/db/test_looseodb_pygit2.py   |  10
-rw-r--r--  git/test/performance/db/test_odb_cmd.py           |   4
-rw-r--r--  git/test/performance/db/test_odb_dulwich.py       |  10
-rw-r--r--  git/test/performance/db/test_odb_pure.py          |   4
-rw-r--r--  git/test/performance/db/test_odb_pygit2.py        |  10
-rw-r--r--  git/test/performance/db/test_packedodb_pure.py    | 136
12 files changed, 354 insertions, 354 deletions
diff --git a/git/test/performance/db/looseodb_impl.py b/git/test/performance/db/looseodb_impl.py
index 6d3c1fa6..0c451906 100644
--- a/git/test/performance/db/looseodb_impl.py
+++ b/git/test/performance/db/looseodb_impl.py
@@ -4,18 +4,18 @@ from git.base import *
 from git.stream import *
 from async import ChannelThreadTask
 from git.util import (
-    pool,
-    bin_to_hex
-    )
+    pool,
+    bin_to_hex
+    )
 
 import os
 import sys
 from time import time
 
 from git.test.lib import (
-    GlobalsItemDeletorMetaCls,
-    make_memory_file,
-    with_rw_repo
-    )
+    GlobalsItemDeletorMetaCls,
+    make_memory_file,
+    with_rw_repo
+    )
 
 from git.test.performance.lib import TestBigRepoR
@@ -23,110 +23,110 @@ from git.test.performance.lib import TestBigRepoR
 
 #{ Utilities
 
 def read_chunked_stream(stream):
-    total = 0
-    while True:
-        chunk = stream.read(chunk_size)
-        total += len(chunk)
-        if len(chunk) < chunk_size:
-            break
-    # END read stream loop
-    assert total == stream.size
-    return stream
-
-
+    total = 0
+    while True:
+        chunk = stream.read(chunk_size)
+        total += len(chunk)
+        if len(chunk) < chunk_size:
+            break
+    # END read stream loop
+    assert total == stream.size
+    return stream
+
+
 class TestStreamReader(ChannelThreadTask):
-    """Expects input streams and reads them in chunks. It will read one at a time,
-    requireing a queue chunk of size 1"""
-    def __init__(self, *args):
-        super(TestStreamReader, self).__init__(*args)
-        self.fun = read_chunked_stream
-        self.max_chunksize = 1
-
+    """Expects input streams and reads them in chunks. It will read one at a time,
+    requireing a queue chunk of size 1"""
+    def __init__(self, *args):
+        super(TestStreamReader, self).__init__(*args)
+        self.fun = read_chunked_stream
+        self.max_chunksize = 1
+
 #} END utilities
 
 class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
-    ModuleToDelete = 'TestLooseDBWPerformanceBase'
+    ModuleToDelete = 'TestLooseDBWPerformanceBase'
 
 
 class TestLooseDBWPerformanceBase(TestBigRepoR):
-    __metaclass__ = PerfBaseDeletorMetaClass
-
-    large_data_size_bytes = 1000*1000*10    # some MiB should do it
-    moderate_data_size_bytes = 1000*1000*1  # just 1 MiB
-
-    #{ Configuration
-    LooseODBCls = None
-    #} END configuration
-
-    @classmethod
-    def setUpAll(cls):
-        super(TestLooseDBWPerformanceBase, cls).setUpAll()
-        if cls.LooseODBCls is None:
-            raise AssertionError("LooseODBCls must be set in subtype")
-        #END assert configuration
-        # currently there is no additional configuration
-
-    @with_rw_repo("HEAD")
-    def test_large_data_streaming(self, rwrepo):
-        # TODO: This part overlaps with the same file in git.test.performance.test_stream
-        # It should be shared if possible
-        objects_path = rwrepo.db_path('')
-        ldb = self.LooseODBCls(objects_path)
-
-        for randomize in range(2):
-            desc = (randomize and 'random ') or ''
-            print >> sys.stderr, "Creating %s data ..." % desc
-            st = time()
-            size, stream = make_memory_file(self.large_data_size_bytes, randomize)
-            elapsed = time() - st
-            print >> sys.stderr, "Done (in %f s)" % elapsed
-
-            # writing - due to the compression it will seem faster than it is
-            st = time()
-            binsha = ldb.store(IStream('blob', size, stream)).binsha
-            elapsed_add = time() - st
-            assert ldb.has_object(binsha)
-            hexsha = bin_to_hex(binsha)
-            db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:])
-            fsize_kib = os.path.getsize(db_file) / 1000
-
-
-            size_kib = size / 1000
-            print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
-
-            # reading all at once
-            st = time()
-            ostream = ldb.stream(binsha)
-            shadata = ostream.read()
-            elapsed_readall = time() - st
-
-            stream.seek(0)
-            assert shadata == stream.getvalue()
-            print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
-
-
-            # reading in chunks of 1 MiB
-            cs = 512*1000
-            chunks = list()
-            st = time()
-            ostream = ldb.stream(binsha)
-            while True:
-                data = ostream.read(cs)
-                chunks.append(data)
-                if len(data) < cs:
-                    break
-            # END read in chunks
-            elapsed_readchunks = time() - st
-
-            stream.seek(0)
-            assert ''.join(chunks) == stream.getvalue()
-
-            cs_kib = cs / 1000
-            print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
-
-            # del db file so git has something to do
-            os.remove(db_file)
-        # END for each randomization factor
-
+    __metaclass__ = PerfBaseDeletorMetaClass
+
+    large_data_size_bytes = 1000*1000*10    # some MiB should do it
+    moderate_data_size_bytes = 1000*1000*1  # just 1 MiB
+
+    #{ Configuration
+    LooseODBCls = None
+    #} END configuration
+
+    @classmethod
+    def setUpAll(cls):
+        super(TestLooseDBWPerformanceBase, cls).setUpAll()
+        if cls.LooseODBCls is None:
+            raise AssertionError("LooseODBCls must be set in subtype")
+        #END assert configuration
+        # currently there is no additional configuration
+
+    @with_rw_repo("HEAD")
+    def test_large_data_streaming(self, rwrepo):
+        # TODO: This part overlaps with the same file in git.test.performance.test_stream
+        # It should be shared if possible
+        objects_path = rwrepo.db_path('')
+        ldb = self.LooseODBCls(objects_path)
+
+        for randomize in range(2):
+            desc = (randomize and 'random ') or ''
+            print >> sys.stderr, "Creating %s data ..." % desc
+            st = time()
+            size, stream = make_memory_file(self.large_data_size_bytes, randomize)
+            elapsed = time() - st
+            print >> sys.stderr, "Done (in %f s)" % elapsed
+
+            # writing - due to the compression it will seem faster than it is
+            st = time()
+            binsha = ldb.store(IStream('blob', size, stream)).binsha
+            elapsed_add = time() - st
+            assert ldb.has_object(binsha)
+            hexsha = bin_to_hex(binsha)
+            db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:])
+            fsize_kib = os.path.getsize(db_file) / 1000
+
+
+            size_kib = size / 1000
+            print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
+
+            # reading all at once
+            st = time()
+            ostream = ldb.stream(binsha)
+            shadata = ostream.read()
+            elapsed_readall = time() - st
+
+            stream.seek(0)
+            assert shadata == stream.getvalue()
+            print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
+
+
+            # reading in chunks of 1 MiB
+            cs = 512*1000
+            chunks = list()
+            st = time()
+            ostream = ldb.stream(binsha)
+            while True:
+                data = ostream.read(cs)
+                chunks.append(data)
+                if len(data) < cs:
+                    break
+            # END read in chunks
+            elapsed_readchunks = time() - st
+
+            stream.seek(0)
+            assert ''.join(chunks) == stream.getvalue()
+
+            cs_kib = cs / 1000
+            print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
+
+            # del db file so git has something to do
+            os.remove(db_file)
+        # END for each randomization factor
+
diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py
index 677cf6a8..887604c0 100644
--- a/git/test/performance/db/odb_impl.py
+++ b/git/test/performance/db/odb_impl.py
@@ -5,68 +5,68 @@ import sys
 import stat
 
 from git.test.performance.lib import (
-    TestBigRepoR,
-    GlobalsItemDeletorMetaCls
-    )
+    TestBigRepoR,
+    GlobalsItemDeletorMetaCls
+    )
 
 class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
-    ModuleToDelete = 'TestObjDBPerformanceBase'
-
+    ModuleToDelete = 'TestObjDBPerformanceBase'
+
 class TestObjDBPerformanceBase(TestBigRepoR):
-    __metaclass__ = PerfBaseDeletorMetaClass
-
-    #{ Configuration
-    RepoCls = None # to be set by subclass
-    #} END configuration
-
-    def test_random_access_test(self):
-        repo = self.rorepo
-
-        # GET COMMITS
-        st = time()
-        root_commit = repo.commit(self.head_sha_2k)
-        commits = list(root_commit.traverse())
-        nc = len(commits)
-        elapsed = time() - st
-
-        print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
-
-        # GET TREES
-        # walk all trees of all commits
-        st = time()
-        blobs_per_commit = list()
-        nt = 0
-        for commit in commits:
-            tree = commit.tree
-            blobs = list()
-            for item in tree.traverse():
-                nt += 1
-                if item.type == 'blob':
-                    blobs.append(item)
-                # direct access for speed
-            # END while trees are there for walking
-            blobs_per_commit.append(blobs)
-        # END for each commit
-        elapsed = time() - st
-
-        print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
-
-        # GET BLOBS
-        st = time()
-        nb = 0
-        too_many = 15000
-        data_bytes = 0
-        for blob_list in blobs_per_commit:
-            for blob in blob_list:
-                data_bytes += len(blob.data_stream.read())
-            # END for each blobsha
-            nb += len(blob_list)
-            if nb > too_many:
-                break
-        # END for each bloblist
-        elapsed = time() - st
-
-        print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
-
-
+    __metaclass__ = PerfBaseDeletorMetaClass
+
+    #{ Configuration
+    RepoCls = None # to be set by subclass
+    #} END configuration
+
+    def test_random_access_test(self):
+        repo = self.rorepo
+
+        # GET COMMITS
+        st = time()
+        root_commit = repo.commit(self.head_sha_2k)
+        commits = list(root_commit.traverse())
+        nc = len(commits)
+        elapsed = time() - st
+
+        print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
+
+        # GET TREES
+        # walk all trees of all commits
+        st = time()
+        blobs_per_commit = list()
+        nt = 0
+        for commit in commits:
+            tree = commit.tree
+            blobs = list()
+            for item in tree.traverse():
+                nt += 1
+                if item.type == 'blob':
+                    blobs.append(item)
+                # direct access for speed
+            # END while trees are there for walking
+            blobs_per_commit.append(blobs)
+        # END for each commit
+        elapsed = time() - st
+
+        print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
+
+        # GET BLOBS
+        st = time()
+        nb = 0
+        too_many = 15000
+        data_bytes = 0
+        for blob_list in blobs_per_commit:
+            for blob in blob_list:
+                data_bytes += len(blob.data_stream.read())
+            # END for each blobsha
+            nb += len(blob_list)
+            if nb > too_many:
+                break
+        # END for each bloblist
+        elapsed = time() - st
+
+        print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
+
+
diff --git a/git/test/performance/db/packedodb_impl.py b/git/test/performance/db/packedodb_impl.py
index b95a8d13..1ca7c8a0 100644
--- a/git/test/performance/db/packedodb_impl.py
+++ b/git/test/performance/db/packedodb_impl.py
@@ -4,9 +4,9 @@
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Performance tests for object store"""
 from git.test.performance.lib import (
-    TestBigRepoR,
-    GlobalsItemDeletorMetaCls
-    )
+    TestBigRepoR,
+    GlobalsItemDeletorMetaCls
+    )
 
 from git.exc import UnsupportedOperation
@@ -17,91 +17,91 @@ import random
 
 class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
-    ModuleToDelete = 'TestPurePackedODBPerformanceBase'
+    ModuleToDelete = 'TestPurePackedODBPerformanceBase'
 
 
 class TestPurePackedODBPerformanceBase(TestBigRepoR):
-    __metaclass__ = PerfBaseDeletorMetaClass
-
-    #{ Configuration
-    PackedODBCls = None
-    #} END configuration
-
-    @classmethod
-    def setUpAll(cls):
-        super(TestPurePackedODBPerformanceBase, cls).setUpAll()
-        if cls.PackedODBCls is None:
-            raise AssertionError("PackedODBCls must be set in subclass")
-        #END assert configuration
-        cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
-
-    def test_pack_random_access(self):
-        pdb = self.ropdb
-
-        # sha lookup
-        st = time()
-        sha_list = list(pdb.sha_iter())
-        elapsed = time() - st
-        ns = len(sha_list)
-        print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
-
-        # sha lookup: best-case and worst case access
-        pdb_pack_info = pdb._pack_info
-        # END shuffle shas
-        st = time()
-        for sha in sha_list:
-            pdb_pack_info(sha)
-        # END for each sha to look up
-        elapsed = time() - st
-
-        # discard cache
-        del(pdb._entities)
-        pdb.entities()
-        print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
-        # END for each random mode
-
-        # query info and streams only
-        max_items = 10000 # can wait longer when testing memory
-        for pdb_fun in (pdb.info, pdb.stream):
-            st = time()
-            for sha in sha_list[:max_items]:
-                pdb_fun(sha)
-            elapsed = time() - st
-            print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
-        # END for each function
-
-        # retrieve stream and read all
-        max_items = 5000
-        pdb_stream = pdb.stream
-        total_size = 0
-        st = time()
-        for sha in sha_list[:max_items]:
-            stream = pdb_stream(sha)
-            stream.read()
-            total_size += stream.size
-        elapsed = time() - st
-        total_kib = total_size / 1000
-        print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
-
-    def test_correctness(self):
-        pdb = self.ropdb
-        # disabled for now as it used to work perfectly, checking big repositories takes a long time
-        print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
-        for crc in range(2):
-            count = 0
-            st = time()
-            for entity in pdb.entities():
-                pack_verify = entity.is_valid_stream
-                sha_by_index = entity.index().sha
-                for index in xrange(entity.index().size()):
-                    try:
-                        assert pack_verify(sha_by_index(index), use_crc=crc)
-                        count += 1
-                    except UnsupportedOperation:
-                        pass
-                    # END ignore old indices
-                # END for each index
-            # END for each entity
-            elapsed = time() - st
-            print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
-        # END for each verify mode
-
+    __metaclass__ = PerfBaseDeletorMetaClass
+
+    #{ Configuration
+    PackedODBCls = None
+    #} END configuration
+
+    @classmethod
+    def setUpAll(cls):
+        super(TestPurePackedODBPerformanceBase, cls).setUpAll()
+        if cls.PackedODBCls is None:
+            raise AssertionError("PackedODBCls must be set in subclass")
+        #END assert configuration
+        cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
+
+    def test_pack_random_access(self):
+        pdb = self.ropdb
+
+        # sha lookup
+        st = time()
+        sha_list = list(pdb.sha_iter())
+        elapsed = time() - st
+        ns = len(sha_list)
+        print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
+
+        # sha lookup: best-case and worst case access
+        pdb_pack_info = pdb._pack_info
+        # END shuffle shas
+        st = time()
+        for sha in sha_list:
+            pdb_pack_info(sha)
+        # END for each sha to look up
+        elapsed = time() - st
+
+        # discard cache
+        del(pdb._entities)
+        pdb.entities()
+        print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
+        # END for each random mode
+
+        # query info and streams only
+        max_items = 10000 # can wait longer when testing memory
+        for pdb_fun in (pdb.info, pdb.stream):
+            st = time()
+            for sha in sha_list[:max_items]:
+                pdb_fun(sha)
+            elapsed = time() - st
+            print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
+        # END for each function
+
+        # retrieve stream and read all
+        max_items = 5000
+        pdb_stream = pdb.stream
+        total_size = 0
+        st = time()
+        for sha in sha_list[:max_items]:
+            stream = pdb_stream(sha)
+            stream.read()
+            total_size += stream.size
+        elapsed = time() - st
+        total_kib = total_size / 1000
+        print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
+
+    def test_correctness(self):
+        pdb = self.ropdb
+        # disabled for now as it used to work perfectly, checking big repositories takes a long time
+        print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
+        for crc in range(2):
+            count = 0
+            st = time()
+            for entity in pdb.entities():
+                pack_verify = entity.is_valid_stream
+                sha_by_index = entity.index().sha
+                for index in xrange(entity.index().size()):
+                    try:
+                        assert pack_verify(sha_by_index(index), use_crc=crc)
+                        count += 1
+                    except UnsupportedOperation:
+                        pass
+                    # END ignore old indices
+                # END for each index
+            # END for each entity
+            elapsed = time() - st
+            print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
+        # END for each verify mode
+
diff --git a/git/test/performance/db/test_looseodb_cmd.py b/git/test/performance/db/test_looseodb_cmd.py
index 9738278c..9147eff6 100644
--- a/git/test/performance/db/test_looseodb_cmd.py
+++ b/git/test/performance/db/test_looseodb_cmd.py
@@ -4,8 +4,8 @@ from looseodb_impl import TestLooseDBWPerformanceBase
 import sys
 
 class TestCmdLooseDB(TestLooseDBWPerformanceBase):
-    LooseODBCls = CmdCompatibilityGitDB
-
-    def test_info(self):
-        sys.stderr.write("This test does not check the write performance of the git command as it is implemented in pure python")
-
+    LooseODBCls = CmdCompatibilityGitDB
+
+    def test_info(self):
+        sys.stderr.write("This test does not check the write performance of the git command as it is implemented in pure python")
+
diff --git a/git/test/performance/db/test_looseodb_dulwich.py b/git/test/performance/db/test_looseodb_dulwich.py
index e123ebf1..174be83d 100644
--- a/git/test/performance/db/test_looseodb_dulwich.py
+++ b/git/test/performance/db/test_looseodb_dulwich.py
@@ -1,13 +1,13 @@
 try:
-    from git.db.dulwich.complex import DulwichGitODB
+    from git.db.dulwich.complex import DulwichGitODB
 except ImportError:
-    from git.db.py.complex import PureGitODB as DulwichGitODB
+    from git.db.py.complex import PureGitODB as DulwichGitODB
 #END handle import
 
 from git.test.db.dulwich.lib import DulwichRequiredMetaMixin
 from looseodb_impl import TestLooseDBWPerformanceBase
 
 class TestPureLooseDB(TestLooseDBWPerformanceBase):
-    __metaclass__ = DulwichRequiredMetaMixin
-    LooseODBCls = DulwichGitODB
-
+    __metaclass__ = DulwichRequiredMetaMixin
+    LooseODBCls = DulwichGitODB
+
diff --git a/git/test/performance/db/test_looseodb_pure.py b/git/test/performance/db/test_looseodb_pure.py
index 46f39d5e..bb080612 100644
--- a/git/test/performance/db/test_looseodb_pure.py
+++ b/git/test/performance/db/test_looseodb_pure.py
@@ -2,5 +2,5 @@ from git.db.py.loose import PureLooseObjectODB
 from looseodb_impl import TestLooseDBWPerformanceBase
 
 class TestPureLooseDB(TestLooseDBWPerformanceBase):
-    LooseODBCls = PureLooseObjectODB
-
+    LooseODBCls = PureLooseObjectODB
+
diff --git a/git/test/performance/db/test_looseodb_pygit2.py b/git/test/performance/db/test_looseodb_pygit2.py
index 326af9fb..a9661111 100644
--- a/git/test/performance/db/test_looseodb_pygit2.py
+++ b/git/test/performance/db/test_looseodb_pygit2.py
@@ -1,13 +1,13 @@
 try:
-    from git.db.pygit2.complex import Pygit2GitODB
+    from git.db.pygit2.complex import Pygit2GitODB
 except ImportError:
-    from git.db.py.complex import PureGitODB as Pygit2GitODB
+    from git.db.py.complex import PureGitODB as Pygit2GitODB
 #END handle import
 
 from git.test.db.pygit2.lib import Pygit2RequiredMetaMixin
 from looseodb_impl import TestLooseDBWPerformanceBase
 
 class TestPureLooseDB(TestLooseDBWPerformanceBase):
-    __metaclass__ = Pygit2RequiredMetaMixin
-    LooseODBCls = Pygit2GitODB
-
+    __metaclass__ = Pygit2RequiredMetaMixin
+    LooseODBCls = Pygit2GitODB
+
diff --git a/git/test/performance/db/test_odb_cmd.py b/git/test/performance/db/test_odb_cmd.py
index acd55cc9..37af34fd 100644
--- a/git/test/performance/db/test_odb_cmd.py
+++ b/git/test/performance/db/test_odb_cmd.py
@@ -2,5 +2,5 @@ from git.db.complex import CmdCompatibilityGitDB
 from odb_impl import TestObjDBPerformanceBase
 
 class TestCmdDB(TestObjDBPerformanceBase):
-    RepoCls = CmdCompatibilityGitDB
-
+    RepoCls = CmdCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_dulwich.py b/git/test/performance/db/test_odb_dulwich.py
index 6802483c..33abc88c 100644
--- a/git/test/performance/db/test_odb_dulwich.py
+++ b/git/test/performance/db/test_odb_dulwich.py
@@ -1,13 +1,13 @@
 try:
-    from git.db.dulwich.complex import DulwichCompatibilityGitDB
+    from git.db.dulwich.complex import DulwichCompatibilityGitDB
 except ImportError:
-    from git.db.complex import PureCompatibilityGitDB as DulwichCompatibilityGitDB
+    from git.db.complex import PureCompatibilityGitDB as DulwichCompatibilityGitDB
 #END handle dulwich compatibility
 
 from git.test.db.dulwich.lib import DulwichRequiredMetaMixin
 from odb_impl import TestObjDBPerformanceBase
 
 class TestDulwichDB(TestObjDBPerformanceBase):
-    __metaclass__ = DulwichRequiredMetaMixin
-    RepoCls = DulwichCompatibilityGitDB
-
+    __metaclass__ = DulwichRequiredMetaMixin
+    RepoCls = DulwichCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_pure.py b/git/test/performance/db/test_odb_pure.py
index 6ed3585d..93139c57 100644
--- a/git/test/performance/db/test_odb_pure.py
+++ b/git/test/performance/db/test_odb_pure.py
@@ -2,5 +2,5 @@ from git.db.complex import PureCompatibilityGitDB
 from odb_impl import TestObjDBPerformanceBase
 
 class TestPureDB(TestObjDBPerformanceBase):
-    RepoCls = PureCompatibilityGitDB
-
+    RepoCls = PureCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_pygit2.py b/git/test/performance/db/test_odb_pygit2.py
index bb7ed8a9..c5911ae3 100644
--- a/git/test/performance/db/test_odb_pygit2.py
+++ b/git/test/performance/db/test_odb_pygit2.py
@@ -1,13 +1,13 @@
 try:
-    from git.db.pygit2.complex import Pygit2CompatibilityGitDB
+    from git.db.pygit2.complex import Pygit2CompatibilityGitDB
 except ImportError:
-    from git.db.complex import PureCompatibilityGitDB as Pygit2CompatibilityGitDB
+    from git.db.complex import PureCompatibilityGitDB as Pygit2CompatibilityGitDB
 #END handle pygit2 compatibility
 
 from git.test.db.pygit2.lib import Pygit2RequiredMetaMixin
 from odb_impl import TestObjDBPerformanceBase
 
 class TestPygit2DB(TestObjDBPerformanceBase):
-    __metaclass__ = Pygit2RequiredMetaMixin
-    RepoCls = Pygit2CompatibilityGitDB
-
+    __metaclass__ = Pygit2RequiredMetaMixin
+    RepoCls = Pygit2CompatibilityGitDB
+
diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py
index 11497d9d..90e8381f 100644
--- a/git/test/performance/db/test_packedodb_pure.py
+++ b/git/test/performance/db/test_packedodb_pure.py
@@ -17,73 +17,73 @@ from nose import SkipTest
 
 class CountedNullStream(NullStream):
-    __slots__ = '_bw'
-    def __init__(self):
-        self._bw = 0
-
-    def bytes_written(self):
-        return self._bw
-
-    def write(self, d):
-        self._bw += NullStream.write(self, d)
-
+    __slots__ = '_bw'
+    def __init__(self):
+        self._bw = 0
+
+    def bytes_written(self):
+        return self._bw
+
+    def write(self, d):
+        self._bw += NullStream.write(self, d)
+
 
 class TestPurePackedODB(TestPurePackedODBPerformanceBase):
-    #{ Configuration
-    PackedODBCls = PurePackedODB
-    #} END configuration
-
-    def test_pack_writing_note(self):
-        sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info")
-        raise SkipTest()
-
-    def test_pack_writing(self):
-        # see how fast we can write a pack from object streams.
-        # This will not be fast, as we take time for decompressing the streams as well
-        # For now we test the fast streaming and slow streaming versions manually
-        ostream = CountedNullStream()
-        # NOTE: We use the same repo twice to see whether OS caching helps
-        for rorepo in (self.rorepo, self.rorepo, self.ropdb):
-
-            ni = 5000
-            count = 0
-            total_size = 0
-            st = time()
-            for sha in rorepo.sha_iter():
-                count += 1
-                rorepo.stream(sha)
-                if count == ni:
-                    break
-            #END gather objects for pack-writing
-            elapsed = time() - st
-            print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in in %f s ( %f streams/s )" % (count, rorepo.__class__.__name__, elapsed, count / elapsed)
-
-            st = time()
-            PackEntity.write_pack((rorepo.stream(sha) for sha in rorepo.sha_iter()), ostream.write, object_count=ni)
-            elapsed = time() - st
-            total_kb = ostream.bytes_written() / 1000
-            print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
-        #END for each rorepo
-
-
-    def test_stream_reading(self):
-        raise SkipTest("This test was only used for --with-profile runs")
-        pdb = self.ropdb
-
-        # streaming only, meant for --with-profile runs
-        ni = 5000
-        count = 0
-        pdb_stream = pdb.stream
-        total_size = 0
-        st = time()
-        for sha in pdb.sha_iter():
-            if count == ni:
-                break
-            stream = pdb_stream(sha)
-            stream.read()
-            total_size += stream.size
-            count += 1
-        elapsed = time() - st
-        total_kib = total_size / 1000
-        print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed)
-
+    #{ Configuration
+    PackedODBCls = PurePackedODB
+    #} END configuration
+
+    def test_pack_writing_note(self):
+        sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info")
+        raise SkipTest()
+
+    def test_pack_writing(self):
+        # see how fast we can write a pack from object streams.
+        # This will not be fast, as we take time for decompressing the streams as well
+        # For now we test the fast streaming and slow streaming versions manually
+        ostream = CountedNullStream()
+        # NOTE: We use the same repo twice to see whether OS caching helps
+        for rorepo in (self.rorepo, self.rorepo, self.ropdb):
+
+            ni = 5000
+            count = 0
+            total_size = 0
+            st = time()
+            for sha in rorepo.sha_iter():
+                count += 1
+                rorepo.stream(sha)
+                if count == ni:
+                    break
+            #END gather objects for pack-writing
+            elapsed = time() - st
+            print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in in %f s ( %f streams/s )" % (count, rorepo.__class__.__name__, elapsed, count / elapsed)
+
+            st = time()
+            PackEntity.write_pack((rorepo.stream(sha) for sha in rorepo.sha_iter()), ostream.write, object_count=ni)
+            elapsed = time() - st
+            total_kb = ostream.bytes_written() / 1000
+            print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
+        #END for each rorepo
+
+
+    def test_stream_reading(self):
+        raise SkipTest("This test was only used for --with-profile runs")
+        pdb = self.ropdb
+
+        # streaming only, meant for --with-profile runs
+        ni = 5000
+        count = 0
+        pdb_stream = pdb.stream
+        total_size = 0
+        st = time()
+        for sha in pdb.sha_iter():
+            if count == ni:
+                break
+            stream = pdb_stream(sha)
+            stream.read()
+            total_size += stream.size
+            count += 1
+        elapsed = time() - st
+        total_kib = total_size / 1000
+        print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed)
+