author     Sebastian Thiel <byronimo@gmail.com>  2011-05-05 19:44:36 +0200
committer  Sebastian Thiel <byronimo@gmail.com>  2011-05-05 19:44:36 +0200
commit     e03093dd392b92f51b7d7cf66d7b1949b9f843e6 (patch)
tree       dbd2b85d8b201cb5f2848b3aaa11cffbc96031c2 /gitdb/test/performance
parent     6463c10db377573e695bc504a9451bdb6cbf61f5 (diff)
download   gitdbmerger.tar.gz
Removed plenty of code which went into git-python. This is just for completeness; gitdb doesn't need to be worked on anymore. (gitdbmerger)
Diffstat (limited to 'gitdb/test/performance')
-rw-r--r--  gitdb/test/performance/lib.py           48
-rw-r--r--  gitdb/test/performance/test_pack.py     89
-rw-r--r--  gitdb/test/performance/test_stream.py  189
3 files changed, 0 insertions, 326 deletions
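
For reference, this summary can be regenerated from a local clone that still contains both commits, using the abbreviated hashes from the metadata above:

git diff --stat 6463c10 e03093d -- gitdb/test/performance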
diff --git a/gitdb/test/performance/lib.py b/gitdb/test/performance/lib.py
index 761113d..cb6303f 100644
--- a/gitdb/test/performance/lib.py
+++ b/gitdb/test/performance/lib.py
@@ -3,52 +3,4 @@
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Contains library functions"""
-import os
-from gitdb.test.lib import *
-import shutil
-import tempfile
-
-#{ Invariants
-k_env_git_repo = "GITDB_TEST_GIT_REPO_BASE"
-#} END invariants
-
-
-#{ Utilities
-def resolve_or_fail(env_var):
- """:return: resolved environment variable or raise EnvironmentError"""
- try:
- return os.environ[env_var]
- except KeyError:
- raise EnvironmentError("Please set the %r environment variable and retry" % env_var)
- # END exception handling
-
-#} END utilities
-
-
-#{ Base Classes
-
-class TestBigRepoR(TestBase):
- """TestCase providing access to readonly 'big' repositories using the following
- member variables:
-
- * gitrepopath
-
- * read-only base path of the git source repository, i.e. .../git/.git"""
-
- #{ Invariants
- head_sha_2k = '235d521da60e4699e5bd59ac658b5b48bd76ddca'
- head_sha_50 = '32347c375250fd470973a5d76185cac718955fd5'
- #} END invariants
-
- @classmethod
- def setUpAll(cls):
- try:
- super(TestBigRepoR, cls).setUpAll()
- except AttributeError:
- pass
- cls.gitrepopath = resolve_or_fail(k_env_git_repo)
- assert cls.gitrepopath.endswith('.git')
-
-
-#} END base classes
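
For context, the helper removed above resolves a single environment variable and exposes the repository path on the test class. Below is a minimal sketch of the same setup using only the stdlib, with unittest's setUpClass standing in for the nose-era setUpAll hook; names mirror the removed code, and the runner invocation is illustrative:

import os
import unittest

k_env_git_repo = "GITDB_TEST_GIT_REPO_BASE"

def resolve_or_fail(env_var):
    """:return: resolved environment variable or raise EnvironmentError"""
    try:
        return os.environ[env_var]
    except KeyError:
        raise EnvironmentError("Please set the %r environment variable and retry" % env_var)

class TestBigRepoR(unittest.TestCase):
    """Provides cls.gitrepopath, the read-only base path of a bare git repository"""

    @classmethod
    def setUpClass(cls):
        # fail early if the repository path is not configured
        cls.gitrepopath = resolve_or_fail(k_env_git_repo)
        assert cls.gitrepopath.endswith('.git')

A run would then set the variable first, e.g. GITDB_TEST_GIT_REPO_BASE=~/git/.git.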
diff --git a/gitdb/test/performance/test_pack.py b/gitdb/test/performance/test_pack.py
index da952b1..8b13789 100644
--- a/gitdb/test/performance/test_pack.py
+++ b/gitdb/test/performance/test_pack.py
@@ -1,90 +1 @@
-# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
-#
-# This module is part of GitDB and is released under
-# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
-"""Performance tests for object store"""
-from lib import (
- TestBigRepoR
- )
-from gitdb.exc import UnsupportedOperation
-from gitdb.db.pack import PackedDB
-
-import sys
-import os
-from time import time
-import random
-
-class TestPackedDBPerformance(TestBigRepoR):
-
- def _test_pack_random_access(self):
- pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack"))
-
- # sha lookup
- st = time()
- sha_list = list(pdb.sha_iter())
- elapsed = time() - st
- ns = len(sha_list)
- print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
-
- # sha lookup: best-case and worst-case access
- pdb_pack_info = pdb._pack_info
- st = time()
- for sha in sha_list:
- pdb_pack_info(sha)
- # END for each sha to look up
- elapsed = time() - st
-
- # discard cache
- del pdb._entities
- pdb.entities()
- print >> sys.stderr, "PDB: looked up %i shas in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
-
- # query info and streams only
- max_items = 10000 # can wait longer when testing memory
- for pdb_fun in (pdb.info, pdb.stream):
- st = time()
- for sha in sha_list[:max_items]:
- pdb_fun(sha)
- elapsed = time() - st
- print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
- # END for each function
-
- # retrieve stream and read all
- max_items = 5000
- pdb_stream = pdb.stream
- total_size = 0
- st = time()
- for sha in sha_list[:max_items]:
- stream = pdb_stream(sha)
- stream.read()
- total_size += stream.size
- elapsed = time() - st
- total_kib = total_size / 1000
- print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
-
- def test_correctness(self):
- pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack"))
- # disabled for now: it used to work perfectly, and checking big repositories takes a long time
- print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
- for crc in range(2):
- count = 0
- st = time()
- for entity in pdb.entities():
- pack_verify = entity.is_valid_stream
- sha_by_index = entity.index().sha
- for index in xrange(entity.index().size()):
- try:
- assert pack_verify(sha_by_index(index), use_crc=crc)
- count += 1
- except UnsupportedOperation:
- pass
- # END ignore old indices
- # END for each index
- # END for each entity
- elapsed = time() - st
- print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
- # END for each verify mode
-
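
The module above timed PackedDB lookups with a plain wall-clock timer. A minimal sketch of that measurement pattern, kept in the module's Python 2 style and assuming only the PackedDB API used above (sha_iter() and stream()):

import os
import sys
from time import time

from gitdb.db.pack import PackedDB

def time_stream_lookups(gitrepopath, max_items=10000):
    """Time stream() lookups for up to max_items shas and print the rate"""
    pdb = PackedDB(os.path.join(gitrepopath, "objects/pack"))
    sha_list = list(pdb.sha_iter())[:max_items]
    st = time()
    for sha in sha_list:
        pdb.stream(sha)
    # guard the rate computation against a timer resolution of zero
    elapsed = max(time() - st, 1e-9)
    print >> sys.stderr, "PDB: obtained %i streams in %f s ( %f items/s )" % (len(sha_list), elapsed, len(sha_list) / elapsed)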
diff --git a/gitdb/test/performance/test_stream.py b/gitdb/test/performance/test_stream.py
index 0e47484..d64346e 100644
--- a/gitdb/test/performance/test_stream.py
+++ b/gitdb/test/performance/test_stream.py
@@ -3,193 +3,4 @@
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Performance data streaming performance"""
-from gitdb.db.py import *
-from gitdb.base import *
-from gitdb.stream import *
-from gitdb.util import (
- pool,
- bin_to_hex
- )
-from gitdb.typ import str_blob_type
-from gitdb.fun import chunk_size
-from async import (
- IteratorReader,
- ChannelThreadTask,
- )
-
-from cStringIO import StringIO
-from time import time
-import os
-import sys
-import stat
-import subprocess
-
-
-from lib import (
- TestBigRepoR,
- make_memory_file,
- with_rw_directory
- )
-
-
-#{ Utilities
-def read_chunked_stream(stream):
- total = 0
- while True:
- chunk = stream.read(chunk_size)
- total += len(chunk)
- if len(chunk) < chunk_size:
- break
- # END read stream loop
- assert total == stream.size
- return stream
-
-
-class TestStreamReader(ChannelThreadTask):
- """Expects input streams and reads them in chunks. It will read one at a time,
- requireing a queue chunk of size 1"""
- def __init__(self, *args):
- super(TestStreamReader, self).__init__(*args)
- self.fun = read_chunked_stream
- self.max_chunksize = 1
-
-
-#} END utilities
-
-class TestObjDBPerformance(TestBigRepoR):
-
- large_data_size_bytes = 1000*1000*50 # about 50 MB should do it
- moderate_data_size_bytes = 1000*1000*1 # just 1 MB
-
- @with_rw_directory
- def test_large_data_streaming(self, path):
- ldb = PureLooseObjectODB(path)
- string_ios = list() # list of streams we previously created
-
- # serial mode
- for randomize in range(2):
- desc = (randomize and 'random ') or ''
- print >> sys.stderr, "Creating %s data ..." % desc
- st = time()
- size, stream = make_memory_file(self.large_data_size_bytes, randomize)
- elapsed = time() - st
- print >> sys.stderr, "Done (in %f s)" % elapsed
- string_ios.append(stream)
-
- # writing - due to the compression it will seem faster than it is
- st = time()
- sha = ldb.store(IStream('blob', size, stream)).binsha
- elapsed_add = time() - st
- assert ldb.has_object(sha)
- db_file = ldb.readable_db_object_path(bin_to_hex(sha))
- fsize_kib = os.path.getsize(db_file) / 1000
-
-
- size_kib = size / 1000
- print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
-
- # reading all at once
- st = time()
- ostream = ldb.stream(sha)
- shadata = ostream.read()
- elapsed_readall = time() - st
-
- stream.seek(0)
- assert shadata == stream.getvalue()
- print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
-
-
- # reading in chunks of 512 KB
- cs = 512*1000
- chunks = list()
- st = time()
- ostream = ldb.stream(sha)
- while True:
- data = ostream.read(cs)
- chunks.append(data)
- if len(data) < cs:
- break
- # END read in chunks
- elapsed_readchunks = time() - st
-
- stream.seek(0)
- assert ''.join(chunks) == stream.getvalue()
-
- cs_kib = cs / 1000
- print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
-
- # remove the db file so the next iteration has to store it again
- os.remove(db_file)
- # END for each randomization factor
-
-
- # multi-threaded mode
- # use two threads; most of today's CPUs support that
- pool.set_size(2)
- total_kib = 0
- nsios = len(string_ios)
- for stream in string_ios:
- stream.seek(0)
- total_kib += len(stream.getvalue()) / 1000
- # END rewind
-
- def istream_iter():
- for stream in string_ios:
- stream.seek(0)
- yield IStream(str_blob_type, len(stream.getvalue()), stream)
- # END for each stream
- # END util
-
- # write multiple objects at once, involving concurrent compression
- reader = IteratorReader(istream_iter())
- istream_reader = ldb.store_async(reader)
- istream_reader.task().max_chunksize = 1
-
- st = time()
- istreams = istream_reader.read(nsios)
- assert len(istreams) == nsios
- elapsed = time() - st
-
- print >> sys.stderr, "Threads(%i): Compressed %i KiB of data in loose odb in %f s ( %f Write KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed)
-
- # decompress multiple at once by reading them. The chunk size is not
- # important, as the stream will not really be decompressed until it is read
- istream_reader = IteratorReader(iter([ i.binsha for i in istreams ]))
- ostream_reader = ldb.stream_async(istream_reader)
-
- chunk_task = TestStreamReader(ostream_reader, "chunker", None)
- output_reader = pool.add_task(chunk_task)
- output_reader.task().max_chunksize = 1
-
- st = time()
- assert len(output_reader.read(nsios)) == nsios
- elapsed = time() - st
-
- print >> sys.stderr, "Threads(%i): Decompressed %i KiB of data in loose odb in %f s ( %f Read KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed)
-
- # store the files and read them back. For the reading we use a task as
- # well, chunked into one item per task. Reading everything quickly results
- # in two threads handling two byte streams of chained
- # compression/decompression streams
- reader = IteratorReader(istream_iter())
- istream_reader = ldb.store_async(reader)
- istream_reader.task().max_chunksize = 1
-
- istream_to_sha = lambda items: [ i.binsha for i in items ]
- istream_reader.set_post_cb(istream_to_sha)
-
- ostream_reader = ldb.stream_async(istream_reader)
-
- chunk_task = TestStreamReader(ostream_reader, "chunker", None)
- output_reader = pool.add_task(chunk_task)
- output_reader.task().max_chunksize = 1
-
- st = time()
- assert len(output_reader.read(nsios)) == nsios
- elapsed = time() - st
-
- print >> sys.stderr, "Threads(%i): Compressed and decompressed and read %i KiB of data in loose odb in %f s ( %f Combined KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed)