summaryrefslogtreecommitdiff
path: root/test/git
diff options
context:
space:
mode:
Diffstat (limited to 'test/git')
-rw-r--r--test/git/performance/test_commit.py2
-rw-r--r--test/git/performance/test_streams.py12
-rw-r--r--test/git/performance/test_utils.py15
-rw-r--r--test/git/test_commit.py12
-rw-r--r--test/git/test_odb.py52
5 files changed, 64 insertions, 29 deletions
diff --git a/test/git/performance/test_commit.py b/test/git/performance/test_commit.py
index bca3ad8b..0571d0d9 100644
--- a/test/git/performance/test_commit.py
+++ b/test/git/performance/test_commit.py
@@ -91,7 +91,7 @@ class TestPerformance(TestBigRepoRW):
slen = stream.tell()
stream.seek(0)
- cm.sha = make_object(Commit.type, slen, stream)
+ cm.sha = make_object(IStream(Commit.type, slen, stream)).sha
# END commit creation
elapsed = time() - st
diff --git a/test/git/performance/test_streams.py b/test/git/performance/test_streams.py
index 30fd8048..01ec9fc4 100644
--- a/test/git/performance/test_streams.py
+++ b/test/git/performance/test_streams.py
@@ -1,7 +1,7 @@
"""Performance data streaming performance"""
from test.testlib import *
-from git.odb.db import *
+from git.odb import *
from array import array
from cStringIO import StringIO
@@ -51,7 +51,7 @@ class TestObjDBPerformance(TestBigRepoR):
# writing - due to the compression it will seem faster than it is
st = time()
- sha = ldb.store('blob', size, stream)
+ sha = ldb.store(IStream('blob', size, stream)).sha
elapsed_add = time() - st
assert ldb.has_object(sha)
db_file = ldb.readable_db_object_path(sha)
@@ -63,8 +63,8 @@ class TestObjDBPerformance(TestBigRepoR):
# reading all at once
st = time()
- type, size, shastream = ldb.stream(sha)
- shadata = shastream.read()
+ ostream = ldb.stream(sha)
+ shadata = ostream.read()
elapsed_readall = time() - st
stream.seek(0)
@@ -76,9 +76,9 @@ class TestObjDBPerformance(TestBigRepoR):
cs = 512*1000
chunks = list()
st = time()
- type, size, shastream = ldb.stream(sha)
+ ostream = ldb.stream(sha)
while True:
- data = shastream.read(cs)
+ data = ostream.read(cs)
chunks.append(data)
if len(data) < cs:
break
diff --git a/test/git/performance/test_utils.py b/test/git/performance/test_utils.py
index 47366d34..76adffec 100644
--- a/test/git/performance/test_utils.py
+++ b/test/git/performance/test_utils.py
@@ -42,3 +42,18 @@ class TestUtilPerformance(TestBigRepoR):
elapsed = time() - st
print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (cls.__name__, ni, elapsed, ni / elapsed)
# END for each class type
+
+ # check num of sequence-acceses
+ for cls in (list, tuple):
+ x = 10
+ st = time()
+ s = cls(range(x))
+ for i in xrange(ni):
+ s[0]
+ s[1]
+ s[2]
+ # END for
+ elapsed = time() - st
+ na = ni * 3
+ print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (cls.__name__, na, elapsed, na / elapsed)
+ # END for each sequence
diff --git a/test/git/test_commit.py b/test/git/test_commit.py
index e914b9a7..e65e2e59 100644
--- a/test/git/test_commit.py
+++ b/test/git/test_commit.py
@@ -6,6 +6,7 @@
from test.testlib import *
from git import *
+from git.odb import IStream
from cStringIO import StringIO
import time
@@ -31,8 +32,8 @@ def assert_commit_serialization(rwrepo, commit_id, print_performance_info=False)
streamlen = stream.tell()
stream.seek(0)
- csha = rwrepo.odb.store(Commit.type, streamlen, stream)
- assert csha == cm.sha
+ istream = rwrepo.odb.store(IStream(Commit.type, streamlen, stream))
+ assert istream.sha == cm.sha
nc = Commit(rwrepo, Commit.NULL_HEX_SHA, cm.tree.sha,
cm.author, cm.authored_date, cm.author_tz_offset,
@@ -45,7 +46,12 @@ def assert_commit_serialization(rwrepo, commit_id, print_performance_info=False)
ns += 1
streamlen = stream.tell()
stream.seek(0)
- nc.sha = rwrepo.odb.store(Commit.type, streamlen, stream)
+
+ # reuse istream
+ istream.size = streamlen
+ istream.stream = stream
+ istream.sha = None
+ nc.sha = rwrepo.odb.store(istream).sha
# if it worked, we have exactly the same contents !
assert nc.sha == cm.sha
diff --git a/test/git/test_odb.py b/test/git/test_odb.py
index 80597df6..c3a03714 100644
--- a/test/git/test_odb.py
+++ b/test/git/test_odb.py
@@ -1,7 +1,8 @@
"""Test for object db"""
from test.testlib import *
-from git.odb.db import *
+from git.odb import *
+from git.odb.stream import Sha1Writer
from git import Blob
from git.errors import BadObject
@@ -20,26 +21,39 @@ class TestDB(TestBase):
def _assert_object_writing(self, db):
"""General tests to verify object writing, compatible to ObjectDBW
:note: requires write access to the database"""
- # start in dry-run mode
- for dry_run in range(1, -1, -1):
+ # start in 'dry-run' mode, using a simple sha1 writer
+ ostreams = (Sha1Writer, None)
+ for ostreamcls in ostreams:
for data in self.all_data:
- for hex_sha in range(2):
- sha = db.store(Blob.type, len(data), StringIO(data), dry_run, hex_sha)
- assert db.has_object(sha) != dry_run
- assert len(sha) == 20 + hex_sha * 20
+ dry_run = ostreamcls is not None
+ ostream = None
+ if ostreamcls is not None:
+ ostream = ostreamcls()
+ # END create ostream
+
+ prev_ostream = db.set_ostream(ostream)
+ assert type(prev_ostream) in ostreams or prev_ostream in ostreams
+
+ istream = IStream(Blob.type, len(data), StringIO(data))
+ my_istream = db.store(istream)
+ sha = istream.sha
+ assert my_istream is istream
+ assert db.has_object(sha) != dry_run
+ assert len(sha) == 40 # for now we require 40 byte shas as default
+
+ # verify data - the slow way, we want to run code
+ if not dry_run:
+ info = db.info(sha)
+ assert Blob.type == info.type
+ assert info.size == len(data)
- # verify data - the slow way, we want to run code
- if not dry_run:
- type, size = db.info(sha)
- assert Blob.type == type
- assert size == len(data)
-
- type, size, stream = db.stream(sha)
- assert stream.read() == data
- else:
- self.failUnlessRaises(BadObject, db.info, sha)
- self.failUnlessRaises(BadObject, db.object, sha)
- # END for each sha type
+ ostream = db.stream(sha)
+ assert ostream.read() == data
+ assert ostream.type == Blob.type
+ assert ostream.size == len(data)
+ else:
+ self.failUnlessRaises(BadObject, db.info, sha)
+ self.failUnlessRaises(BadObject, db.stream, sha)
# END for each data set
# END for each dry_run mode