Diffstat (limited to 'git/test/performance')
-rw-r--r--   git/test/performance/test_commit.py    16
-rw-r--r--   git/test/performance/test_odb.py       14
-rw-r--r--   git/test/performance/test_streams.py   35
-rw-r--r--   git/test/performance/test_utils.py     44
4 files changed, 60 insertions, 49 deletions
diff --git a/git/test/performance/test_commit.py b/git/test/performance/test_commit.py
index 257442ea..a890c833 100644
--- a/git/test/performance/test_commit.py
+++ b/git/test/performance/test_commit.py
@@ -3,7 +3,7 @@
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
-
+from __future__ import print_function
from .lib import TestBigRepoRW
from git import Commit
from gitdb import IStream
@@ -46,8 +46,8 @@ class TestPerformance(TestBigRepoRW):
# END for each object
# END for each commit
elapsed_time = time() - st
- print >> sys.stderr, "Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )" % (
- nc, no, elapsed_time, no / elapsed_time)
+ print("Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )"
+ % (nc, no, elapsed_time, no / elapsed_time), file=sys.stderr)
def test_commit_traversal(self):
# bound to cat-file parsing performance
@@ -58,7 +58,8 @@ class TestPerformance(TestBigRepoRW):
self._query_commit_info(c)
# END for each traversed commit
elapsed_time = time() - st
- print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc / elapsed_time)
+ print("Traversed %i Commits in %s [s] ( %f commits/s )"
+ % (nc, elapsed_time, nc / elapsed_time), file=sys.stderr)
def test_commit_iteration(self):
# bound to stream parsing performance
@@ -69,7 +70,8 @@ class TestPerformance(TestBigRepoRW):
self._query_commit_info(c)
# END for each traversed commit
elapsed_time = time() - st
- print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc / elapsed_time)
+ print("Iterated %i Commits in %s [s] ( %f commits/s )"
+ % (nc, elapsed_time, nc / elapsed_time), file=sys.stderr)
def test_commit_serialization(self):
assert_commit_serialization(self.gitrwrepo, self.gitrwrepo.head, True)
@@ -97,5 +99,5 @@ class TestPerformance(TestBigRepoRW):
# END commit creation
elapsed = time() - st
- print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (
- nc, elapsed, nc / elapsed)
+ print("Serialized %i commits to loose objects in %f s ( %f commits / s )"
+ % (nc, elapsed, nc / elapsed), file=sys.stderr)
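
The change in test_commit.py is the standard Python 2 → 3 print migration: `from __future__ import print_function` at the top of the module, and `print(..., file=sys.stderr)` in place of the Python-2-only `print >> sys.stderr, ...` statement. A minimal standalone sketch of the same idiom (the traversal below is a stand-in for illustration, not the repository's code):

    from __future__ import print_function  # makes print() a function on Python 2.6+

    import sys
    from time import time

    st = time()
    nc = sum(1 for _ in range(100000))  # stand-in for traversing commits
    elapsed = max(time() - st, 1e-9)    # guard against a zero reading on coarse timers

    # Python 2 only:   print >> sys.stderr, "Traversed %i items ..." % (nc, ...)
    # Python 2 and 3:  print("...", file=sys.stderr)
    print("Traversed %i items in %f s ( %f items/s )"
          % (nc, elapsed, nc / elapsed), file=sys.stderr)
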
diff --git a/git/test/performance/test_odb.py b/git/test/performance/test_odb.py
index 779adb00..b14e6db0 100644
--- a/git/test/performance/test_odb.py
+++ b/git/test/performance/test_odb.py
@@ -1,5 +1,5 @@
"""Performance tests for object store"""
-
+from __future__ import print_function
from time import time
import sys
@@ -20,8 +20,8 @@ class TestObjDBPerformance(TestBigRepoR):
nc = len(commits)
elapsed = time() - st
- print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (
- type(repo.odb), nc, elapsed, nc / elapsed)
+ print("%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )"
+ % (type(repo.odb), nc, elapsed, nc / elapsed), file=sys.stderr)
results[0].append(elapsed)
# GET TREES
@@ -42,8 +42,8 @@ class TestObjDBPerformance(TestBigRepoR):
# END for each commit
elapsed = time() - st
- print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (
- type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
+ print("%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )"
+ % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed), file=sys.stderr)
results[1].append(elapsed)
# GET BLOBS
@@ -63,11 +63,11 @@ class TestObjDBPerformance(TestBigRepoR):
msg = "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )"\
% (type(repo.odb), nb, data_bytes / 1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
+ print(msg, file=sys.stderr)
results[2].append(elapsed)
- print >> sys.stderr, msg
# END for each repo type
# final results
for test_name, a, b in results:
- print >> sys.stderr, "%s: %f s vs %f s, pure is %f times slower" % (test_name, a, b, b / a)
+ print("%s: %f s vs %f s, pure is %f times slower" % (test_name, a, b, b / a), file=sys.stderr)
# END for each result
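
Besides the print conversion, the last hunk in test_odb.py moves the report so the message is printed before the elapsed time is appended to `results`. The benchmarks in this file all follow the same shape: time a loop, format a message, print it to stderr, record the elapsed value. A hedged sketch of that pattern as a reusable helper (`timed_report` is our name for illustration, not part of GitPython):

    from __future__ import print_function

    import sys
    from time import time

    def timed_report(label, fn, *args, **kwargs):
        """Run fn(*args, **kwargs), print a timing line to stderr and return
        (result, elapsed) so the caller can collect the measurement."""
        st = time()
        result = fn(*args, **kwargs)
        elapsed = max(time() - st, 1e-9)  # guard against a zero reading
        print("%s: done in %f s" % (label, elapsed), file=sys.stderr)
        return result, elapsed

    # usage sketch (repo is assumed to be a git.Repo instance):
    # commits, elapsed = timed_report("list commits", lambda: list(repo.iter_commits()))
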
diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py
index 48527f0e..ff664c10 100644
--- a/git/test/performance/test_streams.py
+++ b/git/test/performance/test_streams.py
@@ -1,4 +1,6 @@
"""Performance data streaming performance"""
+from __future__ import print_function
+
from time import time
import os
import sys
@@ -19,6 +21,7 @@ from gitdb import (
IStream
)
+
class TestObjDBPerformance(TestBigRepoR):
large_data_size_bytes = 1000 * 1000 * 10 # some MiB should do it
@@ -32,11 +35,11 @@ class TestObjDBPerformance(TestBigRepoR):
for randomize in range(2):
desc = (randomize and 'random ') or ''
- print >> sys.stderr, "Creating %s data ..." % desc
+ print("Creating %s data ..." % desc, file=sys.stderr)
st = time()
size, stream = make_memory_file(self.large_data_size_bytes, randomize)
elapsed = time() - st
- print >> sys.stderr, "Done (in %f s)" % elapsed
+ print("Done (in %f s)" % elapsed, file=sys.stderr)
# writing - due to the compression it will seem faster than it is
st = time()
@@ -49,7 +52,7 @@ class TestObjDBPerformance(TestBigRepoR):
size_kib = size / 1000
msg = "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)"
msg %= (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
- print >> sys.stderr, msg
+ print(msg, file=sys.stderr)
# reading all at once
st = time()
@@ -61,7 +64,7 @@ class TestObjDBPerformance(TestBigRepoR):
assert shadata == stream.getvalue()
msg = "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)"
msg %= (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
- print >> sys.stderr, msg
+ print(msg, file=sys.stderr)
# reading in chunks of 1 MiB
cs = 512 * 1000
@@ -80,8 +83,8 @@ class TestObjDBPerformance(TestBigRepoR):
assert ''.join(chunks) == stream.getvalue()
cs_kib = cs / 1000
- print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (
- size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
+ print("Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)"
+ % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks), file=sys.stderr)
# del db file so git has something to do
os.remove(db_file)
@@ -106,22 +109,22 @@ class TestObjDBPerformance(TestBigRepoR):
fsize_kib = os.path.getsize(db_file) / 1000
msg = "Added %i KiB (filesize = %i KiB) of %s data to using git-hash-object in %f s ( %f Write KiB / s)"
msg %= (size_kib, fsize_kib, desc, gelapsed_add, size_kib / gelapsed_add)
- print >> sys.stderr, msg
+ print(msg, file=sys.stderr)
# compare ...
- print >> sys.stderr, "Git-Python is %f %% faster than git when adding big %s files" % (
- 100.0 - (elapsed_add / gelapsed_add) * 100, desc)
+ print("Git-Python is %f %% faster than git when adding big %s files"
+ % (100.0 - (elapsed_add / gelapsed_add) * 100, desc), file=sys.stderr)
# read all
st = time()
s, t, size, data = rwrepo.git.get_object_data(gitsha)
gelapsed_readall = time() - st
- print >> sys.stderr, "Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)" % (
- size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall)
+ print("Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)"
+ % (size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall), file=sys.stderr)
# compare
- print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %sfiles" % (
- 100.0 - (elapsed_readall / gelapsed_readall) * 100, desc)
+ print("Git-Python is %f %% faster than git when reading big %sfiles"
+ % (100.0 - (elapsed_readall / gelapsed_readall) * 100, desc), file=sys.stderr)
# read chunks
st = time()
@@ -134,9 +137,9 @@ class TestObjDBPerformance(TestBigRepoR):
gelapsed_readchunks = time() - st
msg = "Read %i KiB of %s data in %i KiB chunks from git-cat-file in %f s ( %f Read KiB / s)"
msg %= (size_kib, desc, cs_kib, gelapsed_readchunks, size_kib / gelapsed_readchunks)
- print >> sys.stderr, msg
+ print(msg, file=sys.stderr)
# compare
- print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %s files in chunks" % (
- 100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc)
+ print ("Git-Python is %f %% faster than git when reading big %s files in chunks"
+ % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc), file=sys.stderr)
# END for each randomization factor
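
test_streams.py benchmarks the loose object database against git-hash-object / git-cat-file for one large blob, reading it back both at once and in 512 KB chunks (note the tests divide by 1000 yet label the unit KiB). A self-contained sketch of the chunked-read measurement it performs, with a made-up in-memory stream standing in for the odb stream:

    from __future__ import print_function

    import io
    import sys
    from time import time

    def read_in_chunks(stream, chunk_size=512 * 1000):
        """Read a stream in fixed-size chunks, reporting throughput to stderr."""
        chunks = []
        st = time()
        while True:
            data = stream.read(chunk_size)
            if not data:
                break
            chunks.append(data)
        elapsed = max(time() - st, 1e-9)
        size_kib = sum(len(c) for c in chunks) / 1000.0
        print("Read %.1f KiB in %i KiB chunks in %f s ( %f Read KiB / s)"
              % (size_kib, chunk_size // 1000, elapsed, size_kib / elapsed),
              file=sys.stderr)
        return b"".join(chunks)

    data = read_in_chunks(io.BytesIO(b"x" * (10 * 1000 * 1000)))  # ~10 MB of dummy data
    assert len(data) == 10 * 1000 * 1000
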
diff --git a/git/test/performance/test_utils.py b/git/test/performance/test_utils.py
index 0bd47098..af8e8047 100644
--- a/git/test/performance/test_utils.py
+++ b/git/test/performance/test_utils.py
@@ -1,4 +1,5 @@
"""Performance of utilities"""
+from __future__ import print_function
from time import time
import sys
@@ -43,8 +44,8 @@ class TestUtilPerformance(TestBigRepoR):
cli.attr
# END for each access
elapsed = time() - st
- print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (
- cls.__name__, ni, elapsed, ni / elapsed)
+ print("Accessed %s.attr %i times in %s s ( %f acc / s)"
+ % (cls.__name__, ni, elapsed, ni / elapsed), file=sys.stderr)
# END for each class type
# check num of sequence-acceses
@@ -59,8 +60,8 @@ class TestUtilPerformance(TestBigRepoR):
# END for
elapsed = time() - st
na = ni * 3
- print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (
- cls.__name__, na, elapsed, na / elapsed)
+ print("Accessed %s[x] %i times in %s s ( %f acc / s)"
+ % (cls.__name__, na, elapsed, na / elapsed), file=sys.stderr)
# END for each sequence
def test_instantiation(self):
@@ -85,8 +86,8 @@ class TestUtilPerformance(TestBigRepoR):
# END handle empty cls
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i %ss of size %i in %f s ( %f inst / s)" % (
- ni, cls.__name__, mni, elapsed, ni / elapsed)
+ print("Created %i %ss of size %i in %f s ( %f inst / s)"
+ % (ni, cls.__name__, mni, elapsed, ni / elapsed), file=sys.stderr)
# END for each type
# END for each item count
@@ -96,14 +97,16 @@ class TestUtilPerformance(TestBigRepoR):
(1, 2, 3, 4)
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
+ print("Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)"
+ % (ni, elapsed, ni / elapsed), file=sys.stderr)
st = time()
for i in xrange(ni):
tuple((1, 2, 3, 4))
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
+ print("Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)"
+ % (ni, elapsed, ni / elapsed), file=sys.stderr)
def test_unpacking_vs_indexing(self):
ni = 1000000
@@ -116,24 +119,24 @@ class TestUtilPerformance(TestBigRepoR):
one, two, three, four = sequence
# END for eac iteration
elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i in %f s ( %f acc / s)" % (
- ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+ print("Unpacked %i %ss of size %i in %f s ( %f acc / s)"
+ % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed), file=sys.stderr)
st = time()
for i in xrange(ni):
one, two, three, four = sequence[0], sequence[1], sequence[2], sequence[3]
# END for eac iteration
elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i individually in %f s ( %f acc / s)" % (
- ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+ print("Unpacked %i %ss of size %i individually in %f s ( %f acc / s)"
+ % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed), file=sys.stderr)
st = time()
for i in xrange(ni):
one, two = sequence[0], sequence[1]
# END for eac iteration
elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)" % (
- ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+ print("Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)"
+ % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed), file=sys.stderr)
# END for each sequence
def test_large_list_vs_iteration(self):
@@ -150,14 +153,16 @@ class TestUtilPerformance(TestBigRepoR):
i
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Iterated %i items from list in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
+ print("Iterated %i items from list in %f s ( %f acc / s)"
+ % (ni, elapsed, ni / elapsed), file=sys.stderr)
st = time()
for i in slow_iter(ni):
i
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Iterated %i items from iterator in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
+ print("Iterated %i items from iterator in %f s ( %f acc / s)"
+ % (ni, elapsed, ni / elapsed), file=sys.stderr)
# END for each number of iterations
def test_type_vs_inst_class(self):
@@ -173,12 +178,13 @@ class TestUtilPerformance(TestBigRepoR):
inst.__class__()
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i items using inst.__class__ in %f s ( %f items / s)" % (
- ni, elapsed, ni / elapsed)
+ print("Created %i items using inst.__class__ in %f s ( %f items / s)"
+ % (ni, elapsed, ni / elapsed), file=sys.stderr)
st = time()
for i in xrange(ni):
type(inst)()
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i items using type(inst)() in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)
+ print("Created %i items using type(inst)() in %f s ( %f items / s)"
+ % (ni, elapsed, ni / elapsed), file=sys.stderr)
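
test_utils.py is a set of micro-benchmarks (attribute vs. index access, tuple unpacking vs. indexing, list vs. iterator traversal, `inst.__class__()` vs. `type(inst)()`), each timed with the same loop-and-print pattern. A small sketch of that pattern for one of the comparisons; for anything beyond a quick check the stdlib `timeit` module is the more robust tool:

    from __future__ import print_function

    import sys
    from time import time

    def bench(label, fn, n=1000000):
        """Call fn() n times and report the rate to stderr (ad-hoc, not timeit-grade)."""
        st = time()
        for _ in range(n):
            fn()
        elapsed = max(time() - st, 1e-9)  # guard against a zero reading
        print("%s: %i calls in %f s ( %f calls / s)"
              % (label, n, elapsed, n / elapsed), file=sys.stderr)

    seq = (1, 2, 3, 4)

    def unpack():
        one, two, three, four = seq

    def index():
        one, two, three, four = seq[0], seq[1], seq[2], seq[3]

    bench("tuple unpacking", unpack)
    bench("individual indexing", index)
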