Diffstat (limited to 'git/index/fun.py')
-rw-r--r--  git/index/fun.py  44
1 file changed, 22 insertions, 22 deletions
diff --git a/git/index/fun.py b/git/index/fun.py
index e39b09d6..ede7e43f 100644
--- a/git/index/fun.py
+++ b/git/index/fun.py
@@ -31,7 +31,7 @@ from typ import (
)
CE_NAMEMASK_INV = ~CE_NAMEMASK
-from util import (
+from util import (
pack,
unpack
)
@@ -55,19 +55,19 @@ def stat_mode_to_index_mode(mode):
def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer):
"""Write the cache represented by entries to a stream
-
+
:param entries: **sorted** list of entries
:param stream: stream to wrap into the AdapterStreamCls - it is used for
final output.
-
+
:param ShaStreamCls: Type to use when writing to the stream. It produces a sha
while writing to it, before the data is passed on to the wrapped stream
-
+
:param extension_data: any kind of data to write as a trailer, it must begin
a 4 byte identifier, followed by its size ( 4 bytes )"""
# wrap the stream into a compatible writer
stream = ShaStreamCls(stream)
-
+
tell = stream.tell
write = stream.write
@@ -98,14 +98,14 @@ def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1
# write the sha over the content
stream.write_sha()
-
+
def read_header(stream):
"""Return tuple(version_long, num_entries) from the given stream"""
type_id = stream.read(4)
if type_id != "DIRC":
raise AssertionError("Invalid index file header: %r" % type_id)
version, num_entries = unpack(">LL", stream.read(4 * 2))
-
+
# TODO: handle version 3: extended data, see read-cache.c
assert version in (1, 2)
return version, num_entries
@@ -130,7 +130,7 @@ def read_cache(stream):
version, num_entries = read_header(stream)
count = 0
entries = dict()
-
+
read = stream.read
tell = stream.tell
while count < num_entries:
@@ -141,7 +141,7 @@ def read_cache(stream):
unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
path_size = flags & CE_NAMEMASK
path = read(path_size)
-
+
real_size = ((tell() - beginoffset + 8) & ~7)
data = read((beginoffset + real_size) - tell())
entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
@@ -163,13 +163,13 @@ def read_cache(stream):
# truncate the sha in the end as we will dynamically create it anyway
extension_data = extension_data[:-20]
-
+
return (version, entries, extension_data, content_sha)
-
+
def write_tree_from_cache(entries, odb, sl, si=0):
"""Create a tree from the given sorted list of entries and put the respective
trees into the given object database
-
+
:param entries: **sorted** list of IndexEntries
:param odb: object database to store the trees in
:param si: start index at which we should start creating subtrees
@@ -202,28 +202,28 @@ def write_tree_from_cache(entries, odb, sl, si=0):
# END abort on base mismatch
xi += 1
# END find common base
-
+
# enter recursion
# ci - 1 as we want to count our current item as well
sha, tree_entry_list = write_tree_from_cache(entries, odb, slice(ci-1, xi), rbound+1)
tree_items_append((sha, S_IFDIR, base))
-
+
# skip ahead
ci = xi
# END handle bounds
# END for each entry
-
+
# finally create the tree
sio = StringIO()
tree_to_stream(tree_items, sio.write)
sio.seek(0)
-
+
istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
return (istream.binsha, tree_items)
-
+
def _tree_entry_to_baseindexentry(tree_entry, stage):
- return BaseIndexEntry((tree_entry[1], tree_entry[0], stage <<CE_STAGESHIFT, tree_entry[2]))
-
+ return BaseIndexEntry((tree_entry[1], tree_entry[0], stage << CE_STAGESHIFT, tree_entry[2]))
+
def aggressive_tree_merge(odb, tree_shas):
"""
:return: list of BaseIndexEntries representing the aggressive merge of the given
@@ -235,7 +235,7 @@ def aggressive_tree_merge(odb, tree_shas):
If 3 are given, a 3 way merge is performed"""
out = list()
out_append = out.append
-
+
# one and two way is the same for us, as we don't have to handle an existing
# index, instrea
if len(tree_shas) in (1,2):
@@ -244,7 +244,7 @@ def aggressive_tree_merge(odb, tree_shas):
# END for each entry
return out
# END handle single tree
-
+
if len(tree_shas) > 3:
raise ValueError("Cannot handle %i trees at once" % len(tree_shas))
@@ -273,7 +273,7 @@ def aggressive_tree_merge(odb, tree_shas):
out_append(_tree_entry_to_baseindexentry(theirs, 0))
# END handle modification
else:
-
+
if ours[0] != base[0] or ours[1] != base[1]:
# they deleted it, we changed it, conflict
out_append(_tree_entry_to_baseindexentry(base, 1))
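
For reference, the on-disk layout that read_header() and read_cache() above walk through can be exercised outside of GitPython with nothing but the standard struct module. The following is a minimal sketch, not GitPython's API: the index path ".git/index" and the function name peek_index are assumptions for illustration, while the b"DIRC" signature check, the ">LLLLLL20sH" stat/sha/flags unpack, the CE_NAMEMASK path-length mask, and the ((tell() - beginoffset + 8) & ~7) padding arithmetic mirror the code shown in the diff.

import struct

CE_NAMEMASK = 0x0FFF   # low 12 bits of the flags word hold the path length

def peek_index(path=".git/index"):  # hypothetical helper, path is an assumption
    """Read the header and first entry of a version 1/2 index file."""
    with open(path, "rb") as fp:
        # header, as in read_header(): 4-byte signature, then two big-endian words
        signature = fp.read(4)
        if signature != b"DIRC":
            raise AssertionError("Invalid index file header: %r" % signature)
        version, num_entries = struct.unpack(">LL", fp.read(4 * 2))

        # first entry, as in read_cache(): ctime/mtime, six stat words, 20-byte sha, 2-byte flags
        beginoffset = fp.tell()
        ctime_s, ctime_ns, mtime_s, mtime_ns = struct.unpack(">LLLL", fp.read(16))
        dev, ino, mode, uid, gid, size, sha, flags = \
            struct.unpack(">LLLLLL20sH", fp.read(20 + 4 * 6 + 2))
        name = fp.read(flags & CE_NAMEMASK)

        # entries are NUL-padded so each record occupies a multiple of 8 bytes;
        # this mirrors real_size = ((tell() - beginoffset + 8) & ~7) in read_cache()
        real_size = ((fp.tell() - beginoffset + 8) & ~7)
        fp.read((beginoffset + real_size) - fp.tell())

        return version, num_entries, oct(mode), name

if __name__ == "__main__":
    print(peek_index())

Running it from a repository root prints the index version, the entry count, and the mode and path of the first entry, which is the same data write_cache() serializes back out (followed by the optional extension trailer and the SHA over the whole content).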