-rw-r--r--  Documentation/RelNotes/2.10.2.txt | 66
-rw-r--r--  Documentation/RelNotes/2.11.0.txt | 105
-rw-r--r--  Documentation/config.txt | 2
-rw-r--r--  Documentation/diff-options.txt | 8
-rw-r--r--  Documentation/git-fmt-merge-msg.txt | 4
-rw-r--r--  Documentation/git-interpret-trailers.txt | 14
-rw-r--r--  Documentation/git-stash.txt | 3
-rw-r--r--  Documentation/git.txt | 3
-rw-r--r--  Documentation/gitattributes.txt | 157
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  builtin/archive.c | 4
-rw-r--r--  builtin/commit.c | 29
-rw-r--r--  builtin/fetch.c | 34
-rw-r--r--  builtin/pack-objects.c | 2
-rw-r--r--  builtin/receive-pack.c | 4
-rw-r--r--  builtin/remote-ext.c | 4
-rw-r--r--  builtin/rev-parse.c | 5
-rw-r--r--  builtin/revert.c | 46
-rw-r--r--  builtin/submodule--helper.c | 8
-rw-r--r--  builtin/upload-archive.c | 4
-rw-r--r--  cache.h | 11
-rw-r--r--  connect.c | 2
-rw-r--r--  contrib/coccinelle/object_id.cocci | 15
-rwxr-xr-x  contrib/long-running-filter/example.pl | 128
-rw-r--r--  convert.c | 376
-rw-r--r--  daemon.c | 27
-rw-r--r--  diff-lib.c | 18
-rw-r--r--  diff.c | 10
-rw-r--r--  diff.h | 3
-rw-r--r--  environment.c | 2
-rw-r--r--  git-compat-util.h | 4
-rw-r--r--  git-sh-setup.sh | 6
-rwxr-xr-x  git-stash.sh | 15
-rwxr-xr-x  git-svn.perl | 13
-rw-r--r--  hex.c | 3
-rw-r--r--  http-backend.c | 2
-rw-r--r--  pack-bitmap.c | 2
-rw-r--r--  path.c | 3
-rw-r--r--  perl/Git.pm | 16
-rw-r--r--  perl/Git/SVN.pm | 36
-rw-r--r--  perl/Git/SVN/Editor.pm | 12
-rw-r--r--  perl/Git/SVN/Fetcher.pm | 15
-rw-r--r--  perl/Git/SVN/Migration.pm | 37
-rw-r--r--  pkt-line.c | 152
-rw-r--r--  pkt-line.h | 12
-rw-r--r--  read-cache.c | 9
-rw-r--r--  run-command.c | 39
-rw-r--r--  run-command.h | 4
-rw-r--r--  sequencer.c | 682
-rw-r--r--  sequencer.h | 23
-rw-r--r--  sha1_file.c | 52
-rw-r--r--  sha1_name.c | 36
-rw-r--r--  shallow.c | 2
-rwxr-xr-x  t/t0021-conversion.sh | 502
-rwxr-xr-x  t/t0021/rot13-filter.pl | 192
-rwxr-xr-x  t/t0060-path-utils.sh | 29
-rwxr-xr-x  t/t2203-add-intent.sh | 41
-rwxr-xr-x  t/t3501-revert-cherry-pick.sh | 2
-rwxr-xr-x  t/t3903-stash.sh | 35
-rwxr-xr-x  t/t7064-wtstatus-pv2.sh | 4
-rwxr-xr-x  t/t7513-interpret-trailers.sh | 299
-rw-r--r--  t/test-lib.sh | 9
-rwxr-xr-x  t/valgrind/valgrind.sh | 12
-rwxr-xr-x [-rw-r--r--]  templates/hooks--pre-receive.sample | 0
-rw-r--r--  trailer.c | 620
-rw-r--r--  transport.c | 84
-rw-r--r--  transport.h | 3
-rw-r--r--  upload-pack.c | 34
-rw-r--r--  write_or_die.c | 13
-rw-r--r--  wt-status.c | 7
70 files changed, 3289 insertions, 868 deletions
diff --git a/Documentation/RelNotes/2.10.2.txt b/Documentation/RelNotes/2.10.2.txt
index ed2de0dc20..c4d4397023 100644
--- a/Documentation/RelNotes/2.10.2.txt
+++ b/Documentation/RelNotes/2.10.2.txt
@@ -41,5 +41,71 @@ Fixes since v2.10.1
that were detected without dying itself, but under some conditions
it didn't and died instead, which has been fixed.
+ * When "git fetch" tries to find where the history of the repository
+ it runs in has diverged from what the other side has, it has a
+ mechanism to avoid digging too deep into irrelevant side branches.
+ This however did not work well over the "smart-http" transport due
+ to a design bug, which has been fixed.
+
+ * When we started cURL to talk to imap server when a new enough
+ version of cURL library is available, we forgot to explicitly add
+ imap(s):// before the destination. To some folks, that didn't work
+ and the library tried to make HTTP(s) requests instead.
+
+ * The ./configure script generated from configure.ac was taught how
+ to detect support of SSL by libcurl better.
+
+ * http.emptyauth configuration is a way to allow an empty username to
+ pass when attempting to authenticate using mechanisms like
+ Kerberos. We took an unspecified (NULL) username and sent ":"
+ (i.e. no username, no password) to CURLOPT_USERPWD, but did not do
+ the same when the username is explicitly set to an empty string.
+
+ * "git clone" of a local repository can be done at the filesystem
+ level, but the codepath did not check errors while copying and
+ adjusting the file that lists alternate object stores.
+
+ * Documentation for "git commit" was updated to clarify that "commit
+ -p <paths>" adds to the current contents of the index to come up
+ with what to commit.
+
+ * A stray symbolic link in $GIT_DIR/refs/ directory could make name
+ resolution loop forever, which has been corrected.
+
+ * The "submodule.<name>.path" stored in .gitmodules is never copied
+ to .git/config and such a key in .git/config has no meaning, but
+ the documentation described it and submodule.<name>.url next to
+ each other as if both belong to .git/config. This has been fixed.
+
+ * Recent git allows submodule.<name>.branch to use a special token
+ "." instead of the branch name; the documentation has been updated
+ to describe it.
+
+ * In a worktree connected to a repository elsewhere, created via "git
+ worktree", "git checkout" attempts to protect users from confusion
+ by refusing to check out a branch that is already checked out in
+ another worktree. However, this also prevented checking out a
+ branch, which is designated as the primary branch of a bare
+ repository, in a worktree that is connected to the bare
+ repository. The check has been corrected to allow it.
+
+ * "git rebase" immediately after "git clone" failed to find the fork
+ point from the upstream.
+
+ * When fetching from a remote that has many tags that are irrelevant
+ to branches we are following, we used to waste way too many cycles
+ when checking if the object pointed at by a tag (that we are not
+ going to fetch!) exists in our repository too carefully.
+
+ * The Travis CI configuration we ship ran the tests with the --verbose
+ option, but this risks non-TAP output that happens to be "ok" being
+ misinterpreted as TAP signalling a test that passed. This resulted
+ in unnecessary failure. This has been corrected by introducing a
+ new mode to run our tests in the test harness to send the verbose
+ output separately to the log file.
+
+ * Some AsciiDoc formatter mishandles a displayed illustration with
+ tabs in it. Adjust a few of them in merge-base documentation to
+ work around them.
Also contains minor documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.11.0.txt b/Documentation/RelNotes/2.11.0.txt
index f23759b0c0..f8e8e3f390 100644
--- a/Documentation/RelNotes/2.11.0.txt
+++ b/Documentation/RelNotes/2.11.0.txt
@@ -5,8 +5,9 @@ Backward compatibility notes.
* An empty string used as a pathspec element has always meant
'everything matches', but it is too easy to write a script that
- finds a path to remove in $path and run 'git rm "$paht"', which
- ends up removing everything. This release starts warning about the
+ finds a path to remove in $path and run 'git rm "$paht"' by
+ mistake (when the user meant to give "$path"), which ends up
+ removing everything. This release starts warning about the
use of an empty string that is used for 'everything matches' and
asks users to use a more explicit '.' for that instead.
@@ -14,11 +15,16 @@ Backward compatibility notes.
eventually the warning can be turned into a hard error, upgrading
the deprecation into removal of this (mis)feature.
-
* The historical argument order "git merge <msg> HEAD <commit>..."
has been deprecated for quite some time, and will be removed in the
next release (not this one).
+ * The default abbreviation length, which has historically been 7, now
+ scales as the repository grows, using the approximate number of
+ objects in the repository and a bit of math around the birthday
+ paradox. The logic suggests to use 12 hexdigits for the Linux
+ kernel, and 9 to 10 for Git itself.
+
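Purely for illustration (not part of this patch), here is a rough, hypothetical
shell sketch of that scaling rule; the helper name is made up and the object
counts are ballpark figures:
------------
# With roughly N objects, collisions among abbreviated names are
# expected around 16^(len/2) names (birthday bound), so pick
# len >= log2(N)/2 hex digits and never go below the historical 7.
suggested_abbrev_len () {
	count=$1
	bits=0
	while test "$count" -gt 0
	do
		count=$((count / 2))
		bits=$((bits + 1))
	done
	len=$(( (bits + 1) / 2 ))
	test "$len" -lt 7 && len=7
	echo "$len"
}

suggested_abbrev_len 5000000   # ~Linux kernel scale -> 12
suggested_abbrev_len 230000    # ~git.git scale      -> 9
------------
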
Updates since v2.10
-------------------
@@ -133,6 +139,24 @@ UI, Workflows & Features
learned to turn "git describe" output (e.g. v2.9.3-599-g2376d31787)
into clickable links in its output.
+ * When new paths were added by "git add -N" to the index, it was
+ enough to circumvent the check by "git commit" that refrains from
+ making an empty commit without "--allow-empty". The same logic
+ prevented "git status" from showing such a path as "new file" in
+ the "Changes not staged for commit" section.
+
+ * The smudge/clean filter API expects an external process to be
+ spawned to filter the contents for each path that has a filter
+ defined. A new type of "process" filter API has been added to
+ allow the first request to run the filter for a path to spawn a
+ single process, and all filtering needs for multiple paths are
+ then served by this single process, reducing the process creation
+ overhead.
+
+ * The user always has to say "stash@{$N}" when naming a single
+ element in the default location of the stash, i.e. reflogs in
+ refs/stash. The "git stash" command learned to accept "git stash
+ apply 4" as a short-hand for "git stash apply stash@{4}".
+
Performance, Internal Implementation, Development Support etc.
@@ -212,6 +236,26 @@ Performance, Internal Implementation, Development Support etc.
by reducing use of timestamp-ordered commit-list, which was
replaced with a priority queue.
+ * "git diff --no-index" codepath has been updated not to try to peek
+ into .git/ directory that happens to be under the current
+ directory, when we know we are operating outside any repository.
+
+ * Update of the sequencer codebase to make it reusable to reimplement
+ "rebase -i" continues.
+
+ * Git generally does not explicitly close file descriptors that were
+ open in the parent process when spawning a child process, but most
+ of the time the child does not want to access them. As Windows does
+ not allow removing or renaming a file that has a file descriptor
+ open, a slow-to-exit child can even break the parent process by
+ holding onto them. Use O_CLOEXEC flag to open files in various
+ codepaths.
+
+ * Update "interpret-trailers" machinery and teaches it that people in
+ real world write all sorts of crufts in the "trailer" that was
+ originally designed to have the neat-o "Mail-Header: like thing"
+ and nothing else.
+
Also contains various documentation updates and code clean-ups.
@@ -323,7 +367,6 @@ notes for details).
* The pretty-format specifier "%C(auto)" used by the "log" family of
commands to enable coloring of the output is taught to also issue a
color-reset sequence to the output.
- (merge 82b83da8d3 rs/c-auto-resets-attributes later to maint).
* A shell script example in check-ref-format documentation has been
fixed.
@@ -340,7 +383,6 @@ notes for details).
beyond the end of the mapped region. This was fixed by introducing
a regexec_buf() helper that takes a <ptr,len> pair with REG_STARTEND
extension.
- (merge 842a516cb0 js/regexec-buf later to maint).
* The procedure to build Git on Mac OS X for Travis CI hardcoded the
internal directory structure we assumed HomeBrew uses, which was a
@@ -365,7 +407,6 @@ notes for details).
mechanism to avoid digging too deep into irrelevant side branches.
This however did not work well over the "smart-http" transport due
to a design bug, which has been fixed.
- (merge 06b3d386e0 jt/fetch-pack-in-vain-count-with-stateless later to maint).
* In the codepath that comes up with the hostname to be used in an
e-mail when the user didn't tell us, we looked at ai_canonname
@@ -415,11 +456,9 @@ notes for details).
version of cURL library is available, we forgot to explicitly add
imap(s):// before the destination. To some folks, that didn't work
and the library tried to make HTTP(s) requests instead.
- (merge d2d07ab861 ak/curl-imap-send-explicit-scheme later to maint).
* The ./configure script generated from configure.ac was taught how
to detect support of SSL by libcurl better.
- (merge 924b7eb1c9 dp/autoconf-curl-ssl later to maint).
* The command-line completion script (in contrib/) learned to
complete "git cmd ^mas<HT>" to complete the negative end of
@@ -446,27 +485,22 @@ notes for details).
Kerberos. We took an unspecified (NULL) username and sent ":"
(i.e. no username, no password) to CURLOPT_USERPWD, but did not do
the same when the username is explicitly set to an empty string.
- (merge 5275c3081c dt/http-empty-auth later to maint).
* "git clone" of a local repository can be done at the filesystem
level, but the codepath did not check errors while copying and
adjusting the file that lists alternate object stores.
- (merge 22d3b8de1b jk/clone-copy-alternates-fix later to maint).
* Documentation for "git commit" was updated to clarify that "commit
-p <paths>" adds to the current contents of the index to come up
with what to commit.
- (merge 7431596ab1 nd/commit-p-doc later to maint).
* A stray symbolic link in $GIT_DIR/refs/ directory could make name
resolution loop forever, which has been corrected.
- (merge e8c42cb9ce jk/ref-symlink-loop later to maint).
* The "submodule.<name>.path" stored in .gitmodules is never copied
to .git/config and such a key in .git/config has no meaning, but
the documentation described it and submodule.<name>.url next to
each other as if both belong to .git/config. This has been fixed.
- (merge 72710165c9 sb/submodule-config-doc-drop-path later to maint).
* In a worktree connected to a repository elsewhere, created via "git
worktree", "git checkout" attempts to protect users from confusion
@@ -475,29 +509,23 @@ notes for details).
branch, which is designated as the primary branch of a bare
repository, in a worktree that is connected to the bare
repository. The check has been corrected to allow it.
- (merge 171c646f8c dk/worktree-dup-checkout-with-bare-is-ok later to maint).
* "git rebase" immediately after "git clone" failed to find the fork
point from the upstream.
- (merge 4f21454b55 jk/merge-base-fork-point-without-reflog later to maint).
* When fetching from a remote that has many tags that are irrelevant
to branches we are following, we used to waste way too many cycles
when checking if the object pointed at by a tag (that we are not
going to fetch!) exists in our repository too carefully.
- (merge 5827a03545 jk/fetch-quick-tag-following later to maint).
* Protect our code from over-eager compilers.
- (merge 0ac52a38e8 jk/tighten-alloc later to maint).
* Recent git allows submodule.<name>.branch to use a special token
"." instead of the branch name; the documentation has been updated
to describe it.
- (merge 15ef78008a bw/submodule-branch-dot-doc later to maint).
* A hot-fix for a test added by a recent topic that went to both
'master' and 'maint' already.
- (merge 76e368c378 tg/add-chmod+x-fix later to maint).
* "git send-email" attempts to pick up valid e-mails from the
trailers, but people in real world write non-addresses there, like
@@ -511,18 +539,39 @@ notes for details).
in unnecessary failure. This has been corrected by introducing a
new mode to run our tests in the test harness to send the verbose
output separately to the log file.
- (merge 614fe01521 jk/tap-verbose-fix later to maint).
* Some AsciiDoc formatter mishandles a displayed illustration with
tabs in it. Adjust a few of them in merge-base documentation to
work around them.
- (merge 6750f62699 po/fix-doc-merge-base-illustration later to maint).
+
+ * A fix for a minor regression in "git submodule" that was
+ introduced when more helper functions were reimplemented in C.
+ (merge 77b63ac31e sb/submodule-ignore-trailing-slash later to maint).
+
+ * The code that we have used for the past 10+ years to cycle
+ 4-element ring buffers turns out to be not quite portable in
+ theoretical world.
+ (merge bb84735c80 rs/ring-buffer-wraparound later to maint).
+
+ * "git daemon" used fixed-length buffers to turn URL to the
+ repository the client asked for into the server side directory
+ path, using snprintf() to avoid overflowing these buffers, but
+ allowed possibly truncated paths to the directory. This has been
+ tightened to reject such a request that causes overlong path to be
+ required to serve.
+ (merge 6bdb0083be jk/daemon-path-ok-check-truncation later to maint).
+
+ * Recent update to git-sh-setup (a library of shell functions that
+ are used by our in-tree scripted Porcelain commands) included
+ another shell library git-sh-i18n without specifying where it is,
+ relying on the $PATH. This has been fixed to be more explicit by
+ prefixing $(git --exec-path) output in front.
+ (merge 1073094f30 ak/sh-setup-dot-source-i18n-fix later to maint).
* Other minor doc, test and build updates and code cleanups.
- (merge a94bb68397 rs/cocci later to maint).
- (merge 641c900b2c js/reset-usage later to maint).
- (merge 30cfe72d37 rs/pretty-format-color-doc-fix later to maint).
- (merge d709f1fb9d jc/diff-unique-abbrev-comments later to maint).
- (merge 13092a916d jc/cocci-xstrdup-or-null later to maint).
- (merge 86009f32bb pb/test-parse-options-expect later to maint).
- (merge 749a2279a4 yk/git-tag-remove-mention-of-old-layout-in-doc later to maint).
+ (merge 5c238e29a8 jk/common-main later to maint).
+ (merge 5a5749e45b ak/pre-receive-hook-template-modefix later to maint).
+ (merge 6d834ac8f1 jk/rebase-config-insn-fmt-docfix later to maint).
+ (merge de9f7fa3b0 rs/commit-pptr-simplify later to maint).
+ (merge 4259d693fc sc/fmt-merge-msg-doc-markup-fix later to maint).
+ (merge 28fab7b23d nd/test-helpers later to maint).
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 27069ac032..a0ab66aae7 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -2450,7 +2450,7 @@ rebase.missingCommitsCheck::
command in the todo-list.
Defaults to "ignore".
-rebase.instructionFormat
+rebase.instructionFormat::
A format string, as specified in linkgit:git-log[1], to be used for
the instruction list during an interactive rebase. The format will automatically
have the long commit hash prepended to the format.
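-
As a usage sketch only (the format string below is an arbitrary example, not a
recommended default), the option can be set with any git-log format:
------------
# Prefix each "pick <hash> ..." line in the rebase -i todo list with
# the commit author's name; the long hash is prepended automatically.
git config rebase.instructionFormat '(%an) %s'
git rebase -i HEAD~3
------------
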
diff --git a/Documentation/diff-options.txt b/Documentation/diff-options.txt
index 29630c2389..e6215c372c 100644
--- a/Documentation/diff-options.txt
+++ b/Documentation/diff-options.txt
@@ -572,5 +572,13 @@ endif::git-format-patch[]
--line-prefix=<prefix>::
Prepend an additional prefix to every line of output.
+--ita-invisible-in-index::
+ By default entries added by "git add -N" appear as an existing
+ empty file in "git diff" and a new file in "git diff --cached".
+ This option makes the entry appear as a new file in "git diff"
+ and non-existent in "git diff --cached". This option could be
+ reverted with `--ita-visible-in-index`. Both options are
+ experimental and could be removed in future.
+
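A minimal sketch of the difference the option makes; the repository and file
names here are made up for illustration:
------------
git init ita-demo && cd ita-demo
git commit --allow-empty -m init
echo hello >newfile
git add -N newfile                           # intent-to-add entry
git diff --ita-invisible-in-index            # newfile shows as a new file
git diff --cached --ita-invisible-in-index   # newfile does not appear
------------
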
For more detailed explanation on these common options, see also
linkgit:gitdiffcore[7].
diff --git a/Documentation/git-fmt-merge-msg.txt b/Documentation/git-fmt-merge-msg.txt
index 6526b178e8..44892c447e 100644
--- a/Documentation/git-fmt-merge-msg.txt
+++ b/Documentation/git-fmt-merge-msg.txt
@@ -60,10 +60,10 @@ merge.summary::
EXAMPLE
-------
---
+---------
$ git fetch origin master
$ git fmt-merge-msg --log <$GIT_DIR/FETCH_HEAD
---
+---------
Print a log message describing a merge of the "master" branch from
the "origin" remote.
diff --git a/Documentation/git-interpret-trailers.txt b/Documentation/git-interpret-trailers.txt
index 93d1db6528..e99bda6add 100644
--- a/Documentation/git-interpret-trailers.txt
+++ b/Documentation/git-interpret-trailers.txt
@@ -48,19 +48,21 @@ with only spaces at the end of the commit message part, one blank line
will be added before the new trailer.
Existing trailers are extracted from the input message by looking for
-a group of one or more lines that contain a colon (by default), where
-the group is preceded by one or more empty (or whitespace-only) lines.
+a group of one or more lines that (i) is all trailers, or (ii) contains at
+least one Git-generated trailer and consists of at least 25% trailers.
+The group must be preceded by one or more empty (or whitespace-only) lines.
The group must either be at the end of the message or be the last
non-whitespace lines before a line that starts with '---'. Such three
minus signs start the patch part of the message.
-When reading trailers, there can be whitespaces before and after the
+When reading trailers, there can be whitespaces after the
token, the separator and the value. There can also be whitespaces
-inside the token and the value.
+inside the token and the value. The value may be split over multiple lines with
+each subsequent line starting with whitespace, like the "folding" in RFC 822.
Note that 'trailers' do not follow and are not intended to follow many
-rules for RFC 822 headers. For example they do not follow the line
-folding rules, the encoding rules and probably many other rules.
+rules for RFC 822 headers. For example they do not follow
+the encoding rules and probably many other rules.
OPTIONS
-------
diff --git a/Documentation/git-stash.txt b/Documentation/git-stash.txt
index 92df596e5f..2e9cef06e6 100644
--- a/Documentation/git-stash.txt
+++ b/Documentation/git-stash.txt
@@ -39,7 +39,8 @@ The latest stash you created is stored in `refs/stash`; older
stashes are found in the reflog of this reference and can be named using
the usual reflog syntax (e.g. `stash@{0}` is the most recently
created stash, `stash@{1}` is the one before it, `stash@{2.hours.ago}`
-is also possible).
+is also possible). Stashes may also be referenced by specifying just the
+stash index (e.g. the integer `n` is equivalent to `stash@{n}`).
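
For example (assuming at least two stash entries exist):
------------
git stash list
git stash apply 1      # equivalent to: git stash apply stash@{1}
------------
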
OPTIONS
-------
diff --git a/Documentation/git.txt b/Documentation/git.txt
index 2cf7e225f5..ab7215eee2 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -44,9 +44,10 @@ unreleased) version of Git, that is available from the 'master'
branch of the `git.git` repository.
Documentation for older releases are available here:
-* link:v2.10.1/git.html[documentation for release 2.10.1]
+* link:v2.10.2/git.html[documentation for release 2.10.2]
* release notes for
+ link:RelNotes/2.10.2.txt[2.10.2],
link:RelNotes/2.10.1.txt[2.10.1],
link:RelNotes/2.10.0.txt[2.10].
diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt
index 7aff940202..976243a63e 100644
--- a/Documentation/gitattributes.txt
+++ b/Documentation/gitattributes.txt
@@ -293,7 +293,15 @@ checkout, when the `smudge` command is specified, the command is
fed the blob object from its standard input, and its standard
output is used to update the worktree file. Similarly, the
`clean` command is used to convert the contents of worktree file
-upon checkin.
+upon checkin. By default these commands process only a single
+blob and terminate. If a long running `process` filter is used
+in place of `clean` and/or `smudge` filters, then Git can process
+all blobs with a single filter command invocation for the entire
+life of a single Git command, for example `git add --all`. If a
+long running `process` filter is configured then it always takes
+precedence over a configured single blob filter. See section
+below for the description of the protocol used to communicate with
+a `process` filter.
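
A configuration sketch; the driver name "myproto" and the command path are
placeholders, not anything shipped with Git:
------------
# Route *.dat through one long running filter process instead of one
# clean/smudge invocation per blob.
git config filter.myproto.process /usr/local/bin/myproto-filter
git config filter.myproto.required true
echo '*.dat filter=myproto' >>.gitattributes
------------
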
One use of the content filtering is to massage the content into a shape
that is more convenient for the platform, filesystem, and the user to use.
@@ -373,6 +381,153 @@ not exist, or may have different contents. So, smudge and clean commands
should not try to access the file on disk, but only act as filters on the
content provided to them on standard input.
+Long Running Filter Process
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the filter command (a string value) is defined via
+`filter.<driver>.process` then Git can process all blobs with a
+single filter invocation for the entire life of a single Git
+command. This is achieved by using a packet format (pkt-line,
+see technical/protocol-common.txt) based protocol over standard
+input and standard output as follows. All packets, except for the
+"*CONTENT" packets and the "0000" flush packet, are considered
+text and therefore are terminated by a LF.
+
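For orientation, a minimal sketch of the pkt-line framing used in the traces
below (the helper names are made up; technical/protocol-common.txt is the
authoritative definition): a text packet is its payload plus a trailing LF,
prefixed with a four-hex-digit length that counts the four prefix bytes
themselves, and "0000" is the flush packet.
------------
pkt_txt_write () {
	# 4 length bytes + payload + LF
	printf '%04x%s\n' $(( ${#1} + 5 )) "$1"
}
pkt_flush () {
	printf '0000'
}

pkt_txt_write "git-filter-client"   # emits: 0016git-filter-client
pkt_txt_write "version=2"           # emits: 000eversion=2
pkt_flush                           # emits: 0000
------------
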
+Git starts the filter when it encounters the first file
+that needs to be cleaned or smudged. After the filter has started,
+Git sends a welcome message ("git-filter-client"), a list of supported
+protocol version numbers, and a flush packet. Git expects to read a welcome
+response message ("git-filter-server"), exactly one protocol version number
+from the previously sent list, and a flush packet. All further
+communication will be based on the selected version. The remaining
+protocol description below documents "version=2". Please note that
+"version=42" in the example below does not exist and is only there
+to illustrate how the protocol would look with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-filter-client
+packet: git> version=2
+packet: git> version=42
+packet: git> 0000
+packet: git< git-filter-server
+packet: git< version=2
+packet: git< 0000
+packet: git> capability=clean
+packet: git> capability=smudge
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=clean
+packet: git< capability=smudge
+packet: git< 0000
+------------------------
+Supported filter capabilities in version 2 are "clean" and
+"smudge".
+
+Afterwards Git sends a list of "key=value" pairs terminated with
+a flush packet. The list will contain at least the filter command
+(based on the supported capabilities) and the pathname of the file
+to filter relative to the repository root. Right after the flush packet
+Git sends the content split in zero or more pkt-line packets and a
+flush packet to terminate content. Please note that the filter
+must not send any response before it has received the content and the
+final flush packet.
+------------------------
+packet: git> command=smudge
+packet: git> pathname=path/testfile.dat
+packet: git> 0000
+packet: git> CONTENT
+packet: git> 0000
+------------------------
+
+The filter is expected to respond with a list of "key=value" pairs
+terminated with a flush packet. If the filter does not experience
+problems then the list must contain a "success" status. Right after
+these packets the filter is expected to send the content in zero
+or more pkt-line packets and a flush packet at the end. Finally, a
+second list of "key=value" pairs terminated with a flush packet
+is expected. The filter can change the status in the second list
+or keep the status as is with an empty list. Please note that the
+empty list must be terminated with a flush packet regardless.
+
+------------------------
+packet: git< status=success
+packet: git< 0000
+packet: git< SMUDGED_CONTENT
+packet: git< 0000
+packet: git< 0000 # empty list, keep "status=success" unchanged!
+------------------------
+
+If the result content is empty then the filter is expected to respond
+with a "success" status and a flush packet to signal the empty content.
+------------------------
+packet: git< status=success
+packet: git< 0000
+packet: git< 0000 # empty content!
+packet: git< 0000 # empty list, keep "status=success" unchanged!
+------------------------
+
+In case the filter cannot or does not want to process the content,
+it is expected to respond with an "error" status.
+------------------------
+packet: git< status=error
+packet: git< 0000
+------------------------
+
+If the filter experiences an error during processing, then it can
+send the status "error" after the content was (partially or
+completely) sent.
+------------------------
+packet: git< status=success
+packet: git< 0000
+packet: git< HALF_WRITTEN_ERRONEOUS_CONTENT
+packet: git< 0000
+packet: git< status=error
+packet: git< 0000
+------------------------
+
+In case the filter cannot or does not want to process the content
+as well as any future content for the lifetime of the Git process,
+then it is expected to respond with an "abort" status at any point
+in the protocol.
+------------------------
+packet: git< status=abort
+packet: git< 0000
+------------------------
+
+Git neither stops nor restarts the filter process in case the
+"error"/"abort" status is set. However, Git sets its exit code
+according to the `filter.<driver>.required` flag, mimicking the
+behavior of the `filter.<driver>.clean` / `filter.<driver>.smudge`
+mechanism.
+
+If the filter dies during the communication or does not adhere to
+the protocol then Git will stop the filter process and restart it
+with the next file that needs to be processed. Depending on the
+`filter.<driver>.required` flag Git will interpret that as an error.
+
+After the filter has processed a blob it is expected to wait for
+the next "key=value" list containing a command. Git will close
+the command pipe on exit. The filter is expected to detect EOF
+and exit gracefully on its own. Git will wait until the filter
+process has stopped.
+
+A long running filter demo implementation can be found in
+`contrib/long-running-filter/example.pl` located in the Git
+core repository. If you develop your own long running filter
+process then the `GIT_TRACE_PACKET` environment variable can be
+very helpful for debugging (see linkgit:git[1]).
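
For instance, a debugging sketch (the path is the placeholder used in the
examples above):
------------
# Force a fresh smudge of one tracked file and dump the packet
# exchange between Git and the long running filter.
rm path/testfile.dat
GIT_TRACE_PACKET=1 git checkout -- path/testfile.dat
------------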
+
+Please note that you cannot use an existing `filter.<driver>.clean`
+or `filter.<driver>.smudge` command with `filter.<driver>.process`
+because the former two use a different inter process communication
+protocol than the latter one.
+
+
Interaction between checkin/checkout attributes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 55e88b02d4..feddf2326b 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.10.0.GIT
+DEF_VER=v2.11.0-rc0
LF='
'
diff --git a/builtin/archive.c b/builtin/archive.c
index a1e3b940c2..49f491413a 100644
--- a/builtin/archive.c
+++ b/builtin/archive.c
@@ -47,10 +47,10 @@ static int run_remote_archiver(int argc, const char **argv,
if (name_hint) {
const char *format = archive_format_from_filename(name_hint);
if (format)
- packet_write(fd[1], "argument --format=%s\n", format);
+ packet_write_fmt(fd[1], "argument --format=%s\n", format);
}
for (i = 1; i < argc; i++)
- packet_write(fd[1], "argument %s\n", argv[i]);
+ packet_write_fmt(fd[1], "argument %s\n", argv[i]);
packet_flush(fd[1]);
buf = packet_read_line(fd[0], NULL);
diff --git a/builtin/commit.c b/builtin/commit.c
index 1cba3b75c8..8976c3d29b 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -183,7 +183,7 @@ static void determine_whence(struct wt_status *s)
whence = FROM_MERGE;
else if (file_exists(git_path_cherry_pick_head())) {
whence = FROM_CHERRY_PICK;
- if (file_exists(git_path(SEQ_DIR)))
+ if (file_exists(git_path_seq_dir()))
sequencer_in_use = 1;
}
else
@@ -894,9 +894,14 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
if (amend)
parent = "HEAD^1";
- if (get_sha1(parent, sha1))
- commitable = !!active_nr;
- else {
+ if (get_sha1(parent, sha1)) {
+ int i, ita_nr = 0;
+
+ for (i = 0; i < active_nr; i++)
+ if (ce_intent_to_add(active_cache[i]))
+ ita_nr++;
+ commitable = active_nr - ita_nr > 0;
+ } else {
/*
* Unless the user did explicitly request a submodule
* ignore mode by passing a command line option we do
@@ -910,7 +915,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
if (ignore_submodule_arg &&
!strcmp(ignore_submodule_arg, "all"))
diff_flags |= DIFF_OPT_IGNORE_SUBMODULES;
- commitable = index_differs_from(parent, diff_flags);
+ commitable = index_differs_from(parent, diff_flags, 1);
}
}
strbuf_release(&committer_ident);
@@ -1637,7 +1642,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
const char *index_file, *reflog_msg;
char *nl;
unsigned char sha1[20];
- struct commit_list *parents = NULL, **pptr = &parents;
+ struct commit_list *parents = NULL;
struct stat statbuf;
struct commit *current_head = NULL;
struct commit_extra_header *extra = NULL;
@@ -1683,20 +1688,18 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
if (!reflog_msg)
reflog_msg = "commit (initial)";
} else if (amend) {
- struct commit_list *c;
-
if (!reflog_msg)
reflog_msg = "commit (amend)";
- for (c = current_head->parents; c; c = c->next)
- pptr = &commit_list_insert(c->item, pptr)->next;
+ parents = copy_commit_list(current_head->parents);
} else if (whence == FROM_MERGE) {
struct strbuf m = STRBUF_INIT;
FILE *fp;
int allow_fast_forward = 1;
+ struct commit_list **pptr = &parents;
if (!reflog_msg)
reflog_msg = "commit (merge)";
- pptr = &commit_list_insert(current_head, pptr)->next;
+ pptr = commit_list_append(current_head, pptr);
fp = fopen(git_path_merge_head(), "r");
if (fp == NULL)
die_errno(_("could not open '%s' for reading"),
@@ -1707,7 +1710,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
parent = get_merge_parent(m.buf);
if (!parent)
die(_("Corrupt MERGE_HEAD file (%s)"), m.buf);
- pptr = &commit_list_insert(parent, pptr)->next;
+ pptr = commit_list_append(parent, pptr);
}
fclose(fp);
strbuf_release(&m);
@@ -1724,7 +1727,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
reflog_msg = (whence == FROM_CHERRY_PICK)
? "commit (cherry-pick)"
: "commit";
- pptr = &commit_list_insert(current_head, pptr)->next;
+ commit_list_insert(current_head, &parents);
}
/* Finally, get the commit message */
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 74c0546362..b6a5597cbf 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -577,9 +577,12 @@ static void print_compact(struct strbuf *display,
static void format_display(struct strbuf *display, char code,
const char *summary, const char *error,
- const char *remote, const char *local)
+ const char *remote, const char *local,
+ int summary_width)
{
- strbuf_addf(display, "%c %-*s ", code, TRANSPORT_SUMMARY(summary));
+ int width = (summary_width + strlen(summary) - gettext_width(summary));
+
+ strbuf_addf(display, "%c %-*s ", code, width, summary);
if (!compact_format)
print_remote_to_local(display, remote, local);
else
@@ -591,7 +594,8 @@ static void format_display(struct strbuf *display, char code,
static int update_local_ref(struct ref *ref,
const char *remote,
const struct ref *remote_ref,
- struct strbuf *display)
+ struct strbuf *display,
+ int summary_width)
{
struct commit *current = NULL, *updated;
enum object_type type;
@@ -605,7 +609,7 @@ static int update_local_ref(struct ref *ref,
if (!oidcmp(&ref->old_oid, &ref->new_oid)) {
if (verbosity > 0)
format_display(display, '=', _("[up to date]"), NULL,
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
return 0;
}
@@ -619,7 +623,7 @@ static int update_local_ref(struct ref *ref,
*/
format_display(display, '!', _("[rejected]"),
_("can't fetch in current branch"),
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
return 1;
}
@@ -629,7 +633,7 @@ static int update_local_ref(struct ref *ref,
r = s_update_ref("updating tag", ref, 0);
format_display(display, r ? '!' : 't', _("[tag update]"),
r ? _("unable to update local ref") : NULL,
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
return r;
}
@@ -662,7 +666,7 @@ static int update_local_ref(struct ref *ref,
r = s_update_ref(msg, ref, 0);
format_display(display, r ? '!' : '*', what,
r ? _("unable to update local ref") : NULL,
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
return r;
}
@@ -678,7 +682,7 @@ static int update_local_ref(struct ref *ref,
r = s_update_ref("fast-forward", ref, 1);
format_display(display, r ? '!' : ' ', quickref.buf,
r ? _("unable to update local ref") : NULL,
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
strbuf_release(&quickref);
return r;
} else if (force || ref->force) {
@@ -693,12 +697,12 @@ static int update_local_ref(struct ref *ref,
r = s_update_ref("forced-update", ref, 1);
format_display(display, r ? '!' : '+', quickref.buf,
r ? _("unable to update local ref") : _("forced update"),
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
strbuf_release(&quickref);
return r;
} else {
format_display(display, '!', _("[rejected]"), _("non-fast-forward"),
- remote, pretty_ref);
+ remote, pretty_ref, summary_width);
return 1;
}
}
@@ -729,6 +733,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
char *url;
const char *filename = dry_run ? "/dev/null" : git_path_fetch_head();
int want_status;
+ int summary_width = transport_summary_width(ref_map);
fp = fopen(filename, "a");
if (!fp)
@@ -838,13 +843,14 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
strbuf_reset(&note);
if (ref) {
- rc |= update_local_ref(ref, what, rm, &note);
+ rc |= update_local_ref(ref, what, rm, &note,
+ summary_width);
free(ref);
} else
format_display(&note, '*',
*kind ? kind : "branch", NULL,
*what ? what : "HEAD",
- "FETCH_HEAD");
+ "FETCH_HEAD", summary_width);
if (note.len) {
if (verbosity >= 0 && !shown_url) {
fprintf(stderr, _("From %.*s\n"),
@@ -911,6 +917,7 @@ static int prune_refs(struct refspec *refs, int ref_count, struct ref *ref_map,
int url_len, i, result = 0;
struct ref *ref, *stale_refs = get_stale_heads(refs, ref_count, ref_map);
char *url;
+ int summary_width = transport_summary_width(stale_refs);
const char *dangling_msg = dry_run
? _(" (%s will become dangling)")
: _(" (%s has become dangling)");
@@ -946,7 +953,8 @@ static int prune_refs(struct refspec *refs, int ref_count, struct ref *ref_map,
shown_url = 1;
}
format_display(&sb, '-', _("[deleted]"), NULL,
- _("(none)"), prettify_refname(ref->name));
+ _("(none)"), prettify_refname(ref->name),
+ summary_width);
fprintf(stderr, " %s\n",sb.buf);
strbuf_release(&sb);
warn_dangling_symref(stderr, dangling_msg, ref->name);
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 1e7c2a98a5..0fd52bd6b4 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -720,7 +720,7 @@ static off_t write_reused_pack(struct sha1file *f)
if (!is_pack_valid(reuse_packfile))
die("packfile is invalid: %s", reuse_packfile->pack_name);
- fd = git_open_noatime(reuse_packfile->pack_name);
+ fd = git_open(reuse_packfile->pack_name);
if (fd < 0)
die_errno("unable to open packfile for reuse: %s",
reuse_packfile->pack_name);
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 680759d256..e6b3879a5b 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -227,7 +227,7 @@ static int receive_pack_config(const char *var, const char *value, void *cb)
static void show_ref(const char *path, const unsigned char *sha1)
{
if (sent_capabilities) {
- packet_write(1, "%s %s\n", sha1_to_hex(sha1), path);
+ packet_write_fmt(1, "%s %s\n", sha1_to_hex(sha1), path);
} else {
struct strbuf cap = STRBUF_INIT;
@@ -242,7 +242,7 @@ static void show_ref(const char *path, const unsigned char *sha1)
if (advertise_push_options)
strbuf_addstr(&cap, " push-options");
strbuf_addf(&cap, " agent=%s", git_user_agent_sanitized());
- packet_write(1, "%s %s%c%s\n",
+ packet_write_fmt(1, "%s %s%c%s\n",
sha1_to_hex(sha1), path, 0, cap.buf);
strbuf_release(&cap);
sent_capabilities = 1;
diff --git a/builtin/remote-ext.c b/builtin/remote-ext.c
index 88eb8f9013..11b48bfb41 100644
--- a/builtin/remote-ext.c
+++ b/builtin/remote-ext.c
@@ -128,9 +128,9 @@ static void send_git_request(int stdin_fd, const char *serv, const char *repo,
const char *vhost)
{
if (!vhost)
- packet_write(stdin_fd, "%s %s%c", serv, repo, 0);
+ packet_write_fmt(stdin_fd, "%s %s%c", serv, repo, 0);
else
- packet_write(stdin_fd, "%s %s%chost=%s%c", serv, repo, 0,
+ packet_write_fmt(stdin_fd, "%s %s%chost=%s%c", serv, repo, 0,
vhost, 0);
}
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 4da1f1da25..cfb0f1510c 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -671,8 +671,9 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
filter &= ~(DO_FLAGS|DO_NOREV);
verify = 1;
abbrev = DEFAULT_ABBREV;
- if (arg[7] == '=')
- abbrev = strtoul(arg + 8, NULL, 10);
+ if (!arg[7])
+ continue;
+ abbrev = strtoul(arg + 8, NULL, 10);
if (abbrev < MINIMUM_ABBREV)
abbrev = MINIMUM_ABBREV;
else if (40 <= abbrev)
diff --git a/builtin/revert.c b/builtin/revert.c
index 4e693808b1..4ca5b51544 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -71,7 +71,7 @@ static void verify_opt_compatible(const char *me, const char *base_opt, ...)
die(_("%s: %s cannot be used with %s"), me, this_opt, base_opt);
}
-static void parse_args(int argc, const char **argv, struct replay_opts *opts)
+static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
{
const char * const * usage_str = revert_or_cherry_pick_usage(opts);
const char *me = action_name(opts);
@@ -115,25 +115,15 @@ static void parse_args(int argc, const char **argv, struct replay_opts *opts)
if (opts->keep_redundant_commits)
opts->allow_empty = 1;
- /* Set the subcommand */
- if (cmd == 'q')
- opts->subcommand = REPLAY_REMOVE_STATE;
- else if (cmd == 'c')
- opts->subcommand = REPLAY_CONTINUE;
- else if (cmd == 'a')
- opts->subcommand = REPLAY_ROLLBACK;
- else
- opts->subcommand = REPLAY_NONE;
-
/* Check for incompatible command line arguments */
- if (opts->subcommand != REPLAY_NONE) {
+ if (cmd) {
char *this_operation;
- if (opts->subcommand == REPLAY_REMOVE_STATE)
+ if (cmd == 'q')
this_operation = "--quit";
- else if (opts->subcommand == REPLAY_CONTINUE)
+ else if (cmd == 'c')
this_operation = "--continue";
else {
- assert(opts->subcommand == REPLAY_ROLLBACK);
+ assert(cmd == 'a');
this_operation = "--abort";
}
@@ -156,7 +146,7 @@ static void parse_args(int argc, const char **argv, struct replay_opts *opts)
"--edit", opts->edit,
NULL);
- if (opts->subcommand != REPLAY_NONE) {
+ if (cmd) {
opts->revs = NULL;
} else {
struct setup_revision_opt s_r_opt;
@@ -174,20 +164,30 @@ static void parse_args(int argc, const char **argv, struct replay_opts *opts)
if (argc > 1)
usage_with_options(usage_str, options);
+
+ /* These option values will be free()d */
+ opts->gpg_sign = xstrdup_or_null(opts->gpg_sign);
+ opts->strategy = xstrdup_or_null(opts->strategy);
+
+ if (cmd == 'q')
+ return sequencer_remove_state(opts);
+ if (cmd == 'c')
+ return sequencer_continue(opts);
+ if (cmd == 'a')
+ return sequencer_rollback(opts);
+ return sequencer_pick_revisions(opts);
}
int cmd_revert(int argc, const char **argv, const char *prefix)
{
- struct replay_opts opts;
+ struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
- memset(&opts, 0, sizeof(opts));
if (isatty(0))
opts.edit = 1;
opts.action = REPLAY_REVERT;
git_config(git_default_config, NULL);
- parse_args(argc, argv, &opts);
- res = sequencer_pick_revisions(&opts);
+ res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("revert failed"));
return res;
@@ -195,14 +195,12 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
{
- struct replay_opts opts;
+ struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
- memset(&opts, 0, sizeof(opts));
opts.action = REPLAY_PICK;
git_config(git_default_config, NULL);
- parse_args(argc, argv, &opts);
- res = sequencer_pick_revisions(&opts);
+ res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("cherry-pick failed"));
return res;
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 6182eb3197..4beeda5f9f 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -95,6 +95,8 @@ static int chop_last_dir(char **remoteurl, int is_relative)
* NEEDSWORK: This works incorrectly on the domain and protocol part.
* remote_url url outcome expectation
* http://a.com/b ../c http://a.com/c as is
+ * http://a.com/b/ ../c http://a.com/c same as previous line, but
+ * ignore trailing slash in url
* http://a.com/b ../../c http://c error out
* http://a.com/b ../../../c http:/c error out
* http://a.com/b ../../../../c http:c error out
@@ -113,8 +115,8 @@ static char *relative_url(const char *remote_url,
struct strbuf sb = STRBUF_INIT;
size_t len = strlen(remoteurl);
- if (is_dir_sep(remoteurl[len]))
- remoteurl[len] = '\0';
+ if (is_dir_sep(remoteurl[len-1]))
+ remoteurl[len-1] = '\0';
if (!url_is_local_not_ssh(remoteurl) || is_absolute_path(remoteurl))
is_relative = 0;
@@ -147,6 +149,8 @@ static char *relative_url(const char *remote_url,
}
strbuf_reset(&sb);
strbuf_addf(&sb, "%s%s%s", remoteurl, colonsep ? ":" : "/", url);
+ if (ends_with(url, "/"))
+ strbuf_setlen(&sb, sb.len - 1);
free(remoteurl);
if (starts_with_dot_slash(sb.buf))
diff --git a/builtin/upload-archive.c b/builtin/upload-archive.c
index 2caedf1849..dc872f6a94 100644
--- a/builtin/upload-archive.c
+++ b/builtin/upload-archive.c
@@ -88,11 +88,11 @@ int cmd_upload_archive(int argc, const char **argv, const char *prefix)
writer.git_cmd = 1;
if (start_command(&writer)) {
int err = errno;
- packet_write(1, "NACK unable to spawn subprocess\n");
+ packet_write_fmt(1, "NACK unable to spawn subprocess\n");
die("upload-archive: %s", strerror(err));
}
- packet_write(1, "ACK\n");
+ packet_write_fmt(1, "ACK\n");
packet_flush(1);
while (1) {
diff --git a/cache.h b/cache.h
index 607fe6ed30..a50a61a197 100644
--- a/cache.h
+++ b/cache.h
@@ -1125,7 +1125,7 @@ extern int write_sha1_file(const void *buf, unsigned long len, const char *type,
extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, unsigned char *sha1, unsigned flags);
extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
extern int force_object_loose(const unsigned char *sha1, time_t mtime);
-extern int git_open_noatime(const char *name);
+extern int git_open(const char *name);
extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
@@ -1190,6 +1190,9 @@ static inline int hex2chr(const char *s)
#define MINIMUM_ABBREV minimum_abbrev
#define DEFAULT_ABBREV default_abbrev
+/* used when the code does not know or care what the default abbrev is */
+#define FALLBACK_DEFAULT_ABBREV 7
+
struct object_context {
unsigned char tree[20];
char path[PATH_MAX];
@@ -1490,6 +1493,12 @@ extern void prepare_packed_git(void);
extern void reprepare_packed_git(void);
extern void install_packed_git(struct packed_git *pack);
+/*
+ * Give a rough count of objects in the repository. This sacrifices accuracy
+ * for speed.
+ */
+unsigned long approximate_object_count(void);
+
extern struct packed_git *find_sha1_pack(const unsigned char *sha1,
struct packed_git *packs);
diff --git a/connect.c b/connect.c
index d99d6435fd..8cb93b0720 100644
--- a/connect.c
+++ b/connect.c
@@ -750,7 +750,7 @@ struct child_process *git_connect(int fd[2], const char *url,
* Note: Do not add any other headers here! Doing so
* will cause older git-daemon servers to crash.
*/
- packet_write(fd[1],
+ packet_write_fmt(fd[1],
"%s %s%chost=%s%c",
prog, path, 0,
target_host, 0);
diff --git a/contrib/coccinelle/object_id.cocci b/contrib/coccinelle/object_id.cocci
index 0307624a03..09afdbf994 100644
--- a/contrib/coccinelle/object_id.cocci
+++ b/contrib/coccinelle/object_id.cocci
@@ -17,10 +17,13 @@ expression E1;
+ oid_to_hex(&E1)
@@
+identifier f != oid_to_hex;
expression E1;
@@
+ f(...) {...
- sha1_to_hex(E1->hash)
+ oid_to_hex(E1)
+ ...}
@@
expression E1, E2;
@@ -29,10 +32,13 @@ expression E1, E2;
+ oid_to_hex_r(E1, &E2)
@@
+identifier f != oid_to_hex_r;
expression E1, E2;
@@
+ f(...) {...
- sha1_to_hex_r(E1, E2->hash)
+ oid_to_hex_r(E1, E2)
+ ...}
@@
expression E1;
@@ -41,10 +47,13 @@ expression E1;
+ oidclr(&E1)
@@
+identifier f != oidclr;
expression E1;
@@
+ f(...) {...
- hashclr(E1->hash)
+ oidclr(E1)
+ ...}
@@
expression E1, E2;
@@ -53,10 +62,13 @@ expression E1, E2;
+ oidcmp(&E1, &E2)
@@
+identifier f != oidcmp;
expression E1, E2;
@@
+ f(...) {...
- hashcmp(E1->hash, E2->hash)
+ oidcmp(E1, E2)
+ ...}
@@
expression E1, E2;
@@ -77,10 +89,13 @@ expression E1, E2;
+ oidcpy(&E1, &E2)
@@
+identifier f != oidcpy;
expression E1, E2;
@@
+ f(...) {...
- hashcpy(E1->hash, E2->hash)
+ oidcpy(E1, E2)
+ ...}
@@
expression E1, E2;
diff --git a/contrib/long-running-filter/example.pl b/contrib/long-running-filter/example.pl
new file mode 100755
index 0000000000..39457055a5
--- /dev/null
+++ b/contrib/long-running-filter/example.pl
@@ -0,0 +1,128 @@
+#!/usr/bin/perl
+#
+# Example implementation for the Git filter protocol version 2
+# See Documentation/gitattributes.txt, section "Filter Protocol"
+#
+# Please note, this pass-thru filter is a minimal skeleton. No proper
+# error handling was implemented.
+#
+
+use strict;
+use warnings;
+
+my $MAX_PACKET_CONTENT_SIZE = 65516;
+
+sub packet_bin_read {
+ my $buffer;
+ my $bytes_read = read STDIN, $buffer, 4;
+ if ( $bytes_read == 0 ) {
+
+ # EOF - Git stopped talking to us!
+ exit();
+ }
+ elsif ( $bytes_read != 4 ) {
+ die "invalid packet: '$buffer'";
+ }
+ my $pkt_size = hex($buffer);
+ if ( $pkt_size == 0 ) {
+ return ( 1, "" );
+ }
+ elsif ( $pkt_size > 4 ) {
+ my $content_size = $pkt_size - 4;
+ $bytes_read = read STDIN, $buffer, $content_size;
+ if ( $bytes_read != $content_size ) {
+ die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+ }
+ return ( 0, $buffer );
+ }
+ else {
+ die "invalid packet size: $pkt_size";
+ }
+}
+
+sub packet_txt_read {
+ my ( $res, $buf ) = packet_bin_read();
+ unless ( $buf =~ s/\n$// ) {
+ die "A non-binary line MUST be terminated by an LF.";
+ }
+ return ( $res, $buf );
+}
+
+sub packet_bin_write {
+ my $buf = shift;
+ print STDOUT sprintf( "%04x", length($buf) + 4 );
+ print STDOUT $buf;
+ STDOUT->flush();
+}
+
+sub packet_txt_write {
+ packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+ print STDOUT sprintf( "%04x", 0 );
+ STDOUT->flush();
+}
+
+( packet_txt_read() eq ( 0, "git-filter-client" ) ) || die "bad initialize";
+( packet_txt_read() eq ( 0, "version=2" ) ) || die "bad version";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad version end";
+
+packet_txt_write("git-filter-server");
+packet_txt_write("version=2");
+packet_flush();
+
+( packet_txt_read() eq ( 0, "capability=clean" ) ) || die "bad capability";
+( packet_txt_read() eq ( 0, "capability=smudge" ) ) || die "bad capability";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end";
+
+packet_txt_write("capability=clean");
+packet_txt_write("capability=smudge");
+packet_flush();
+
+while (1) {
+ my ($command) = packet_txt_read() =~ /^command=([^=]+)$/;
+ my ($pathname) = packet_txt_read() =~ /^pathname=([^=]+)$/;
+
+ packet_bin_read();
+
+ my $input = "";
+ {
+ binmode(STDIN);
+ my $buffer;
+ my $done = 0;
+ while ( !$done ) {
+ ( $done, $buffer ) = packet_bin_read();
+ $input .= $buffer;
+ }
+ }
+
+ my $output;
+ if ( $command eq "clean" ) {
+ ### Perform clean here ###
+ $output = $input;
+ }
+ elsif ( $command eq "smudge" ) {
+ ### Perform smudge here ###
+ $output = $input;
+ }
+ else {
+ die "bad command '$command'";
+ }
+
+ packet_txt_write("status=success");
+ packet_flush();
+ while ( length($output) > 0 ) {
+ my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE );
+ packet_bin_write($packet);
+ if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
+ $output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
+ }
+ else {
+ $output = "";
+ }
+ }
+ packet_flush(); # flush content!
+ packet_flush(); # empty list, keep "status=success" unchanged!
+
+}
diff --git a/convert.c b/convert.c
index 0ad39b16cc..be91358462 100644
--- a/convert.c
+++ b/convert.c
@@ -3,6 +3,7 @@
#include "run-command.h"
#include "quote.h"
#include "sigchain.h"
+#include "pkt-line.h"
/*
* convert.c - convert a file when checking it out and checking it in.
@@ -416,7 +417,7 @@ static int filter_buffer_or_fd(int in, int out, void *data)
child_process.out = out;
if (start_command(&child_process))
- return error("cannot fork to run external filter %s", params->cmd);
+ return error("cannot fork to run external filter '%s'", params->cmd);
sigchain_push(SIGPIPE, SIG_IGN);
@@ -434,19 +435,19 @@ static int filter_buffer_or_fd(int in, int out, void *data)
if (close(child_process.in))
write_err = 1;
if (write_err)
- error("cannot feed the input to external filter %s", params->cmd);
+ error("cannot feed the input to external filter '%s'", params->cmd);
sigchain_pop(SIGPIPE);
status = finish_command(&child_process);
if (status)
- error("external filter %s failed %d", params->cmd, status);
+ error("external filter '%s' failed %d", params->cmd, status);
strbuf_release(&cmd);
return (write_err || status);
}
-static int apply_filter(const char *path, const char *src, size_t len, int fd,
+static int apply_single_file_filter(const char *path, const char *src, size_t len, int fd,
struct strbuf *dst, const char *cmd)
{
/*
@@ -455,17 +456,11 @@ static int apply_filter(const char *path, const char *src, size_t len, int fd,
*
* (child --> cmd) --> us
*/
- int ret = 1;
+ int err = 0;
struct strbuf nbuf = STRBUF_INIT;
struct async async;
struct filter_params params;
- if (!cmd || !*cmd)
- return 0;
-
- if (!dst)
- return 1;
-
memset(&async, 0, sizeof(async));
async.proc = filter_buffer_or_fd;
async.data = &params;
@@ -481,23 +476,304 @@ static int apply_filter(const char *path, const char *src, size_t len, int fd,
return 0; /* error was already reported */
if (strbuf_read(&nbuf, async.out, len) < 0) {
- error("read from external filter %s failed", cmd);
- ret = 0;
+ err = error("read from external filter '%s' failed", cmd);
}
if (close(async.out)) {
- error("read from external filter %s failed", cmd);
- ret = 0;
+ err = error("read from external filter '%s' failed", cmd);
}
if (finish_async(&async)) {
- error("external filter %s failed", cmd);
- ret = 0;
+ err = error("external filter '%s' failed", cmd);
}
- if (ret) {
+ if (!err) {
strbuf_swap(dst, &nbuf);
}
strbuf_release(&nbuf);
- return ret;
+ return !err;
+}
+
+#define CAP_CLEAN (1u<<0)
+#define CAP_SMUDGE (1u<<1)
+
+struct cmd2process {
+ struct hashmap_entry ent; /* must be the first member! */
+ unsigned int supported_capabilities;
+ const char *cmd;
+ struct child_process process;
+};
+
+static int cmd_process_map_initialized;
+static struct hashmap cmd_process_map;
+
+static int cmd2process_cmp(const struct cmd2process *e1,
+ const struct cmd2process *e2,
+ const void *unused)
+{
+ return strcmp(e1->cmd, e2->cmd);
+}
+
+static struct cmd2process *find_multi_file_filter_entry(struct hashmap *hashmap, const char *cmd)
+{
+ struct cmd2process key;
+ hashmap_entry_init(&key, strhash(cmd));
+ key.cmd = cmd;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int packet_write_list(int fd, const char *line, ...)
+{
+ va_list args;
+ int err;
+ va_start(args, line);
+ for (;;) {
+ if (!line)
+ break;
+ if (strlen(line) > LARGE_PACKET_DATA_MAX)
+ return -1;
+ err = packet_write_fmt_gently(fd, "%s\n", line);
+ if (err)
+ return err;
+ line = va_arg(args, const char*);
+ }
+ va_end(args);
+ return packet_flush_gently(fd);
+}
+
+static void read_multi_file_filter_status(int fd, struct strbuf *status)
+{
+ struct strbuf **pair;
+ char *line;
+ for (;;) {
+ line = packet_read_line(fd, NULL);
+ if (!line)
+ break;
+ pair = strbuf_split_str(line, '=', 2);
+ if (pair[0] && pair[0]->len && pair[1]) {
+ /* the last "status=<foo>" line wins */
+ if (!strcmp(pair[0]->buf, "status=")) {
+ strbuf_reset(status);
+ strbuf_addbuf(status, pair[1]);
+ }
+ }
+ strbuf_list_free(pair);
+ }
+}
+
+static void kill_multi_file_filter(struct hashmap *hashmap, struct cmd2process *entry)
+{
+ if (!entry)
+ return;
+
+ entry->process.clean_on_exit = 0;
+ kill(entry->process.pid, SIGTERM);
+ finish_command(&entry->process);
+
+ hashmap_remove(hashmap, entry, NULL);
+ free(entry);
+}
+
+static void stop_multi_file_filter(struct child_process *process)
+{
+ sigchain_push(SIGPIPE, SIG_IGN);
+ /* Closing the pipe signals the filter to initiate a shutdown. */
+ close(process->in);
+ close(process->out);
+ sigchain_pop(SIGPIPE);
+ /* Finish command will wait until the shutdown is complete. */
+ finish_command(process);
+}
+
+static struct cmd2process *start_multi_file_filter(struct hashmap *hashmap, const char *cmd)
+{
+ int err;
+ struct cmd2process *entry;
+ struct child_process *process;
+ const char *argv[] = { cmd, NULL };
+ struct string_list cap_list = STRING_LIST_INIT_NODUP;
+ char *cap_buf;
+ const char *cap_name;
+
+ entry = xmalloc(sizeof(*entry));
+ entry->cmd = cmd;
+ entry->supported_capabilities = 0;
+ process = &entry->process;
+
+ child_process_init(process);
+ process->argv = argv;
+ process->use_shell = 1;
+ process->in = -1;
+ process->out = -1;
+ process->clean_on_exit = 1;
+ process->clean_on_exit_handler = stop_multi_file_filter;
+
+ if (start_command(process)) {
+ error("cannot fork to run external filter '%s'", cmd);
+ return NULL;
+ }
+
+ hashmap_entry_init(entry, strhash(cmd));
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = packet_write_list(process->in, "git-filter-client", "version=2", NULL);
+ if (err)
+ goto done;
+
+ err = strcmp(packet_read_line(process->out, NULL), "git-filter-server");
+ if (err) {
+ error("external filter '%s' does not support filter protocol version 2", cmd);
+ goto done;
+ }
+ err = strcmp(packet_read_line(process->out, NULL), "version=2");
+ if (err)
+ goto done;
+ err = packet_read_line(process->out, NULL) != NULL;
+ if (err)
+ goto done;
+
+ err = packet_write_list(process->in, "capability=clean", "capability=smudge", NULL);
+
+ for (;;) {
+ cap_buf = packet_read_line(process->out, NULL);
+ if (!cap_buf)
+ break;
+ string_list_split_in_place(&cap_list, cap_buf, '=', 1);
+
+ if (cap_list.nr != 2 || strcmp(cap_list.items[0].string, "capability"))
+ continue;
+
+ cap_name = cap_list.items[1].string;
+ if (!strcmp(cap_name, "clean")) {
+ entry->supported_capabilities |= CAP_CLEAN;
+ } else if (!strcmp(cap_name, "smudge")) {
+ entry->supported_capabilities |= CAP_SMUDGE;
+ } else {
+ warning(
+ "external filter '%s' requested unsupported filter capability '%s'",
+ cmd, cap_name
+ );
+ }
+
+ string_list_clear(&cap_list, 0);
+ }
+
+done:
+ sigchain_pop(SIGPIPE);
+
+ if (err || errno == EPIPE) {
+ error("initialization for external filter '%s' failed", cmd);
+ kill_multi_file_filter(hashmap, entry);
+ return NULL;
+ }
+
+ hashmap_add(hashmap, entry);
+ return entry;
+}
+
+static int apply_multi_file_filter(const char *path, const char *src, size_t len,
+ int fd, struct strbuf *dst, const char *cmd,
+ const unsigned int wanted_capability)
+{
+ int err;
+ struct cmd2process *entry;
+ struct child_process *process;
+ struct strbuf nbuf = STRBUF_INIT;
+ struct strbuf filter_status = STRBUF_INIT;
+ const char *filter_type;
+
+ if (!cmd_process_map_initialized) {
+ cmd_process_map_initialized = 1;
+ hashmap_init(&cmd_process_map, (hashmap_cmp_fn) cmd2process_cmp, 0);
+ entry = NULL;
+ } else {
+ entry = find_multi_file_filter_entry(&cmd_process_map, cmd);
+ }
+
+ fflush(NULL);
+
+ if (!entry) {
+ entry = start_multi_file_filter(&cmd_process_map, cmd);
+ if (!entry)
+ return 0;
+ }
+ process = &entry->process;
+
+ if (!(wanted_capability & entry->supported_capabilities))
+ return 0;
+
+ if (CAP_CLEAN & wanted_capability)
+ filter_type = "clean";
+ else if (CAP_SMUDGE & wanted_capability)
+ filter_type = "smudge";
+ else
+ die("unexpected filter type");
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ assert(strlen(filter_type) < LARGE_PACKET_DATA_MAX - strlen("command=\n"));
+ err = packet_write_fmt_gently(process->in, "command=%s\n", filter_type);
+ if (err)
+ goto done;
+
+ err = strlen(path) > LARGE_PACKET_DATA_MAX - strlen("pathname=\n");
+ if (err) {
+ error("path name too long for external filter");
+ goto done;
+ }
+
+ err = packet_write_fmt_gently(process->in, "pathname=%s\n", path);
+ if (err)
+ goto done;
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ goto done;
+
+ if (fd >= 0)
+ err = write_packetized_from_fd(fd, process->in);
+ else
+ err = write_packetized_from_buf(src, len, process->in);
+ if (err)
+ goto done;
+
+ read_multi_file_filter_status(process->out, &filter_status);
+ err = strcmp(filter_status.buf, "success");
+ if (err)
+ goto done;
+
+ err = read_packetized_to_strbuf(process->out, &nbuf) < 0;
+ if (err)
+ goto done;
+
+ read_multi_file_filter_status(process->out, &filter_status);
+ err = strcmp(filter_status.buf, "success");
+
+done:
+ sigchain_pop(SIGPIPE);
+
+ if (err || errno == EPIPE) {
+ if (!strcmp(filter_status.buf, "error")) {
+ /* The filter signaled a problem with the file. */
+ } else if (!strcmp(filter_status.buf, "abort")) {
+ /*
+ * The filter signaled a permanent problem. Don't try to filter
+ * files with the same command for the lifetime of the current
+ * Git process.
+ */
+ entry->supported_capabilities &= ~wanted_capability;
+ } else {
+ /*
+ * Something went wrong with the protocol filter.
+ * Force shutdown and restart if another blob requires filtering.
+ */
+ error("external filter '%s' failed", cmd);
+ kill_multi_file_filter(&cmd_process_map, entry);
+ }
+ } else {
+ strbuf_swap(dst, &nbuf);
+ }
+ strbuf_release(&nbuf);
+ return !err;
}
static struct convert_driver {
@@ -505,9 +781,35 @@ static struct convert_driver {
struct convert_driver *next;
const char *smudge;
const char *clean;
+ const char *process;
int required;
} *user_convert, **user_convert_tail;
+static int apply_filter(const char *path, const char *src, size_t len,
+ int fd, struct strbuf *dst, struct convert_driver *drv,
+ const unsigned int wanted_capability)
+{
+ const char *cmd = NULL;
+
+ if (!drv)
+ return 0;
+
+ if (!dst)
+ return 1;
+
+ if ((CAP_CLEAN & wanted_capability) && !drv->process && drv->clean)
+ cmd = drv->clean;
+ else if ((CAP_SMUDGE & wanted_capability) && !drv->process && drv->smudge)
+ cmd = drv->smudge;
+
+ if (cmd && *cmd)
+ return apply_single_file_filter(path, src, len, fd, dst, cmd);
+ else if (drv->process && *drv->process)
+ return apply_multi_file_filter(path, src, len, fd, dst, drv->process, wanted_capability);
+
+ return 0;
+}
+
static int read_convert_config(const char *var, const char *value, void *cb)
{
const char *key, *name;
@@ -545,6 +847,9 @@ static int read_convert_config(const char *var, const char *value, void *cb)
if (!strcmp("clean", key))
return git_config_string(&drv->clean, var, value);
+ if (!strcmp("process", key))
+ return git_config_string(&drv->process, var, value);
+
if (!strcmp("required", key)) {
drv->required = git_config_bool(var, value);
return 0;
@@ -846,7 +1151,7 @@ int would_convert_to_git_filter_fd(const char *path)
if (!ca.drv->required)
return 0;
- return apply_filter(path, NULL, 0, -1, NULL, ca.drv->clean);
+ return apply_filter(path, NULL, 0, -1, NULL, ca.drv, CAP_CLEAN);
}
const char *get_convert_attr_ascii(const char *path)
@@ -879,18 +1184,12 @@ int convert_to_git(const char *path, const char *src, size_t len,
struct strbuf *dst, enum safe_crlf checksafe)
{
int ret = 0;
- const char *filter = NULL;
- int required = 0;
struct conv_attrs ca;
convert_attrs(&ca, path);
- if (ca.drv) {
- filter = ca.drv->clean;
- required = ca.drv->required;
- }
- ret |= apply_filter(path, src, len, -1, dst, filter);
- if (!ret && required)
+ ret |= apply_filter(path, src, len, -1, dst, ca.drv, CAP_CLEAN);
+ if (!ret && ca.drv && ca.drv->required)
die("%s: clean filter '%s' failed", path, ca.drv->name);
if (ret && dst) {
@@ -912,9 +1211,9 @@ void convert_to_git_filter_fd(const char *path, int fd, struct strbuf *dst,
convert_attrs(&ca, path);
assert(ca.drv);
- assert(ca.drv->clean);
+ assert(ca.drv->clean || ca.drv->process);
- if (!apply_filter(path, NULL, 0, fd, dst, ca.drv->clean))
+ if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN))
die("%s: clean filter '%s' failed", path, ca.drv->name);
crlf_to_git(path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
@@ -926,15 +1225,9 @@ static int convert_to_working_tree_internal(const char *path, const char *src,
int normalizing)
{
int ret = 0, ret_filter = 0;
- const char *filter = NULL;
- int required = 0;
struct conv_attrs ca;
convert_attrs(&ca, path);
- if (ca.drv) {
- filter = ca.drv->smudge;
- required = ca.drv->required;
- }
ret |= ident_to_worktree(path, src, len, dst, ca.ident);
if (ret) {
@@ -943,9 +1236,10 @@ static int convert_to_working_tree_internal(const char *path, const char *src,
}
/*
* CRLF conversion can be skipped if normalizing, unless there
- * is a smudge filter. The filter might expect CRLFs.
+ * is a smudge or process filter (even if the process filter doesn't
+ * support smudge). The filters might expect CRLFs.
*/
- if (filter || !normalizing) {
+ if ((ca.drv && (ca.drv->smudge || ca.drv->process)) || !normalizing) {
ret |= crlf_to_worktree(path, src, len, dst, ca.crlf_action);
if (ret) {
src = dst->buf;
@@ -953,8 +1247,8 @@ static int convert_to_working_tree_internal(const char *path, const char *src,
}
}
- ret_filter = apply_filter(path, src, len, -1, dst, filter);
- if (!ret_filter && required)
+ ret_filter = apply_filter(path, src, len, -1, dst, ca.drv, CAP_SMUDGE);
+ if (!ret_filter && ca.drv && ca.drv->required)
die("%s: smudge filter %s failed", path, ca.drv->name);
return ret | ret_filter;
@@ -1406,7 +1700,7 @@ struct stream_filter *get_stream_filter(const char *path, const unsigned char *s
struct stream_filter *filter = NULL;
convert_attrs(&ca, path);
- if (ca.drv && (ca.drv->smudge || ca.drv->clean))
+ if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
return NULL;
if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
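The long-running "process" filter wired up above talks to Git in pkt-line framed messages: every packet starts with a 4-hex-digit length that counts the 4 prefix bytes themselves, and the literal "0000" is a flush packet. A minimal, self-contained sketch of that framing and of the startup welcome/capability exchange follows; pkt_write() and pkt_flush() are illustrative stand-ins, not git's internal helpers.

    #include <stdio.h>
    #include <string.h>

    /* Write one pkt-line: 4 hex digits of total length (header included), then payload. */
    static void pkt_write(FILE *out, const char *s)
    {
        size_t len = strlen(s) + 4;
        fprintf(out, "%04x%s", (unsigned int)len, s);
    }

    /* A flush packet is the literal "0000" with no payload. */
    static void pkt_flush(FILE *out)
    {
        fputs("0000", out);
        fflush(out);
    }

    int main(void)
    {
        /* Welcome sequence sent to a protocol-v2 filter. */
        pkt_write(stdout, "git-filter-client\n");
        pkt_write(stdout, "version=2\n");
        pkt_flush(stdout);

        /* Capability announcement uses the same shape. */
        pkt_write(stdout, "capability=clean\n");
        pkt_write(stdout, "capability=smudge\n");
        pkt_flush(stdout);
        return 0;
    }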
diff --git a/daemon.c b/daemon.c
index 425aad0507..473e6b6b63 100644
--- a/daemon.c
+++ b/daemon.c
@@ -160,6 +160,7 @@ static const char *path_ok(const char *directory, struct hostinfo *hi)
{
static char rpath[PATH_MAX];
static char interp_path[PATH_MAX];
+ size_t rlen;
const char *path;
const char *dir;
@@ -187,8 +188,12 @@ static const char *path_ok(const char *directory, struct hostinfo *hi)
namlen = slash - dir;
restlen -= namlen;
loginfo("userpath <%s>, request <%s>, namlen %d, restlen %d, slash <%s>", user_path, dir, namlen, restlen, slash);
- snprintf(rpath, PATH_MAX, "%.*s/%s%.*s",
- namlen, dir, user_path, restlen, slash);
+ rlen = snprintf(rpath, sizeof(rpath), "%.*s/%s%.*s",
+ namlen, dir, user_path, restlen, slash);
+ if (rlen >= sizeof(rpath)) {
+ logerror("user-path too large: %s", rpath);
+ return NULL;
+ }
dir = rpath;
}
}
@@ -207,7 +212,15 @@ static const char *path_ok(const char *directory, struct hostinfo *hi)
strbuf_expand(&expanded_path, interpolated_path,
expand_path, &context);
- strlcpy(interp_path, expanded_path.buf, PATH_MAX);
+
+ rlen = strlcpy(interp_path, expanded_path.buf,
+ sizeof(interp_path));
+ if (rlen >= sizeof(interp_path)) {
+ logerror("interpolated path too large: %s",
+ interp_path);
+ return NULL;
+ }
+
strbuf_release(&expanded_path);
loginfo("Interpolated dir '%s'", interp_path);
@@ -219,7 +232,11 @@ static const char *path_ok(const char *directory, struct hostinfo *hi)
logerror("'%s': Non-absolute path denied (base-path active)", dir);
return NULL;
}
- snprintf(rpath, PATH_MAX, "%s%s", base_path, dir);
+ rlen = snprintf(rpath, sizeof(rpath), "%s%s", base_path, dir);
+ if (rlen >= sizeof(rpath)) {
+ logerror("base-path too large: %s", rpath);
+ return NULL;
+ }
dir = rpath;
}
@@ -281,7 +298,7 @@ static int daemon_error(const char *dir, const char *msg)
{
if (!informative_errors)
msg = "access denied or repository not exported";
- packet_write(1, "ERR %s: %s", msg, dir);
+ packet_write_fmt(1, "ERR %s: %s", msg, dir);
return -1;
}
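The daemon.c hunks above replace bare snprintf() calls with ones whose result is checked for truncation: snprintf() reports the length the output would have had, so a return value greater than or equal to the buffer size means the path did not fit. The same pattern in a standalone form (the function and path names here are made up for the example):

    #include <stdio.h>

    /* Join base and dir into buf; return 0 on success, -1 if the result was truncated. */
    static int join_path(char *buf, size_t bufsz, const char *base, const char *dir)
    {
        size_t rlen = snprintf(buf, bufsz, "%s%s", base, dir);
        if (rlen >= bufsz)
            return -1;   /* would not fit; buf holds a truncated string */
        return 0;
    }

    int main(void)
    {
        char rpath[16];
        if (join_path(rpath, sizeof(rpath), "/srv/git/", "project.git") < 0)
            fprintf(stderr, "path too long\n");
        else
            printf("%s\n", rpath);
        return 0;
    }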
diff --git a/diff-lib.c b/diff-lib.c
index 3007c8524c..52447466b5 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -214,6 +214,12 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
!is_null_oid(&ce->oid),
ce->name, 0);
continue;
+ } else if (revs->diffopt.ita_invisible_in_index &&
+ ce_intent_to_add(ce)) {
+ diff_addremove(&revs->diffopt, '+', ce->ce_mode,
+ EMPTY_BLOB_SHA1_BIN, 0,
+ ce->name, 0);
+ continue;
}
changed = match_stat_with_submodule(&revs->diffopt, ce, &st,
@@ -379,6 +385,14 @@ static void do_oneway_diff(struct unpack_trees_options *o,
struct rev_info *revs = o->unpack_data;
int match_missing, cached;
+ /* i-t-a entries do not actually exist in the index */
+ if (revs->diffopt.ita_invisible_in_index &&
+ idx && ce_intent_to_add(idx)) {
+ idx = NULL;
+ if (!tree)
+ return; /* nothing to diff.. */
+ }
+
/* if the entry is not checked out, don't examine work tree */
cached = o->index_only ||
(idx && ((idx->ce_flags & CE_VALID) || ce_skip_worktree(idx)));
@@ -521,7 +535,8 @@ int do_diff_cache(const unsigned char *tree_sha1, struct diff_options *opt)
return 0;
}
-int index_differs_from(const char *def, int diff_flags)
+int index_differs_from(const char *def, int diff_flags,
+ int ita_invisible_in_index)
{
struct rev_info rev;
struct setup_revision_opt opt;
@@ -533,6 +548,7 @@ int index_differs_from(const char *def, int diff_flags)
DIFF_OPT_SET(&rev.diffopt, QUICK);
DIFF_OPT_SET(&rev.diffopt, EXIT_WITH_STATUS);
rev.diffopt.flags |= diff_flags;
+ rev.diffopt.ita_invisible_in_index = ita_invisible_in_index;
run_diff_index(&rev, 1);
if (rev.pending.alloc)
free(rev.pending.objects);
diff --git a/diff.c b/diff.c
index 4c47bb3a2d..8981477c43 100644
--- a/diff.c
+++ b/diff.c
@@ -3102,7 +3102,9 @@ static const char *diff_abbrev_oid(const struct object_id *oid, int abbrev)
return find_unique_abbrev(oid->hash, abbrev);
else {
char *hex = oid_to_hex(oid);
- if (abbrev < 0 || abbrev > GIT_SHA1_HEXSZ)
+ if (abbrev < 0)
+ abbrev = FALLBACK_DEFAULT_ABBREV;
+ if (abbrev > GIT_SHA1_HEXSZ)
die("BUG: oid abbreviation out of range: %d", abbrev);
hex[abbrev] = '\0';
return hex;
@@ -3481,7 +3483,7 @@ void diff_setup_done(struct diff_options *options)
*/
read_cache();
}
- if (options->abbrev <= 0 || 40 < options->abbrev)
+ if (40 < options->abbrev)
options->abbrev = 40; /* full */
/*
@@ -3985,6 +3987,10 @@ int diff_opt_parse(struct diff_options *options,
return parse_submodule_opt(options, arg);
else if (skip_prefix(arg, "--ws-error-highlight=", &arg))
return parse_ws_error_highlight_opt(options, arg);
+ else if (!strcmp(arg, "--ita-invisible-in-index"))
+ options->ita_invisible_in_index = 1;
+ else if (!strcmp(arg, "--ita-visible-in-index"))
+ options->ita_invisible_in_index = 0;
/* misc options */
else if (!strcmp(arg, "-z"))
diff --git a/diff.h b/diff.h
index 01afc70bd5..e9ccb38c26 100644
--- a/diff.h
+++ b/diff.h
@@ -146,6 +146,7 @@ struct diff_options {
int dirstat_permille;
int setup;
int abbrev;
+ int ita_invisible_in_index;
/* white-space error highlighting */
#define WSEH_NEW 1
#define WSEH_CONTEXT 2
@@ -360,7 +361,7 @@ extern int diff_result_code(struct diff_options *, int);
extern void diff_no_index(struct rev_info *, int, const char **);
-extern int index_differs_from(const char *def, int diff_flags);
+extern int index_differs_from(const char *def, int diff_flags, int ita_invisible_in_index);
/*
* Fill the contents of the filespec "df", respecting any textconv defined by
diff --git a/environment.c b/environment.c
index cdc097f80c..0935ec696e 100644
--- a/environment.c
+++ b/environment.c
@@ -16,7 +16,7 @@ int trust_executable_bit = 1;
int trust_ctime = 1;
int check_stat = 1;
int has_symlinks = 1;
-int minimum_abbrev = 4, default_abbrev = 7;
+int minimum_abbrev = 4, default_abbrev = -1;
int ignore_case;
int assume_unchanged;
int prefer_symlink_refs;
diff --git a/git-compat-util.h b/git-compat-util.h
index 49ca28cb6b..87237b092b 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -1101,6 +1101,6 @@ struct tm *git_gmtime_r(const time_t *, struct tm *);
#define getc_unlocked(fh) getc(fh)
#endif
-#endif
-
extern int cmd_main(int, const char **);
+
+#endif
diff --git a/git-sh-setup.sh b/git-sh-setup.sh
index a8a4576342..240c7ebcd1 100644
--- a/git-sh-setup.sh
+++ b/git-sh-setup.sh
@@ -2,9 +2,6 @@
# to set up some variables pointing at the normal git directories and
# a few helper shell functions.
-# Source git-sh-i18n for gettext support.
-. git-sh-i18n
-
# Having this variable in your environment would break scripts because
# you would cause "cd" to be taken to unexpected places. If you
# like CDPATH, define it for your interactive shell sessions without
@@ -46,6 +43,9 @@ git_broken_path_fix () {
# @@BROKEN_PATH_FIX@@
+# Source git-sh-i18n for gettext support.
+. "$(git --exec-path)/git-sh-i18n"
+
die () {
die_with_status 1 "$@"
}
diff --git a/git-stash.sh b/git-stash.sh
index 90d63f293e..4546abaaef 100755
--- a/git-stash.sh
+++ b/git-stash.sh
@@ -384,9 +384,8 @@ parse_flags_and_rev()
i_tree=
u_tree=
- REV=$(git rev-parse --no-flags --symbolic --sq "$@") || exit 1
-
FLAGS=
+ REV=
for opt
do
case "$opt" in
@@ -404,6 +403,9 @@ parse_flags_and_rev()
die "$(eval_gettext "unknown option: \$opt")"
FLAGS="${FLAGS}${FLAGS:+ }$opt"
;;
+ *)
+ REV="${REV}${REV:+ }'$opt'"
+ ;;
esac
done
@@ -422,6 +424,15 @@ parse_flags_and_rev()
;;
esac
+ case "$1" in
+ *[!0-9]*)
+ :
+ ;;
+ *)
+ set -- "${ref_stash}@{$1}"
+ ;;
+ esac
+
REV=$(git rev-parse --symbolic --verify --quiet "$1") || {
reference="$1"
die "$(eval_gettext "\$reference is not a valid reference")"
diff --git a/git-svn.perl b/git-svn.perl
index 4d41d220a0..fa42364785 100755
--- a/git-svn.perl
+++ b/git-svn.perl
@@ -44,6 +44,7 @@ use Git qw(
command_close_pipe
command_bidi_pipe
command_close_bidi_pipe
+ get_record
);
BEGIN {
@@ -1699,7 +1700,7 @@ sub cmd_gc {
"files will not be compressed.\n";
}
File::Find::find({ wanted => \&gc_directory, no_chdir => 1},
- "$ENV{GIT_DIR}/svn");
+ Git::SVN::svn_dir());
}
########################### utility functions #########################
@@ -1733,7 +1734,7 @@ sub post_fetch_checkout {
return unless verify_ref('HEAD^0');
return if $ENV{GIT_DIR} !~ m#^(?:.*/)?\.git$#;
- my $index = $ENV{GIT_INDEX_FILE} || "$ENV{GIT_DIR}/index";
+ my $index = command_oneline(qw(rev-parse --git-path index));
return if -f $index;
return if command_oneline(qw/rev-parse --is-inside-work-tree/) eq 'false';
@@ -1835,8 +1836,9 @@ sub get_tree_from_treeish {
sub get_commit_entry {
my ($treeish) = shift;
my %log_entry = ( log => '', tree => get_tree_from_treeish($treeish) );
- my $commit_editmsg = "$ENV{GIT_DIR}/COMMIT_EDITMSG";
- my $commit_msg = "$ENV{GIT_DIR}/COMMIT_MSG";
+ my @git_path = qw(rev-parse --git-path);
+ my $commit_editmsg = command_oneline(@git_path, 'COMMIT_EDITMSG');
+ my $commit_msg = command_oneline(@git_path, 'COMMIT_MSG');
open my $log_fh, '>', $commit_editmsg or croak $!;
my $type = command_oneline(qw/cat-file -t/, $treeish);
@@ -1880,10 +1882,9 @@ sub get_commit_entry {
{
require Encode;
# SVN requires messages to be UTF-8 when entering the repo
- local $/;
open $log_fh, '<', $commit_msg or croak $!;
binmode $log_fh;
- chomp($log_entry{log} = <$log_fh>);
+ chomp($log_entry{log} = get_record($log_fh, undef));
my $enc = Git::config('i18n.commitencoding') || 'UTF-8';
my $msg = $log_entry{log};
diff --git a/hex.c b/hex.c
index ab2610e498..845b01a874 100644
--- a/hex.c
+++ b/hex.c
@@ -78,7 +78,8 @@ char *sha1_to_hex(const unsigned char *sha1)
{
static int bufno;
static char hexbuffer[4][GIT_SHA1_HEXSZ + 1];
- return sha1_to_hex_r(hexbuffer[3 & ++bufno], sha1);
+ bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
+ return sha1_to_hex_r(hexbuffer[bufno], sha1);
}
char *oid_to_hex(const struct object_id *oid)
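sha1_to_hex() above (and get_pathname() in path.c below) now rotate through their static buffers with an explicit modulo on ARRAY_SIZE instead of masking with "3 &", so the rotation stays correct even if the array size changes. The pattern in isolation, with illustrative names:

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* Return one of four static buffers in rotation; callers may hold on to
     * the last few results without them being overwritten immediately. */
    static char *next_buffer(void)
    {
        static int bufno;
        static char buffers[4][16];

        bufno = (bufno + 1) % ARRAY_SIZE(buffers);
        return buffers[bufno];
    }

    int main(void)
    {
        char *a = next_buffer(), *b = next_buffer();
        snprintf(a, 16, "first");
        snprintf(b, 16, "second");
        printf("%s %s\n", a, b);    /* both survive: different slots */
        return 0;
    }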
diff --git a/http-backend.c b/http-backend.c
index adc8c8c3da..eef0a361f4 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -464,7 +464,7 @@ static void get_info_refs(struct strbuf *hdr, char *arg)
hdr_str(hdr, content_type, buf.buf);
end_headers(hdr);
- packet_write(1, "# service=git-%s\n", svc->name);
+ packet_write_fmt(1, "# service=git-%s\n", svc->name);
packet_flush(1);
argv[0] = svc->name;
diff --git a/pack-bitmap.c b/pack-bitmap.c
index b949e51716..39bcc16846 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -266,7 +266,7 @@ static int open_pack_bitmap_1(struct packed_git *packfile)
return -1;
idx_name = pack_bitmap_filename(packfile);
- fd = git_open_noatime(idx_name);
+ fd = git_open(idx_name);
free(idx_name);
if (fd < 0)
diff --git a/path.c b/path.c
index a8e72955f6..52d889c88e 100644
--- a/path.c
+++ b/path.c
@@ -25,7 +25,8 @@ static struct strbuf *get_pathname(void)
STRBUF_INIT, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT
};
static int index;
- struct strbuf *sb = &pathname_array[3 & ++index];
+ struct strbuf *sb = &pathname_array[index];
+ index = (index + 1) % ARRAY_SIZE(pathname_array);
strbuf_reset(sb);
return sb;
}
diff --git a/perl/Git.pm b/perl/Git.pm
index 864123fe8e..b2732822af 100644
--- a/perl/Git.pm
+++ b/perl/Git.pm
@@ -59,7 +59,7 @@ require Exporter;
command_bidi_pipe command_close_bidi_pipe
version exec_path html_path hash_object git_cmd_try
remote_refs prompt
- get_tz_offset
+ get_tz_offset get_record
credential credential_read credential_write
temp_acquire temp_is_locked temp_release temp_reset temp_path);
@@ -538,6 +538,20 @@ sub get_tz_offset {
return sprintf("%s%02d%02d", $sign, (gmtime(abs($t - $gm)))[2,1]);
}
+=item get_record ( FILEHANDLE, INPUT_RECORD_SEPARATOR )
+
+Read one record from FILEHANDLE delimited by INPUT_RECORD_SEPARATOR,
+removing any trailing INPUT_RECORD_SEPARATOR.
+
+=cut
+
+sub get_record {
+ my ($fh, $rs) = @_;
+ local $/ = $rs;
+ my $rec = <$fh>;
+ chomp $rec if defined $rs;
+ $rec;
+}
=item prompt ( PROMPT , ISPASSWORD )
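Git.pm's new get_record() reads one record up to a caller-supplied separator (NUL in the call sites converted below) instead of relying on a localized $/. POSIX getdelim() expresses the same idea in C; this standalone example is only an analogy, not code from the patch:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    int main(void)
    {
        char *rec = NULL;
        size_t cap = 0;
        ssize_t len;

        /* Read records delimited by NUL, e.g. piped from `git ls-tree -z`. */
        while ((len = getdelim(&rec, &cap, '\0', stdin)) != -1) {
            if (len > 0 && rec[len - 1] == '\0')
                len--;              /* strip the delimiter, like chomp */
            printf("%.*s\n", (int)len, rec);
        }
        free(rec);
        return 0;
    }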
diff --git a/perl/Git/SVN.pm b/perl/Git/SVN.pm
index 018beb85a0..711d2687a3 100644
--- a/perl/Git/SVN.pm
+++ b/perl/Git/SVN.pm
@@ -807,10 +807,15 @@ sub get_fetch_range {
(++$min, $max);
}
+sub svn_dir {
+ command_oneline(qw(rev-parse --git-path svn));
+}
+
sub tmp_config {
my (@args) = @_;
- my $old_def_config = "$ENV{GIT_DIR}/svn/config";
- my $config = "$ENV{GIT_DIR}/svn/.metadata";
+ my $svn_dir = svn_dir();
+ my $old_def_config = "$svn_dir/config";
+ my $config = "$svn_dir/.metadata";
if (! -f $config && -f $old_def_config) {
rename $old_def_config, $config or
die "Failed rename $old_def_config => $config: $!\n";
@@ -1658,7 +1663,17 @@ sub tie_for_persistent_memoization {
if ($memo_backend > 0) {
tie %$hash => 'Git::SVN::Memoize::YAML', "$path.yaml";
} else {
- tie %$hash => 'Memoize::Storable', "$path.db", 'nstore';
+ # first verify that any existing file can actually be loaded
+ # (it may have been saved by an incompatible version)
+ my $db = "$path.db";
+ if (-e $db) {
+ use Storable qw(retrieve);
+
+ if (!eval { retrieve($db); 1 }) {
+ unlink $db or die "unlink $db failed: $!";
+ }
+ }
+ tie %$hash => 'Memoize::Storable', $db, 'nstore';
}
}
@@ -1671,7 +1686,7 @@ sub tie_for_persistent_memoization {
return if $memoized;
$memoized = 1;
- my $cache_path = "$ENV{GIT_DIR}/svn/.caches/";
+ my $cache_path = svn_dir() . '/.caches/';
mkpath([$cache_path]) unless -d $cache_path;
my %lookup_svn_merge_cache;
@@ -1712,7 +1727,7 @@ sub tie_for_persistent_memoization {
sub clear_memoized_mergeinfo_caches {
die "Only call this method in non-memoized context" if ($memoized);
- my $cache_path = "$ENV{GIT_DIR}/svn/.caches/";
+ my $cache_path = svn_dir() . '/.caches/';
return unless -d $cache_path;
for my $cache_file (("$cache_path/lookup_svn_merge",
@@ -2446,12 +2461,13 @@ sub _new {
"refs/remotes/$prefix$default_ref_id";
}
$_[1] = $repo_id;
- my $dir = "$ENV{GIT_DIR}/svn/$ref_id";
+ my $svn_dir = svn_dir();
+ my $dir = "$svn_dir/$ref_id";
- # Older repos imported by us used $GIT_DIR/svn/foo instead of
- # $GIT_DIR/svn/refs/remotes/foo when tracking refs/remotes/foo
+ # Older repos imported by us used $svn_dir/foo instead of
+ # $svn_dir/refs/remotes/foo when tracking refs/remotes/foo
if ($ref_id =~ m{^refs/remotes/(.+)}) {
- my $old_dir = "$ENV{GIT_DIR}/svn/$1";
+ my $old_dir = "$svn_dir/$1";
if (-d $old_dir && ! -d $dir) {
$dir = $old_dir;
}
@@ -2461,7 +2477,7 @@ sub _new {
mkpath([$dir]);
my $obj = bless {
ref_id => $ref_id, dir => $dir, index => "$dir/index",
- config => "$ENV{GIT_DIR}/svn/config",
+ config => "$svn_dir/config",
map_root => "$dir/.rev_map", repo_id => $repo_id }, $class;
# Ensure it gets canonicalized
diff --git a/perl/Git/SVN/Editor.pm b/perl/Git/SVN/Editor.pm
index 4c4199afec..0df16ed726 100644
--- a/perl/Git/SVN/Editor.pm
+++ b/perl/Git/SVN/Editor.pm
@@ -7,7 +7,9 @@ use SVN::Delta;
use Carp qw/croak/;
use Git qw/command command_oneline command_noisy command_output_pipe
command_input_pipe command_close_pipe
- command_bidi_pipe command_close_bidi_pipe/;
+ command_bidi_pipe command_close_bidi_pipe
+ get_record/;
+
BEGIN {
@ISA = qw(SVN::Delta::Editor);
}
@@ -57,11 +59,9 @@ sub generate_diff {
push @diff_tree, "-l$_rename_limit" if defined $_rename_limit;
push @diff_tree, $tree_a, $tree_b;
my ($diff_fh, $ctx) = command_output_pipe(@diff_tree);
- local $/ = "\0";
my $state = 'meta';
my @mods;
- while (<$diff_fh>) {
- chomp $_; # this gets rid of the trailing "\0"
+ while (defined($_ = get_record($diff_fh, "\0"))) {
if ($state eq 'meta' && /^:(\d{6})\s(\d{6})\s
($::sha1)\s($::sha1)\s
([MTCRAD])\d*$/xo) {
@@ -173,9 +173,7 @@ sub rmdirs {
my ($fh, $ctx) = command_output_pipe(qw/ls-tree --name-only -r -z/,
$self->{tree_b});
- local $/ = "\0";
- while (<$fh>) {
- chomp;
+ while (defined($_ = get_record($fh, "\0"))) {
my @dn = split m#/#, $_;
while (pop @dn) {
delete $rm->{join '/', @dn};
diff --git a/perl/Git/SVN/Fetcher.pm b/perl/Git/SVN/Fetcher.pm
index d8c21ad915..64e900a0e9 100644
--- a/perl/Git/SVN/Fetcher.pm
+++ b/perl/Git/SVN/Fetcher.pm
@@ -9,7 +9,8 @@ use Carp qw/croak/;
use File::Basename qw/dirname/;
use Git qw/command command_oneline command_noisy command_output_pipe
command_input_pipe command_close_pipe
- command_bidi_pipe command_close_bidi_pipe/;
+ command_bidi_pipe command_close_bidi_pipe
+ get_record/;
BEGIN {
@ISA = qw(SVN::Delta::Editor);
}
@@ -86,11 +87,9 @@ sub _mark_empty_symlinks {
my $printed_warning;
chomp(my $empty_blob = `git hash-object -t blob --stdin < /dev/null`);
my ($ls, $ctx) = command_output_pipe(qw/ls-tree -r -z/, $cmt);
- local $/ = "\0";
my $pfx = defined($switch_path) ? $switch_path : $git_svn->path;
$pfx .= '/' if length($pfx);
- while (<$ls>) {
- chomp;
+ while (defined($_ = get_record($ls, "\0"))) {
s/\A100644 blob $empty_blob\t//o or next;
unless ($printed_warning) {
print STDERR "Scanning for empty symlinks, ",
@@ -179,9 +178,7 @@ sub delete_entry {
my ($ls, $ctx) = command_output_pipe(qw/ls-tree
-r --name-only -z/,
$tree);
- local $/ = "\0";
- while (<$ls>) {
- chomp;
+ while (defined($_ = get_record($ls, "\0"))) {
my $rmpath = "$gpath/$_";
$self->{gii}->remove($rmpath);
print "\tD\t$rmpath\n" unless $::_q;
@@ -247,9 +244,7 @@ sub add_directory {
my ($ls, $ctx) = command_output_pipe(qw/ls-tree
-r --name-only -z/,
$self->{c});
- local $/ = "\0";
- while (<$ls>) {
- chomp;
+ while (defined($_ = get_record($ls, "\0"))) {
$self->{gii}->remove($_);
print "\tD\t$_\n" unless $::_q;
push @deleted_gpath, $gpath;
diff --git a/perl/Git/SVN/Migration.pm b/perl/Git/SVN/Migration.pm
index cf6ffa7581..dc90f6a621 100644
--- a/perl/Git/SVN/Migration.pm
+++ b/perl/Git/SVN/Migration.pm
@@ -44,7 +44,9 @@ use Git qw(
command_noisy
command_output_pipe
command_close_pipe
+ command_oneline
);
+use Git::SVN;
sub migrate_from_v0 {
my $git_dir = $ENV{GIT_DIR};
@@ -55,7 +57,9 @@ sub migrate_from_v0 {
chomp;
my ($id, $orig_ref) = ($_, $_);
next unless $id =~ s#^refs/heads/(.+)-HEAD$#$1#;
- next unless -f "$git_dir/$id/info/url";
+ my $info_url = command_oneline(qw(rev-parse --git-path),
+ "$id/info/url");
+ next unless -f $info_url;
my $new_ref = "refs/remotes/$id";
if (::verify_ref("$new_ref^0")) {
print STDERR "W: $orig_ref is probably an old ",
@@ -82,7 +86,7 @@ sub migrate_from_v1 {
my $git_dir = $ENV{GIT_DIR};
my $migrated = 0;
return $migrated unless -d $git_dir;
- my $svn_dir = "$git_dir/svn";
+ my $svn_dir = Git::SVN::svn_dir();
# just in case somebody used 'svn' as their $id at some point...
return $migrated if -d $svn_dir && ! -f "$svn_dir/info/url";
@@ -97,27 +101,28 @@ sub migrate_from_v1 {
my $x = $_;
next unless $x =~ s#^refs/remotes/##;
chomp $x;
- next unless -f "$git_dir/$x/info/url";
- my $u = eval { ::file_to_s("$git_dir/$x/info/url") };
+ my $info_url = command_oneline(qw(rev-parse --git-path),
+ "$x/info/url");
+ next unless -f $info_url;
+ my $u = eval { ::file_to_s($info_url) };
next unless $u;
- my $dn = dirname("$git_dir/svn/$x");
+ my $dn = dirname("$svn_dir/$x");
mkpath([$dn]) unless -d $dn;
if ($x eq 'svn') { # they used 'svn' as GIT_SVN_ID:
- mkpath(["$git_dir/svn/svn"]);
+ mkpath(["$svn_dir/svn"]);
print STDERR " - $git_dir/$x/info => ",
- "$git_dir/svn/$x/info\n";
- rename "$git_dir/$x/info", "$git_dir/svn/$x/info" or
+ "$svn_dir/$x/info\n";
+ rename "$git_dir/$x/info", "$svn_dir/$x/info" or
croak "$!: $x";
# don't worry too much about these, they probably
# don't exist with repos this old (save for index,
# and we can easily regenerate that)
foreach my $f (qw/unhandled.log index .rev_db/) {
- rename "$git_dir/$x/$f", "$git_dir/svn/$x/$f";
+ rename "$git_dir/$x/$f", "$svn_dir/$x/$f";
}
} else {
- print STDERR " - $git_dir/$x => $git_dir/svn/$x\n";
- rename "$git_dir/$x", "$git_dir/svn/$x" or
- croak "$!: $x";
+ print STDERR " - $git_dir/$x => $svn_dir/$x\n";
+ rename "$git_dir/$x", "$svn_dir/$x" or croak "$!: $x";
}
$migrated++;
}
@@ -139,9 +144,10 @@ sub read_old_urls {
push @dir, $_;
}
}
+ my $svn_dir = Git::SVN::svn_dir();
foreach (@dir) {
my $x = $_;
- $x =~ s!^\Q$ENV{GIT_DIR}\E/svn/!!o;
+ $x =~ s!^\Q$svn_dir\E/!!o;
read_old_urls($l_map, $x, $_);
}
}
@@ -150,7 +156,7 @@ sub migrate_from_v2 {
my @cfg = command(qw/config -l/);
return if grep /^svn-remote\..+\.url=/, @cfg;
my %l_map;
- read_old_urls(\%l_map, '', "$ENV{GIT_DIR}/svn");
+ read_old_urls(\%l_map, '', Git::SVN::svn_dir());
my $migrated = 0;
require Git::SVN;
@@ -239,7 +245,8 @@ sub minimize_connections {
}
}
if (@emptied) {
- my $file = $ENV{GIT_CONFIG} || "$ENV{GIT_DIR}/config";
+ my $file = $ENV{GIT_CONFIG} ||
+ command_oneline(qw(rev-parse --git-path config));
print STDERR <<EOF;
The following [svn-remote] sections in your config file ($file) are empty
and can be safely removed:
diff --git a/pkt-line.c b/pkt-line.c
index 30489c60b1..d4b6bfe076 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -91,16 +91,34 @@ void packet_flush(int fd)
write_or_die(fd, "0000", 4);
}
+int packet_flush_gently(int fd)
+{
+ packet_trace("0000", 4, 1);
+ if (write_in_full(fd, "0000", 4) == 4)
+ return 0;
+ return error("flush packet write failed");
+}
+
void packet_buf_flush(struct strbuf *buf)
{
packet_trace("0000", 4, 1);
strbuf_add(buf, "0000", 4);
}
-#define hex(a) (hexchar[(a) & 15])
-static void format_packet(struct strbuf *out, const char *fmt, va_list args)
+static void set_packet_header(char *buf, const int size)
{
static char hexchar[] = "0123456789abcdef";
+
+ #define hex(a) (hexchar[(a) & 15])
+ buf[0] = hex(size >> 12);
+ buf[1] = hex(size >> 8);
+ buf[2] = hex(size >> 4);
+ buf[3] = hex(size);
+ #undef hex
+}
+
+static void format_packet(struct strbuf *out, const char *fmt, va_list args)
+{
size_t orig_len, n;
orig_len = out->len;
@@ -111,23 +129,63 @@ static void format_packet(struct strbuf *out, const char *fmt, va_list args)
if (n > LARGE_PACKET_MAX)
die("protocol error: impossibly long line");
- out->buf[orig_len + 0] = hex(n >> 12);
- out->buf[orig_len + 1] = hex(n >> 8);
- out->buf[orig_len + 2] = hex(n >> 4);
- out->buf[orig_len + 3] = hex(n);
+ set_packet_header(&out->buf[orig_len], n);
packet_trace(out->buf + orig_len + 4, n - 4, 1);
}
-void packet_write(int fd, const char *fmt, ...)
+static int packet_write_fmt_1(int fd, int gently,
+ const char *fmt, va_list args)
+{
+ struct strbuf buf = STRBUF_INIT;
+ ssize_t count;
+
+ format_packet(&buf, fmt, args);
+ count = write_in_full(fd, buf.buf, buf.len);
+ if (count == buf.len)
+ return 0;
+
+ if (!gently) {
+ check_pipe(errno);
+ die_errno("packet write with format failed");
+ }
+ return error("packet write with format failed");
+}
+
+void packet_write_fmt(int fd, const char *fmt, ...)
{
- static struct strbuf buf = STRBUF_INIT;
va_list args;
- strbuf_reset(&buf);
va_start(args, fmt);
- format_packet(&buf, fmt, args);
+ packet_write_fmt_1(fd, 0, fmt, args);
+ va_end(args);
+}
+
+int packet_write_fmt_gently(int fd, const char *fmt, ...)
+{
+ int status;
+ va_list args;
+
+ va_start(args, fmt);
+ status = packet_write_fmt_1(fd, 1, fmt, args);
va_end(args);
- write_or_die(fd, buf.buf, buf.len);
+ return status;
+}
+
+static int packet_write_gently(const int fd_out, const char *buf, size_t size)
+{
+ static char packet_write_buffer[LARGE_PACKET_MAX];
+ size_t packet_size;
+
+ if (size > sizeof(packet_write_buffer) - 4)
+ return error("packet write failed - data exceeds max packet size");
+
+ packet_trace(buf, size, 1);
+ packet_size = size + 4;
+ set_packet_header(packet_write_buffer, packet_size);
+ memcpy(packet_write_buffer + 4, buf, size);
+ if (write_in_full(fd_out, packet_write_buffer, packet_size) == packet_size)
+ return 0;
+ return error("packet write failed");
}
void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
@@ -139,6 +197,46 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
va_end(args);
}
+int write_packetized_from_fd(int fd_in, int fd_out)
+{
+ static char buf[LARGE_PACKET_DATA_MAX];
+ int err = 0;
+ ssize_t bytes_to_write;
+
+ while (!err) {
+ bytes_to_write = xread(fd_in, buf, sizeof(buf));
+ if (bytes_to_write < 0)
+ return COPY_READ_ERROR;
+ if (bytes_to_write == 0)
+ break;
+ err = packet_write_gently(fd_out, buf, bytes_to_write);
+ }
+ if (!err)
+ err = packet_flush_gently(fd_out);
+ return err;
+}
+
+int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
+{
+ int err = 0;
+ size_t bytes_written = 0;
+ size_t bytes_to_write;
+
+ while (!err) {
+ if ((len - bytes_written) > LARGE_PACKET_DATA_MAX)
+ bytes_to_write = LARGE_PACKET_DATA_MAX;
+ else
+ bytes_to_write = len - bytes_written;
+ if (bytes_to_write == 0)
+ break;
+ err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write);
+ bytes_written += bytes_to_write;
+ }
+ if (!err)
+ err = packet_flush_gently(fd_out);
+ return err;
+}
+
static int get_packet_data(int fd, char **src_buf, size_t *src_size,
void *dst, unsigned size, int options)
{
@@ -229,3 +327,35 @@ char *packet_read_line_buf(char **src, size_t *src_len, int *dst_len)
{
return packet_read_line_generic(-1, src, src_len, dst_len);
}
+
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
+{
+ int packet_len;
+
+ size_t orig_len = sb_out->len;
+ size_t orig_alloc = sb_out->alloc;
+
+ for (;;) {
+ strbuf_grow(sb_out, LARGE_PACKET_DATA_MAX);
+ packet_len = packet_read(fd_in, NULL, NULL,
+ /* strbuf_grow() above always allocates one extra byte to
+ * store a '\0' at the end of the string. packet_read()
+ * writes a '\0' extra byte at the end, too. Let it know
+ * that there is already room for the extra byte.
+ */
+ sb_out->buf + sb_out->len, LARGE_PACKET_DATA_MAX+1,
+ PACKET_READ_GENTLE_ON_EOF);
+ if (packet_len <= 0)
+ break;
+ sb_out->len += packet_len;
+ }
+
+ if (packet_len < 0) {
+ if (orig_alloc == 0)
+ strbuf_release(sb_out);
+ else
+ strbuf_setlen(sb_out, orig_len);
+ return packet_len;
+ }
+ return sb_out->len - orig_len;
+}
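write_packetized_from_buf() above slices an arbitrarily large buffer into pkt-lines of at most LARGE_PACKET_DATA_MAX bytes and then terminates the stream with a flush packet. The chunking loop on its own, with a stub writer so the sketch stays self-contained (emit_packet() is a placeholder, not git's packet_write_gently()):

    #include <stdio.h>
    #include <string.h>

    #define LARGE_PACKET_MAX 65520
    #define LARGE_PACKET_DATA_MAX (LARGE_PACKET_MAX - 4)

    /* Stand-in for the real packet writer: just report the chunk size. */
    static int emit_packet(const char *buf, size_t size)
    {
        (void)buf;
        printf("packet of %zu bytes\n", size);
        return 0;
    }

    /* Split src into <= LARGE_PACKET_DATA_MAX chunks, then a flush packet. */
    static int write_packetized(const char *src, size_t len)
    {
        size_t written = 0;

        while (written < len) {
            size_t chunk = len - written;
            if (chunk > LARGE_PACKET_DATA_MAX)
                chunk = LARGE_PACKET_DATA_MAX;
            if (emit_packet(src + written, chunk))
                return -1;
            written += chunk;
        }
        printf("flush\n");
        return 0;
    }

    int main(void)
    {
        static char big[200000];
        memset(big, 'x', sizeof(big));
        return write_packetized(big, sizeof(big));
    }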
diff --git a/pkt-line.h b/pkt-line.h
index 3cb9d91baa..18eac64830 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -20,9 +20,13 @@
* side can't, we stay with pure read/write interfaces.
*/
void packet_flush(int fd);
-void packet_write(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+void packet_write_fmt(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
void packet_buf_flush(struct strbuf *buf);
void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+int packet_flush_gently(int fd);
+int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+int write_packetized_from_fd(int fd_in, int fd_out);
+int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
/*
* Read a packetized line into the buffer, which must be at least size bytes
@@ -75,8 +79,14 @@ char *packet_read_line(int fd, int *size);
*/
char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size);
+/*
+ * Reads a stream of variable sized packets until a flush packet is detected.
+ */
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
+
#define DEFAULT_PACKET_MAX 1000
#define LARGE_PACKET_MAX 65520
+#define LARGE_PACKET_DATA_MAX (LARGE_PACKET_MAX - 4)
extern char packet_buffer[LARGE_PACKET_MAX];
#endif
diff --git a/read-cache.c b/read-cache.c
index 38d67faf70..db5d910642 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -156,7 +156,14 @@ void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
{
int match = -1;
- int fd = open(ce->name, O_RDONLY);
+ static int cloexec = O_CLOEXEC;
+ int fd = open(ce->name, O_RDONLY | cloexec);
+
+ if ((cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
+ /* Try again w/o O_CLOEXEC: the kernel might not support it */
+ cloexec &= ~O_CLOEXEC;
+ fd = open(ce->name, O_RDONLY | cloexec);
+ }
if (fd >= 0) {
unsigned char sha1[20];
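ce_compare_data() above opens files with O_CLOEXEC and, if the running kernel rejects the flag with EINVAL, drops it from every subsequent open. The same fallback pattern in a standalone form (the wrapper name and test path are only for the example):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef O_CLOEXEC
    #define O_CLOEXEC 0   /* platforms without it simply lose the close-on-exec bit */
    #endif

    static int open_rdonly_cloexec(const char *path)
    {
        static int cloexec = O_CLOEXEC;
        int fd = open(path, O_RDONLY | cloexec);

        if ((cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
            /* Old kernel: retry without O_CLOEXEC and remember the result. */
            cloexec &= ~O_CLOEXEC;
            fd = open(path, O_RDONLY | cloexec);
        }
        return fd;
    }

    int main(void)
    {
        int fd = open_rdonly_cloexec("/etc/hostname");
        if (fd < 0)
            perror("open");
        else
            close(fd);
        return 0;
    }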
diff --git a/run-command.c b/run-command.c
index 5a4dbb66d7..ca905a9e80 100644
--- a/run-command.c
+++ b/run-command.c
@@ -21,6 +21,7 @@ void child_process_clear(struct child_process *child)
struct child_to_clean {
pid_t pid;
+ struct child_process *process;
struct child_to_clean *next;
};
static struct child_to_clean *children_to_clean;
@@ -31,6 +32,18 @@ static void cleanup_children(int sig, int in_signal)
while (children_to_clean) {
struct child_to_clean *p = children_to_clean;
children_to_clean = p->next;
+
+ if (p->process && !in_signal) {
+ struct child_process *process = p->process;
+ if (process->clean_on_exit_handler) {
+ trace_printf(
+ "trace: run_command: running exit handler for pid %"
+ PRIuMAX, (uintmax_t)p->pid
+ );
+ process->clean_on_exit_handler(process);
+ }
+ }
+
kill(p->pid, sig);
if (!in_signal)
free(p);
@@ -49,10 +62,11 @@ static void cleanup_children_on_exit(void)
cleanup_children(SIGTERM, 0);
}
-static void mark_child_for_cleanup(pid_t pid)
+static void mark_child_for_cleanup(pid_t pid, struct child_process *process)
{
struct child_to_clean *p = xmalloc(sizeof(*p));
p->pid = pid;
+ p->process = process;
p->next = children_to_clean;
children_to_clean = p;
@@ -422,7 +436,7 @@ fail_pipe:
if (cmd->pid < 0)
error_errno("cannot fork() for %s", cmd->argv[0]);
else if (cmd->clean_on_exit)
- mark_child_for_cleanup(cmd->pid);
+ mark_child_for_cleanup(cmd->pid, cmd);
/*
* Wait for child's execvp. If the execvp succeeds (or if fork()
@@ -483,7 +497,7 @@ fail_pipe:
if (cmd->pid < 0 && (!cmd->silent_exec_failure || errno != ENOENT))
error_errno("cannot spawn %s", cmd->argv[0]);
if (cmd->clean_on_exit && cmd->pid >= 0)
- mark_child_for_cleanup(cmd->pid);
+ mark_child_for_cleanup(cmd->pid, cmd);
argv_array_clear(&nargv);
cmd->argv = sargv;
@@ -634,7 +648,7 @@ int in_async(void)
return !pthread_equal(main_thread, pthread_self());
}
-void NORETURN async_exit(int code)
+static void NORETURN async_exit(int code)
{
pthread_exit((void *)(intptr_t)code);
}
@@ -684,13 +698,26 @@ int in_async(void)
return process_is_async;
}
-void NORETURN async_exit(int code)
+static void NORETURN async_exit(int code)
{
exit(code);
}
#endif
+void check_pipe(int err)
+{
+ if (err == EPIPE) {
+ if (in_async())
+ async_exit(141);
+
+ signal(SIGPIPE, SIG_DFL);
+ raise(SIGPIPE);
+ /* Should never happen, but just in case... */
+ exit(141);
+ }
+}
+
int start_async(struct async *async)
{
int need_in, need_out;
@@ -752,7 +779,7 @@ int start_async(struct async *async)
exit(!!async->proc(proc_in, proc_out, async->data));
}
- mark_child_for_cleanup(async->pid);
+ mark_child_for_cleanup(async->pid, NULL);
if (need_in)
close(fdin[0]);
diff --git a/run-command.h b/run-command.h
index 50666497ae..dd1c78c28d 100644
--- a/run-command.h
+++ b/run-command.h
@@ -43,6 +43,8 @@ struct child_process {
unsigned stdout_to_stderr:1;
unsigned use_shell:1;
unsigned clean_on_exit:1;
+ void (*clean_on_exit_handler)(struct child_process *process);
+ void *clean_on_exit_handler_cbdata;
};
#define CHILD_PROCESS_INIT { NULL, ARGV_ARRAY_INIT, ARGV_ARRAY_INIT }
@@ -139,7 +141,7 @@ struct async {
int start_async(struct async *async);
int finish_async(struct async *async);
int in_async(void);
-void NORETURN async_exit(int code);
+void check_pipe(int err);
/**
* This callback should initialize the child process and preload the
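check_pipe(), introduced above, turns an EPIPE write error back into the default SIGPIPE death (exit status 141), so a consumer going away behaves like the classic pipeline case even when SIGPIPE was being ignored. A reduced sketch of the same idea, without the in_async() branch of the real function:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* If a write failed with EPIPE, die the way an unhandled SIGPIPE would. */
    static void check_pipe(int err)
    {
        if (err == EPIPE) {
            signal(SIGPIPE, SIG_DFL);
            raise(SIGPIPE);
            exit(141);   /* only reached if SIGPIPE is blocked */
        }
    }

    int main(void)
    {
        signal(SIGPIPE, SIG_IGN);   /* writes now fail with EPIPE instead */
        if (printf("hello\n") < 0 || fflush(stdout) == EOF)
            check_pipe(errno);
        return 0;
    }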
diff --git a/sequencer.c b/sequencer.c
index eec8a60d6b..5fd75f30dd 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -15,16 +15,46 @@
#include "merge-recursive.h"
#include "refs.h"
#include "argv-array.h"
+#include "quote.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
const char sign_off_header[] = "Signed-off-by: ";
static const char cherry_picked_prefix[] = "(cherry picked from commit ";
-static GIT_PATH_FUNC(git_path_todo_file, SEQ_TODO_FILE)
-static GIT_PATH_FUNC(git_path_opts_file, SEQ_OPTS_FILE)
-static GIT_PATH_FUNC(git_path_seq_dir, SEQ_DIR)
-static GIT_PATH_FUNC(git_path_head_file, SEQ_HEAD_FILE)
+GIT_PATH_FUNC(git_path_seq_dir, "sequencer")
+
+static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo")
+static GIT_PATH_FUNC(git_path_opts_file, "sequencer/opts")
+static GIT_PATH_FUNC(git_path_head_file, "sequencer/head")
+
+/*
+ * A script to set the GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, and
+ * GIT_AUTHOR_DATE that will be used for the commit that is currently
+ * being rebased.
+ */
+static GIT_PATH_FUNC(rebase_path_author_script, "rebase-merge/author-script")
+/*
+ * The following files are written by git-rebase just after parsing the
+ * command-line (and are only consumed, not modified, by the sequencer).
+ */
+static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt")
+
+/* We will introduce the 'interactive rebase' mode later */
+static inline int is_rebase_i(const struct replay_opts *opts)
+{
+ return 0;
+}
+
+static const char *get_dir(const struct replay_opts *opts)
+{
+ return git_path_seq_dir();
+}
+
+static const char *get_todo_path(const struct replay_opts *opts)
+{
+ return git_path_todo_file();
+}
static int is_rfc2822_line(const char *buf, int len)
{
@@ -108,18 +138,37 @@ static int has_conforming_footer(struct strbuf *sb, struct strbuf *sob,
return 1;
}
-static void remove_sequencer_state(void)
+static const char *gpg_sign_opt_quoted(struct replay_opts *opts)
+{
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ if (opts->gpg_sign)
+ sq_quotef(&buf, "-S%s", opts->gpg_sign);
+ return buf.buf;
+}
+
+int sequencer_remove_state(struct replay_opts *opts)
{
- struct strbuf seq_dir = STRBUF_INIT;
+ struct strbuf dir = STRBUF_INIT;
+ int i;
+
+ free(opts->gpg_sign);
+ free(opts->strategy);
+ for (i = 0; i < opts->xopts_nr; i++)
+ free(opts->xopts[i]);
+ free(opts->xopts);
+
+ strbuf_addf(&dir, "%s", get_dir(opts));
+ remove_dir_recursively(&dir, 0);
+ strbuf_release(&dir);
- strbuf_addstr(&seq_dir, git_path(SEQ_DIR));
- remove_dir_recursively(&seq_dir, 0);
- strbuf_release(&seq_dir);
+ return 0;
}
static const char *action_name(const struct replay_opts *opts)
{
- return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
+ return opts->action == REPLAY_REVERT ? N_("revert") : N_("cherry-pick");
}
struct commit_message {
@@ -129,13 +178,18 @@ struct commit_message {
const char *message;
};
+static const char *short_commit_name(struct commit *commit)
+{
+ return find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
+}
+
static int get_message(struct commit *commit, struct commit_message *out)
{
const char *abbrev, *subject;
int subject_len;
out->message = logmsg_reencode(commit, NULL, get_commit_output_encoding());
- abbrev = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
+ abbrev = short_commit_name(commit);
subject_len = find_commit_subject(out->message, &subject);
@@ -180,22 +234,64 @@ static void print_advice(int show_hint, struct replay_opts *opts)
}
}
-static int write_message(struct strbuf *msgbuf, const char *filename)
+static int write_message(const void *buf, size_t len, const char *filename,
+ int append_eol)
{
static struct lock_file msg_file;
int msg_fd = hold_lock_file_for_update(&msg_file, filename, 0);
if (msg_fd < 0)
- return error_errno(_("Could not lock '%s'"), filename);
- if (write_in_full(msg_fd, msgbuf->buf, msgbuf->len) < 0)
- return error_errno(_("Could not write to %s"), filename);
- strbuf_release(msgbuf);
- if (commit_lock_file(&msg_file) < 0)
- return error(_("Error wrapping up %s."), filename);
+ return error_errno(_("could not lock '%s'"), filename);
+ if (write_in_full(msg_fd, buf, len) < 0) {
+ rollback_lock_file(&msg_file);
+ return error_errno(_("could not write to '%s'"), filename);
+ }
+ if (append_eol && write(msg_fd, "\n", 1) < 0) {
+ rollback_lock_file(&msg_file);
+ return error_errno(_("could not write eol to '%s"), filename);
+ }
+ if (commit_lock_file(&msg_file) < 0) {
+ rollback_lock_file(&msg_file);
+ return error(_("failed to finalize '%s'."), filename);
+ }
return 0;
}
+/*
+ * Reads a file that was presumably written by a shell script, i.e. with an
+ * end-of-line marker that needs to be stripped.
+ *
+ * Note that only the last end-of-line marker is stripped, consistent with the
+ * behavior of "$(cat path)" in a shell script.
+ *
+ * Returns 1 if the file was read, 0 if it could not be read or does not exist.
+ */
+static int read_oneliner(struct strbuf *buf,
+ const char *path, int skip_if_empty)
+{
+ int orig_len = buf->len;
+
+ if (!file_exists(path))
+ return 0;
+
+ if (strbuf_read_file(buf, path, 0) < 0) {
+ warning_errno(_("could not read '%s'"), path);
+ return 0;
+ }
+
+ if (buf->len > orig_len && buf->buf[buf->len - 1] == '\n') {
+ if (--buf->len > orig_len && buf->buf[buf->len - 1] == '\r')
+ --buf->len;
+ buf->buf[buf->len] = '\0';
+ }
+
+ if (skip_if_empty && buf->len == orig_len)
+ return 0;
+
+ return 1;
+}
+
static struct tree *empty_tree(void)
{
return lookup_tree(EMPTY_TREE_SHA1_BIN);
@@ -204,16 +300,13 @@ static struct tree *empty_tree(void)
static int error_dirty_index(struct replay_opts *opts)
{
if (read_cache_unmerged())
- return error_resolve_conflict(action_name(opts));
+ return error_resolve_conflict(_(action_name(opts)));
- /* Different translation strings for cherry-pick and revert */
- if (opts->action == REPLAY_PICK)
- error(_("Your local changes would be overwritten by cherry-pick."));
- else
- error(_("Your local changes would be overwritten by revert."));
+ error(_("your local changes would be overwritten by %s."),
+ _(action_name(opts)));
if (advice_commit_before_merge)
- advise(_("Commit your changes or stash them to proceed."));
+ advise(_("commit your changes or stash them to proceed."));
return -1;
}
@@ -228,7 +321,7 @@ static int fast_forward_to(const unsigned char *to, const unsigned char *from,
if (checkout_fast_forward(from, to, 1))
return -1; /* the callee should have complained already */
- strbuf_addf(&sb, _("%s: fast-forward"), action_name(opts));
+ strbuf_addf(&sb, _("%s: fast-forward"), _(action_name(opts)));
transaction = ref_transaction_begin(&err);
if (!transaction ||
@@ -274,7 +367,7 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
struct merge_options o;
struct tree *result, *next_tree, *base_tree, *head_tree;
int clean;
- const char **xopt;
+ char **xopt;
static struct lock_file index_lock;
hold_locked_index(&index_lock, 1);
@@ -304,7 +397,7 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
/* TRANSLATORS: %s will be "revert" or "cherry-pick" */
return error(_("%s: Unable to write new index file"),
- action_name(opts));
+ _(action_name(opts)));
rollback_lock_file(&index_lock);
if (opts->signoff)
@@ -322,7 +415,7 @@ static int is_index_unchanged(void)
struct commit *head_commit;
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, head_sha1, NULL))
- return error(_("Could not resolve HEAD commit\n"));
+ return error(_("could not resolve HEAD commit\n"));
head_commit = lookup_commit(head_sha1);
@@ -342,41 +435,115 @@ static int is_index_unchanged(void)
if (!cache_tree_fully_valid(active_cache_tree))
if (cache_tree_update(&the_index, 0))
- return error(_("Unable to update cache tree\n"));
+ return error(_("unable to update cache tree\n"));
return !hashcmp(active_cache_tree->sha1, head_commit->tree->object.oid.hash);
}
/*
+ * Read the author-script file into an environment block, ready for use in
+ * run_command(), that can be free()d afterwards.
+ */
+static char **read_author_script(void)
+{
+ struct strbuf script = STRBUF_INIT;
+ int i, count = 0;
+ char *p, *p2, **env;
+ size_t env_size;
+
+ if (strbuf_read_file(&script, rebase_path_author_script(), 256) <= 0)
+ return NULL;
+
+ for (p = script.buf; *p; p++)
+ if (skip_prefix(p, "'\\\\''", (const char **)&p2))
+ strbuf_splice(&script, p - script.buf, p2 - p, "'", 1);
+ else if (*p == '\'')
+ strbuf_splice(&script, p-- - script.buf, 1, "", 0);
+ else if (*p == '\n') {
+ *p = '\0';
+ count++;
+ }
+
+ env_size = (count + 1) * sizeof(*env);
+ strbuf_grow(&script, env_size);
+ memmove(script.buf + env_size, script.buf, script.len);
+ p = script.buf + env_size;
+ env = (char **)strbuf_detach(&script, NULL);
+
+ for (i = 0; i < count; i++) {
+ env[i] = p;
+ p += strlen(p) + 1;
+ }
+ env[count] = NULL;
+
+ return env;
+}
+
+static const char staged_changes_advice[] =
+N_("you have staged changes in your working tree\n"
+"If these changes are meant to be squashed into the previous commit, run:\n"
+"\n"
+" git commit --amend %s\n"
+"\n"
+"If they are meant to go into a new commit, run:\n"
+"\n"
+" git commit %s\n"
+"\n"
+"In both cases, once you're done, continue with:\n"
+"\n"
+" git rebase --continue\n");
+
+/*
* If we are cherry-pick, and if the merge did not result in
* hand-editing, we will hit this commit and inherit the original
* author date and name.
+ *
* If we are revert, or if our cherry-pick results in a hand merge,
* we had better say that the current user is responsible for that.
+ *
+ * An exception is when run_git_commit() is called during an
+ * interactive rebase: in that case, we will want to retain the
+ * author metadata.
*/
static int run_git_commit(const char *defmsg, struct replay_opts *opts,
- int allow_empty)
+ int allow_empty, int edit, int amend,
+ int cleanup_commit_message)
{
+ char **env = NULL;
struct argv_array array;
int rc;
const char *value;
+ if (is_rebase_i(opts)) {
+ env = read_author_script();
+ if (!env) {
+ const char *gpg_opt = gpg_sign_opt_quoted(opts);
+
+ return error(_(staged_changes_advice),
+ gpg_opt, gpg_opt);
+ }
+ }
+
argv_array_init(&array);
argv_array_push(&array, "commit");
argv_array_push(&array, "-n");
+ if (amend)
+ argv_array_push(&array, "--amend");
if (opts->gpg_sign)
argv_array_pushf(&array, "-S%s", opts->gpg_sign);
if (opts->signoff)
argv_array_push(&array, "-s");
- if (!opts->edit) {
- argv_array_push(&array, "-F");
- argv_array_push(&array, defmsg);
- if (!opts->signoff &&
- !opts->record_origin &&
- git_config_get_value("commit.cleanup", &value))
- argv_array_push(&array, "--cleanup=verbatim");
- }
+ if (defmsg)
+ argv_array_pushl(&array, "-F", defmsg, NULL);
+ if (cleanup_commit_message)
+ argv_array_push(&array, "--cleanup=strip");
+ if (edit)
+ argv_array_push(&array, "-e");
+ else if (!cleanup_commit_message &&
+ !opts->signoff && !opts->record_origin &&
+ git_config_get_value("commit.cleanup", &value))
+ argv_array_push(&array, "--cleanup=verbatim");
if (allow_empty)
argv_array_push(&array, "--allow-empty");
@@ -384,8 +551,11 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
if (opts->allow_empty_message)
argv_array_push(&array, "--allow-empty-message");
- rc = run_command_v_opt(array.argv, RUN_GIT_CMD);
+ rc = run_command_v_opt_cd_env(array.argv, RUN_GIT_CMD, NULL,
+ (const char *const *)env);
argv_array_clear(&array);
+ free(env);
+
return rc;
}
@@ -394,12 +564,12 @@ static int is_original_commit_empty(struct commit *commit)
const unsigned char *ptree_sha1;
if (parse_commit(commit))
- return error(_("Could not parse commit %s\n"),
+ return error(_("could not parse commit %s\n"),
oid_to_hex(&commit->object.oid));
if (commit->parents) {
struct commit *parent = commit->parents->item;
if (parse_commit(parent))
- return error(_("Could not parse parent commit %s\n"),
+ return error(_("could not parse parent commit %s\n"),
oid_to_hex(&parent->object.oid));
ptree_sha1 = parent->tree->object.oid.hash;
} else {
@@ -447,7 +617,26 @@ static int allow_empty(struct replay_opts *opts, struct commit *commit)
return 1;
}
-static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
+enum todo_command {
+ TODO_PICK = 0,
+ TODO_REVERT
+};
+
+static const char *todo_command_strings[] = {
+ "pick",
+ "revert"
+};
+
+static const char *command_to_string(const enum todo_command command)
+{
+ if (command < ARRAY_SIZE(todo_command_strings))
+ return todo_command_strings[command];
+ die("Unknown command: %d", command);
+}
+
+
+static int do_pick_commit(enum todo_command command, struct commit *commit,
+ struct replay_opts *opts)
{
unsigned char head[20];
struct commit *base, *next, *parent;
@@ -464,12 +653,12 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
* to work on.
*/
if (write_cache_as_tree(head, 0, NULL))
- return error(_("Your index file is unmerged."));
+ return error(_("your index file is unmerged."));
} else {
unborn = get_sha1("HEAD", head);
if (unborn)
hashcpy(head, EMPTY_TREE_SHA1_BIN);
- if (index_differs_from(unborn ? EMPTY_TREE_SHA1_HEX : "HEAD", 0))
+ if (index_differs_from(unborn ? EMPTY_TREE_SHA1_HEX : "HEAD", 0, 0))
return error_dirty_index(opts);
}
discard_cache();
@@ -483,7 +672,7 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
struct commit_list *p;
if (!opts->mainline)
- return error(_("Commit %s is a merge but no -m option was given."),
+ return error(_("commit %s is a merge but no -m option was given."),
oid_to_hex(&commit->object.oid));
for (cnt = 1, p = commit->parents;
@@ -491,11 +680,11 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
cnt++)
p = p->next;
if (cnt != opts->mainline || !p)
- return error(_("Commit %s does not have parent %d"),
+ return error(_("commit %s does not have parent %d"),
oid_to_hex(&commit->object.oid), opts->mainline);
parent = p->item;
} else if (0 < opts->mainline)
- return error(_("Mainline was specified but commit %s is not a merge."),
+ return error(_("mainline was specified but commit %s is not a merge."),
oid_to_hex(&commit->object.oid));
else
parent = commit->parents->item;
@@ -506,13 +695,14 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
return fast_forward_to(commit->object.oid.hash, head, unborn, opts);
if (parent && parse_commit(parent) < 0)
- /* TRANSLATORS: The first %s will be "revert" or
- "cherry-pick", the second %s a SHA1 */
+ /* TRANSLATORS: The first %s will be a "todo" command like
+ "revert" or "pick", the second %s a SHA1. */
return error(_("%s: cannot parse parent commit %s"),
- action_name(opts), oid_to_hex(&parent->object.oid));
+ command_to_string(command),
+ oid_to_hex(&parent->object.oid));
if (get_message(commit, &msg) != 0)
- return error(_("Cannot get commit message for %s"),
+ return error(_("cannot get commit message for %s"),
oid_to_hex(&commit->object.oid));
/*
@@ -522,7 +712,7 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
* reverse of it if we are revert.
*/
- if (opts->action == REPLAY_REVERT) {
+ if (command == TODO_REVERT) {
base = commit;
base_label = msg.label;
next = parent;
@@ -563,25 +753,29 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
}
}
- if (!opts->strategy || !strcmp(opts->strategy, "recursive") || opts->action == REPLAY_REVERT) {
+ if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) {
res = do_recursive_merge(base, next, base_label, next_label,
head, &msgbuf, opts);
if (res < 0)
return res;
- res |= write_message(&msgbuf, git_path_merge_msg());
+ res |= write_message(msgbuf.buf, msgbuf.len,
+ git_path_merge_msg(), 0);
} else {
struct commit_list *common = NULL;
struct commit_list *remotes = NULL;
- res = write_message(&msgbuf, git_path_merge_msg());
+ res = write_message(msgbuf.buf, msgbuf.len,
+ git_path_merge_msg(), 0);
commit_list_insert(base, &common);
commit_list_insert(next, &remotes);
- res |= try_merge_command(opts->strategy, opts->xopts_nr, opts->xopts,
+ res |= try_merge_command(opts->strategy,
+ opts->xopts_nr, (const char **)opts->xopts,
common, sha1_to_hex(head), remotes);
free_commit_list(common);
free_commit_list(remotes);
}
+ strbuf_release(&msgbuf);
/*
* If the merge was clean or if it failed due to conflict, we write
@@ -589,21 +783,20 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
* However, if the merge did not even start, then we don't want to
* write it at all.
*/
- if (opts->action == REPLAY_PICK && !opts->no_commit && (res == 0 || res == 1) &&
+ if (command == TODO_PICK && !opts->no_commit && (res == 0 || res == 1) &&
update_ref(NULL, "CHERRY_PICK_HEAD", commit->object.oid.hash, NULL,
REF_NODEREF, UPDATE_REFS_MSG_ON_ERR))
res = -1;
- if (opts->action == REPLAY_REVERT && ((opts->no_commit && res == 0) || res == 1) &&
+ if (command == TODO_REVERT && ((opts->no_commit && res == 0) || res == 1) &&
update_ref(NULL, "REVERT_HEAD", commit->object.oid.hash, NULL,
REF_NODEREF, UPDATE_REFS_MSG_ON_ERR))
res = -1;
if (res) {
- error(opts->action == REPLAY_REVERT
+ error(command == TODO_REVERT
? _("could not revert %s... %s")
: _("could not apply %s... %s"),
- find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV),
- msg.subject);
+ short_commit_name(commit), msg.subject);
print_advice(res == 1, opts);
rerere(opts->allow_rerere_auto);
goto leave;
@@ -615,7 +808,8 @@ static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
goto leave;
}
if (!opts->no_commit)
- res = run_git_commit(git_path_merge_msg(), opts, allow);
+ res = run_git_commit(opts->edit ? NULL : git_path_merge_msg(),
+ opts, allow, opts->edit, 0, 0);
leave:
free_message(commit, &msg);
@@ -647,133 +841,160 @@ static int read_and_refresh_cache(struct replay_opts *opts)
if (read_index_preload(&the_index, NULL) < 0) {
rollback_lock_file(&index_lock);
return error(_("git %s: failed to read the index"),
- action_name(opts));
+ _(action_name(opts)));
}
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
if (the_index.cache_changed && index_fd >= 0) {
if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) {
rollback_lock_file(&index_lock);
return error(_("git %s: failed to refresh the index"),
- action_name(opts));
+ _(action_name(opts)));
}
}
rollback_lock_file(&index_lock);
return 0;
}
-static int format_todo(struct strbuf *buf, struct commit_list *todo_list,
- struct replay_opts *opts)
+struct todo_item {
+ enum todo_command command;
+ struct commit *commit;
+ const char *arg;
+ int arg_len;
+ size_t offset_in_buf;
+};
+
+struct todo_list {
+ struct strbuf buf;
+ struct todo_item *items;
+ int nr, alloc, current;
+};
+
+#define TODO_LIST_INIT { STRBUF_INIT }
+
+static void todo_list_release(struct todo_list *todo_list)
{
- struct commit_list *cur = NULL;
- const char *sha1_abbrev = NULL;
- const char *action_str = opts->action == REPLAY_REVERT ? "revert" : "pick";
- const char *subject;
- int subject_len;
+ strbuf_release(&todo_list->buf);
+ free(todo_list->items);
+ todo_list->items = NULL;
+ todo_list->nr = todo_list->alloc = 0;
+}
- for (cur = todo_list; cur; cur = cur->next) {
- const char *commit_buffer = get_commit_buffer(cur->item, NULL);
- sha1_abbrev = find_unique_abbrev(cur->item->object.oid.hash, DEFAULT_ABBREV);
- subject_len = find_commit_subject(commit_buffer, &subject);
- strbuf_addf(buf, "%s %s %.*s\n", action_str, sha1_abbrev,
- subject_len, subject);
- unuse_commit_buffer(cur->item, commit_buffer);
- }
- return 0;
+static struct todo_item *append_new_todo(struct todo_list *todo_list)
+{
+ ALLOC_GROW(todo_list->items, todo_list->nr + 1, todo_list->alloc);
+ return todo_list->items + todo_list->nr++;
}
-static struct commit *parse_insn_line(char *bol, char *eol, struct replay_opts *opts)
+static int parse_insn_line(struct todo_item *item, const char *bol, char *eol)
{
unsigned char commit_sha1[20];
- enum replay_action action;
char *end_of_object_name;
- int saved, status, padding;
-
- if (starts_with(bol, "pick")) {
- action = REPLAY_PICK;
- bol += strlen("pick");
- } else if (starts_with(bol, "revert")) {
- action = REPLAY_REVERT;
- bol += strlen("revert");
- } else
- return NULL;
+ int i, saved, status, padding;
+
+ /* left-trim */
+ bol += strspn(bol, " \t");
+
+ for (i = 0; i < ARRAY_SIZE(todo_command_strings); i++)
+ if (skip_prefix(bol, todo_command_strings[i], &bol)) {
+ item->command = i;
+ break;
+ }
+ if (i >= ARRAY_SIZE(todo_command_strings))
+ return -1;
/* Eat up extra spaces/ tabs before object name */
padding = strspn(bol, " \t");
if (!padding)
- return NULL;
+ return -1;
bol += padding;
- end_of_object_name = bol + strcspn(bol, " \t\n");
+ end_of_object_name = (char *) bol + strcspn(bol, " \t\n");
saved = *end_of_object_name;
*end_of_object_name = '\0';
status = get_sha1(bol, commit_sha1);
*end_of_object_name = saved;
- /*
- * Verify that the action matches up with the one in
- * opts; we don't support arbitrary instructions
- */
- if (action != opts->action) {
- if (action == REPLAY_REVERT)
- error((opts->action == REPLAY_REVERT)
- ? _("Cannot revert during another revert.")
- : _("Cannot revert during a cherry-pick."));
- else
- error((opts->action == REPLAY_REVERT)
- ? _("Cannot cherry-pick during a revert.")
- : _("Cannot cherry-pick during another cherry-pick."));
- return NULL;
- }
+ item->arg = end_of_object_name + strspn(end_of_object_name, " \t");
+ item->arg_len = (int)(eol - item->arg);
if (status < 0)
- return NULL;
+ return -1;
- return lookup_commit_reference(commit_sha1);
+ item->commit = lookup_commit_reference(commit_sha1);
+ return !item->commit;
}
-static int parse_insn_buffer(char *buf, struct commit_list **todo_list,
- struct replay_opts *opts)
+static int parse_insn_buffer(char *buf, struct todo_list *todo_list)
{
- struct commit_list **next = todo_list;
- struct commit *commit;
- char *p = buf;
- int i;
+ struct todo_item *item;
+ char *p = buf, *next_p;
+ int i, res = 0;
- for (i = 1; *p; i++) {
+ for (i = 1; *p; i++, p = next_p) {
char *eol = strchrnul(p, '\n');
- commit = parse_insn_line(p, eol, opts);
- if (!commit)
- return error(_("Could not parse line %d."), i);
- next = commit_list_append(commit, next);
- p = *eol ? eol + 1 : eol;
+
+ next_p = *eol ? eol + 1 /* skip LF */ : eol;
+
+ if (p != eol && eol[-1] == '\r')
+ eol--; /* strip Carriage Return */
+
+ item = append_new_todo(todo_list);
+ item->offset_in_buf = p - todo_list->buf.buf;
+ if (parse_insn_line(item, p, eol)) {
+ res = error(_("invalid line %d: %.*s"),
+ i, (int)(eol - p), p);
+ item->command = -1;
+ }
}
- if (!*todo_list)
- return error(_("No commits parsed."));
- return 0;
+ if (!todo_list->nr)
+ return error(_("no commits parsed."));
+ return res;
}
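
For illustration only (the object names and subjects below are made up, not taken from this patch): each line that parse_insn_line() accepts consists of a command word, an object name, and the rest of the line, for example:

    pick 1234abc Add frotz support
    revert 5678def Back out the xyzzy change

For the first line it would set item->command to TODO_PICK, look up the commit named by "1234abc", and point item->arg at "Add frotz support", with item->arg_len covering the rest of the line.
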
-static int read_populate_todo(struct commit_list **todo_list,
+static int read_populate_todo(struct todo_list *todo_list,
struct replay_opts *opts)
{
- struct strbuf buf = STRBUF_INIT;
+ const char *todo_file = get_todo_path(opts);
int fd, res;
- fd = open(git_path_todo_file(), O_RDONLY);
+ strbuf_reset(&todo_list->buf);
+ fd = open(todo_file, O_RDONLY);
if (fd < 0)
- return error_errno(_("Could not open %s"),
- git_path_todo_file());
- if (strbuf_read(&buf, fd, 0) < 0) {
+ return error_errno(_("could not open '%s'"), todo_file);
+ if (strbuf_read(&todo_list->buf, fd, 0) < 0) {
close(fd);
- strbuf_release(&buf);
- return error(_("Could not read %s."), git_path_todo_file());
+ return error(_("could not read '%s'."), todo_file);
}
close(fd);
- res = parse_insn_buffer(buf.buf, todo_list, opts);
- strbuf_release(&buf);
+ res = parse_insn_buffer(todo_list->buf.buf, todo_list);
if (res)
- return error(_("Unusable instruction sheet: %s"),
- git_path_todo_file());
+ return error(_("unusable instruction sheet: '%s'"), todo_file);
+
+ if (!is_rebase_i(opts)) {
+ enum todo_command valid =
+ opts->action == REPLAY_PICK ? TODO_PICK : TODO_REVERT;
+ int i;
+
+ for (i = 0; i < todo_list->nr; i++)
+ if (valid == todo_list->items[i].command)
+ continue;
+ else if (valid == TODO_PICK)
+ return error(_("cannot cherry-pick during a revert."));
+ else
+ return error(_("cannot revert during a cherry-pick."));
+ }
+
+ return 0;
+}
+
+static int git_config_string_dup(char **dest,
+ const char *var, const char *value)
+{
+ if (!value)
+ return config_error_nonbool(var);
+ free(*dest);
+ *dest = xstrdup(value);
return 0;
}
@@ -797,23 +1018,39 @@ static int populate_opts_cb(const char *key, const char *value, void *data)
else if (!strcmp(key, "options.mainline"))
opts->mainline = git_config_int(key, value);
else if (!strcmp(key, "options.strategy"))
- git_config_string(&opts->strategy, key, value);
+ git_config_string_dup(&opts->strategy, key, value);
else if (!strcmp(key, "options.gpg-sign"))
- git_config_string(&opts->gpg_sign, key, value);
+ git_config_string_dup(&opts->gpg_sign, key, value);
else if (!strcmp(key, "options.strategy-option")) {
ALLOC_GROW(opts->xopts, opts->xopts_nr + 1, opts->xopts_alloc);
opts->xopts[opts->xopts_nr++] = xstrdup(value);
} else
- return error(_("Invalid key: %s"), key);
+ return error(_("invalid key: %s"), key);
if (!error_flag)
- return error(_("Invalid value for %s: %s"), key, value);
+ return error(_("invalid value for %s: %s"), key, value);
return 0;
}
-static int read_populate_opts(struct replay_opts **opts)
+static int read_populate_opts(struct replay_opts *opts)
{
+ if (is_rebase_i(opts)) {
+ struct strbuf buf = STRBUF_INIT;
+
+ if (read_oneliner(&buf, rebase_path_gpg_sign_opt(), 1)) {
+ if (!starts_with(buf.buf, "-S"))
+ strbuf_reset(&buf);
+ else {
+ free(opts->gpg_sign);
+ opts->gpg_sign = xstrdup(buf.buf + 2);
+ }
+ }
+ strbuf_release(&buf);
+
+ return 0;
+ }
+
if (!file_exists(git_path_opts_file()))
return 0;
/*
@@ -822,24 +1059,39 @@ static int read_populate_opts(struct replay_opts **opts)
* about this case, though, because we wrote that file ourselves, so we
* are pretty certain that it is syntactically correct.
*/
- if (git_config_from_file(populate_opts_cb, git_path_opts_file(), *opts) < 0)
- return error(_("Malformed options sheet: %s"),
+ if (git_config_from_file(populate_opts_cb, git_path_opts_file(), opts) < 0)
+ return error(_("malformed options sheet: '%s'"),
git_path_opts_file());
return 0;
}
-static int walk_revs_populate_todo(struct commit_list **todo_list,
+static int walk_revs_populate_todo(struct todo_list *todo_list,
struct replay_opts *opts)
{
+ enum todo_command command = opts->action == REPLAY_PICK ?
+ TODO_PICK : TODO_REVERT;
+ const char *command_string = todo_command_strings[command];
struct commit *commit;
- struct commit_list **next;
if (prepare_revs(opts))
return -1;
- next = todo_list;
- while ((commit = get_revision(opts->revs)))
- next = commit_list_append(commit, next);
+ while ((commit = get_revision(opts->revs))) {
+ struct todo_item *item = append_new_todo(todo_list);
+ const char *commit_buffer = get_commit_buffer(commit, NULL);
+ const char *subject;
+ int subject_len;
+
+ item->command = command;
+ item->commit = commit;
+ item->arg = NULL;
+ item->arg_len = 0;
+ item->offset_in_buf = todo_list->buf.len;
+ subject_len = find_commit_subject(commit_buffer, &subject);
+ strbuf_addf(&todo_list->buf, "%s %s %.*s\n", command_string,
+ short_commit_name(commit), subject_len, subject);
+ unuse_commit_buffer(commit, commit_buffer);
+ }
return 0;
}
@@ -851,7 +1103,7 @@ static int create_seq_dir(void)
return -1;
}
else if (mkdir(git_path_seq_dir(), 0777) < 0)
- return error_errno(_("Could not create sequencer directory %s"),
+ return error_errno(_("could not create sequencer directory '%s'"),
git_path_seq_dir());
return 0;
}
@@ -865,17 +1117,17 @@ static int save_head(const char *head)
fd = hold_lock_file_for_update(&head_lock, git_path_head_file(), 0);
if (fd < 0) {
rollback_lock_file(&head_lock);
- return error_errno(_("Could not lock HEAD"));
+ return error_errno(_("could not lock HEAD"));
}
strbuf_addf(&buf, "%s\n", head);
if (write_in_full(fd, buf.buf, buf.len) < 0) {
rollback_lock_file(&head_lock);
- return error_errno(_("Could not write to %s"),
+ return error_errno(_("could not write to '%s'"),
git_path_head_file());
}
if (commit_lock_file(&head_lock) < 0) {
rollback_lock_file(&head_lock);
- return error(_("Error wrapping up %s."), git_path_head_file());
+ return error(_("failed to finalize '%s'."), git_path_head_file());
}
return 0;
}
@@ -904,7 +1156,7 @@ static int rollback_single_pick(void)
return reset_for_rollback(head_sha1);
}
-static int sequencer_rollback(struct replay_opts *opts)
+int sequencer_rollback(struct replay_opts *opts)
{
FILE *f;
unsigned char sha1[20];
@@ -920,9 +1172,9 @@ static int sequencer_rollback(struct replay_opts *opts)
return rollback_single_pick();
}
if (!f)
- return error_errno(_("cannot open %s"), git_path_head_file());
+ return error_errno(_("cannot open '%s'"), git_path_head_file());
if (strbuf_getline_lf(&buf, f)) {
- error(_("cannot read %s: %s"), git_path_head_file(),
+ error(_("cannot read '%s': %s"), git_path_head_file(),
ferror(f) ? strerror(errno) : _("unexpected end of file"));
fclose(f);
goto fail;
@@ -939,38 +1191,29 @@ static int sequencer_rollback(struct replay_opts *opts)
}
if (reset_for_rollback(sha1))
goto fail;
- remove_sequencer_state();
strbuf_release(&buf);
- return 0;
+ return sequencer_remove_state(opts);
fail:
strbuf_release(&buf);
return -1;
}
-static int save_todo(struct commit_list *todo_list, struct replay_opts *opts)
+static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
{
static struct lock_file todo_lock;
- struct strbuf buf = STRBUF_INIT;
- int fd;
+ const char *todo_path = get_todo_path(opts);
+ int next = todo_list->current, offset, fd;
- fd = hold_lock_file_for_update(&todo_lock, git_path_todo_file(), 0);
+ fd = hold_lock_file_for_update(&todo_lock, todo_path, 0);
if (fd < 0)
- return error_errno(_("Could not lock '%s'"),
- git_path_todo_file());
- if (format_todo(&buf, todo_list, opts) < 0) {
- strbuf_release(&buf);
- return error(_("Could not format %s."), git_path_todo_file());
- }
- if (write_in_full(fd, buf.buf, buf.len) < 0) {
- strbuf_release(&buf);
- return error_errno(_("Could not write to %s"),
- git_path_todo_file());
- }
- if (commit_lock_file(&todo_lock) < 0) {
- strbuf_release(&buf);
- return error(_("Error wrapping up %s."), git_path_todo_file());
- }
- strbuf_release(&buf);
+ return error_errno(_("could not lock '%s'"), todo_path);
+ offset = next < todo_list->nr ?
+ todo_list->items[next].offset_in_buf : todo_list->buf.len;
+ if (write_in_full(fd, todo_list->buf.buf + offset,
+ todo_list->buf.len - offset) < 0)
+ return error_errno(_("could not write to '%s'"), todo_path);
+ if (commit_lock_file(&todo_lock) < 0)
+ return error(_("failed to finalize '%s'."), todo_path);
return 0;
}
@@ -1009,9 +1252,8 @@ static int save_opts(struct replay_opts *opts)
return res;
}
-static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
+static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
{
- struct commit_list *cur;
int res;
setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
@@ -1021,10 +1263,12 @@ static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
if (read_and_refresh_cache(opts))
return -1;
- for (cur = todo_list; cur; cur = cur->next) {
- if (save_todo(cur, opts))
+ while (todo_list->current < todo_list->nr) {
+ struct todo_item *item = todo_list->items + todo_list->current;
+ if (save_todo(todo_list, opts))
return -1;
- res = do_pick_commit(cur->item, opts);
+ res = do_pick_commit(item->command, item->commit, opts);
+ todo_list->current++;
if (res)
return res;
}
@@ -1033,8 +1277,7 @@ static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
* Sequence of picks finished successfully; cleanup by
* removing the .git/sequencer directory
*/
- remove_sequencer_state();
- return 0;
+ return sequencer_remove_state(opts);
}
static int continue_single_pick(void)
@@ -1047,61 +1290,56 @@ static int continue_single_pick(void)
return run_command_v_opt(argv, RUN_GIT_CMD);
}
-static int sequencer_continue(struct replay_opts *opts)
+int sequencer_continue(struct replay_opts *opts)
{
- struct commit_list *todo_list = NULL;
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res;
- if (!file_exists(git_path_todo_file()))
+ if (read_and_refresh_cache(opts))
+ return -1;
+
+ if (!file_exists(get_todo_path(opts)))
return continue_single_pick();
- if (read_populate_opts(&opts) ||
- read_populate_todo(&todo_list, opts))
+ if (read_populate_opts(opts))
return -1;
+ if ((res = read_populate_todo(&todo_list, opts)))
+ goto release_todo_list;
/* Verify that the conflict has been resolved */
if (file_exists(git_path_cherry_pick_head()) ||
file_exists(git_path_revert_head())) {
- int ret = continue_single_pick();
- if (ret)
- return ret;
+ res = continue_single_pick();
+ if (res)
+ goto release_todo_list;
}
- if (index_differs_from("HEAD", 0))
- return error_dirty_index(opts);
- todo_list = todo_list->next;
- return pick_commits(todo_list, opts);
+ if (index_differs_from("HEAD", 0, 0)) {
+ res = error_dirty_index(opts);
+ goto release_todo_list;
+ }
+ todo_list.current++;
+ res = pick_commits(&todo_list, opts);
+release_todo_list:
+ todo_list_release(&todo_list);
+ return res;
}
static int single_pick(struct commit *cmit, struct replay_opts *opts)
{
setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- return do_pick_commit(cmit, opts);
+ return do_pick_commit(opts->action == REPLAY_PICK ?
+ TODO_PICK : TODO_REVERT, cmit, opts);
}
int sequencer_pick_revisions(struct replay_opts *opts)
{
- struct commit_list *todo_list = NULL;
+ struct todo_list todo_list = TODO_LIST_INIT;
unsigned char sha1[20];
- int i;
-
- if (opts->subcommand == REPLAY_NONE)
- assert(opts->revs);
+ int i, res;
+ assert(opts->revs);
if (read_and_refresh_cache(opts))
return -1;
- /*
- * Decide what to do depending on the arguments; a fresh
- * cherry-pick should be handled differently from an existing
- * one that is being continued
- */
- if (opts->subcommand == REPLAY_REMOVE_STATE) {
- remove_sequencer_state();
- return 0;
- }
- if (opts->subcommand == REPLAY_ROLLBACK)
- return sequencer_rollback(opts);
- if (opts->subcommand == REPLAY_CONTINUE)
- return sequencer_continue(opts);
-
for (i = 0; i < opts->revs->pending.nr; i++) {
unsigned char sha1[20];
const char *name = opts->revs->pending.objects[i].name;
@@ -1150,12 +1388,14 @@ int sequencer_pick_revisions(struct replay_opts *opts)
create_seq_dir() < 0)
return -1;
if (get_sha1("HEAD", sha1) && (opts->action == REPLAY_REVERT))
- return error(_("Can't revert as initial commit"));
+ return error(_("can't revert as initial commit"));
if (save_head(sha1_to_hex(sha1)))
return -1;
if (save_opts(opts))
return -1;
- return pick_commits(todo_list, opts);
+ res = pick_commits(&todo_list, opts);
+ todo_list_release(&todo_list);
+ return res;
}
void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag)
diff --git a/sequencer.h b/sequencer.h
index 5ed5cb1d97..7a513c576b 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -1,10 +1,7 @@
#ifndef SEQUENCER_H
#define SEQUENCER_H
-#define SEQ_DIR "sequencer"
-#define SEQ_HEAD_FILE "sequencer/head"
-#define SEQ_TODO_FILE "sequencer/todo"
-#define SEQ_OPTS_FILE "sequencer/opts"
+const char *git_path_seq_dir(void);
#define APPEND_SIGNOFF_DEDUP (1u << 0)
@@ -13,16 +10,8 @@ enum replay_action {
REPLAY_PICK
};
-enum replay_subcommand {
- REPLAY_NONE,
- REPLAY_REMOVE_STATE,
- REPLAY_CONTINUE,
- REPLAY_ROLLBACK
-};
-
struct replay_opts {
enum replay_action action;
- enum replay_subcommand subcommand;
/* Boolean options */
int edit;
@@ -37,18 +26,22 @@ struct replay_opts {
int mainline;
- const char *gpg_sign;
+ char *gpg_sign;
/* Merge strategy */
- const char *strategy;
- const char **xopts;
+ char *strategy;
+ char **xopts;
size_t xopts_nr, xopts_alloc;
/* Only used by REPLAY_NONE */
struct rev_info *revs;
};
+#define REPLAY_OPTS_INIT { -1 }
int sequencer_pick_revisions(struct replay_opts *opts);
+int sequencer_continue(struct replay_opts *opts);
+int sequencer_rollback(struct replay_opts *opts);
+int sequencer_remove_state(struct replay_opts *opts);
extern const char sign_off_header[];
diff --git a/sha1_file.c b/sha1_file.c
index 2eda9291ee..5457314e6a 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -370,7 +370,7 @@ void read_info_alternates(const char * relative_base, int depth)
int fd;
path = xstrfmt("%s/info/alternates", relative_base);
- fd = git_open_noatime(path);
+ fd = git_open(path);
free(path);
if (fd < 0)
return;
@@ -663,7 +663,7 @@ static int check_packed_git_idx(const char *path, struct packed_git *p)
struct pack_idx_header *hdr;
size_t idx_size;
uint32_t version, nr, i, *index;
- int fd = git_open_noatime(path);
+ int fd = git_open(path);
struct stat st;
if (fd < 0)
@@ -1069,7 +1069,7 @@ static int open_packed_git_1(struct packed_git *p)
while (pack_max_fds <= pack_open_fds && close_one_pack())
; /* nothing */
- p->pack_fd = git_open_noatime(p->pack_name);
+ p->pack_fd = git_open(p->pack_name);
if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
return -1;
pack_open_fds++;
@@ -1410,6 +1410,32 @@ static void prepare_packed_git_one(char *objdir, int local)
strbuf_release(&path);
}
+static int approximate_object_count_valid;
+
+/*
+ * Give a fast, rough count of the number of objects in the repository. This
+ * ignores loose objects completely. If you have a lot of them, then either
+ * you should repack because your performance will be awful, or they are
+ * all unreachable objects about to be pruned, in which case they're not really
+ * interesting as a measure of repo size in the first place.
+ */
+unsigned long approximate_object_count(void)
+{
+ static unsigned long count;
+ if (!approximate_object_count_valid) {
+ struct packed_git *p;
+
+ prepare_packed_git();
+ count = 0;
+ for (p = packed_git; p; p = p->next) {
+ if (open_pack_index(p))
+ continue;
+ count += p->num_objects;
+ }
+ }
+ return count;
+}
+
static void *get_next_packed_git(const void *p)
{
return ((const struct packed_git *)p)->next;
@@ -1481,6 +1507,7 @@ void prepare_packed_git(void)
void reprepare_packed_git(void)
{
+ approximate_object_count_valid = 0;
prepare_packed_git_run_once = 0;
prepare_packed_git();
}
@@ -1559,9 +1586,9 @@ int check_sha1_signature(const unsigned char *sha1, void *map,
return hashcmp(sha1, real_sha1) ? -1 : 0;
}
-int git_open_noatime(const char *name)
+int git_open(const char *name)
{
- static int sha1_file_open_flag = O_NOATIME;
+ static int sha1_file_open_flag = O_NOATIME | O_CLOEXEC;
for (;;) {
int fd;
@@ -1571,12 +1598,17 @@ int git_open_noatime(const char *name)
if (fd >= 0)
return fd;
- /* Might the failure be due to O_NOATIME? */
- if (errno != ENOENT && sha1_file_open_flag) {
- sha1_file_open_flag = 0;
+ /* Try again w/o O_CLOEXEC: the kernel might not support it */
+ if ((sha1_file_open_flag & O_CLOEXEC) && errno == EINVAL) {
+ sha1_file_open_flag &= ~O_CLOEXEC;
continue;
}
+ /* Might the failure be due to O_NOATIME? */
+ if (errno != ENOENT && (sha1_file_open_flag & O_NOATIME)) {
+ sha1_file_open_flag &= ~O_NOATIME;
+ continue;
+ }
return -1;
}
}
@@ -1605,7 +1637,7 @@ static int open_sha1_file(const unsigned char *sha1)
struct alternate_object_database *alt;
int most_interesting_errno;
- fd = git_open_noatime(sha1_file_name(sha1));
+ fd = git_open(sha1_file_name(sha1));
if (fd >= 0)
return fd;
most_interesting_errno = errno;
@@ -1613,7 +1645,7 @@ static int open_sha1_file(const unsigned char *sha1)
prepare_alt_odb();
for (alt = alt_odb_list; alt; alt = alt->next) {
const char *path = alt_sha1_path(alt, sha1);
- fd = git_open_noatime(path);
+ fd = git_open(path);
if (fd >= 0)
return fd;
if (most_interesting_errno == ENOENT)
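
The retry loop in git_open() above degrades gracefully when the kernel rejects one of the optional open(2) flags. A self-contained sketch of the same pattern, for illustration only (O_NOATIME is Linux-specific and typically fails with EPERM on files owned by someone else):

    #define _GNU_SOURCE     /* for O_NOATIME on glibc */
    #include <fcntl.h>
    #include <errno.h>

    static int open_with_optional_flags(const char *name)
    {
            /* Start optimistic; drop any flag the kernel rejects. */
            static int optional_flags = O_NOATIME | O_CLOEXEC;

            for (;;) {
                    int fd = open(name, O_RDONLY | optional_flags);
                    if (fd >= 0)
                            return fd;
                    if ((optional_flags & O_CLOEXEC) && errno == EINVAL) {
                            optional_flags &= ~O_CLOEXEC;   /* old kernel */
                            continue;
                    }
                    if ((optional_flags & O_NOATIME) && errno != ENOENT) {
                            optional_flags &= ~O_NOATIME;   /* e.g. EPERM */
                            continue;
                    }
                    return -1;
            }
    }

Like the original, the sketch remembers the reduced flag set in a function-local static, so later calls skip the attempt that already failed.
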
diff --git a/sha1_name.c b/sha1_name.c
index 45aa26b322..73a915ff1b 100644
--- a/sha1_name.c
+++ b/sha1_name.c
@@ -448,10 +448,46 @@ int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
return ret;
}
+/*
+ * Return the slot of the most-significant bit set in "val". There are various
+ * ways to do this quickly with fls() or __builtin_clzl(), but speed is
+ * probably not a big deal here.
+ */
+static unsigned msb(unsigned long val)
+{
+ unsigned r = 0;
+ while (val >>= 1)
+ r++;
+ return r;
+}
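
A minimal sketch of the builtin-based variant the comment above alludes to; it assumes a GCC- or Clang-compatible compiler and is shown for illustration only, not as part of the patch:

    #include <limits.h>     /* CHAR_BIT */

    /* GCC/Clang: __builtin_clzl() counts leading zero bits. */
    static unsigned msb_via_builtin(unsigned long val)
    {
            if (!val)
                    return 0;       /* the builtin is undefined for 0 */
            return (unsigned)(sizeof(unsigned long) * CHAR_BIT - 1
                              - __builtin_clzl(val));
    }

Both versions return 3 for an input of 15, the example used in the comment further down.
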
+
int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
{
int status, exists;
+ if (len < 0) {
+ unsigned long count = approximate_object_count();
+ /*
+ * Add one because the MSB only tells us the highest bit set,
+ * not including the value of all the _other_ bits (so "15"
+ * is only one off of 2^4, but the MSB is the 3rd bit).
+ */
+ len = msb(count) + 1;
+ /*
+ * We now know we have on the order of 2^len objects, which
+ * expects a collision at 2^(len/2). But we also care about hex
+ * chars, not bits, and there are 4 bits per hex. So all
+ * together we need to divide by 2; but we also want to round
+ * odd numbers up, hence adding one before dividing.
+ */
+ len = (len + 1) / 2;
+ /*
+ * For very small repos, we stick with our regular fallback.
+ */
+ if (len < FALLBACK_DEFAULT_ABBREV)
+ len = FALLBACK_DEFAULT_ABBREV;
+ }
+
sha1_to_hex_r(hex, sha1);
if (len == 40 || !len)
return 40;
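
To make the arithmetic above concrete, a small standalone sketch (the object counts are made-up examples, and it assumes FALLBACK_DEFAULT_ABBREV is 7):

    #include <stdio.h>

    static unsigned msb(unsigned long val)
    {
            unsigned r = 0;
            while (val >>= 1)
                    r++;
            return r;
    }

    /* Default abbreviation length for a given object count. */
    static int auto_abbrev_len(unsigned long count)
    {
            int len = msb(count) + 1;       /* ~2^len objects */
            len = (len + 1) / 2;            /* hex chars, rounded up */
            return len < 7 ? 7 : len;       /* FALLBACK_DEFAULT_ABBREV */
    }

    int main(void)
    {
            /* ~30k objects -> 8, ~1M -> 10, ~8M -> 12 hex digits */
            printf("%d %d %d\n", auto_abbrev_len(30000UL),
                   auto_abbrev_len(1000000UL), auto_abbrev_len(8000000UL));
            return 0;
    }
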
diff --git a/shallow.c b/shallow.c
index 2531e3af3b..4d0b005d39 100644
--- a/shallow.c
+++ b/shallow.c
@@ -338,7 +338,7 @@ static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *c
{
int fd = *(int *)cb;
if (graft->nr_parent == -1)
- packet_write(fd, "shallow %s\n", oid_to_hex(&graft->oid));
+ packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
return 0;
}
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index e799e59544..a20b9f58e3 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -4,13 +4,72 @@ test_description='blob conversion via gitattributes'
. ./test-lib.sh
-cat <<EOF >rot13.sh
+TEST_ROOT="$(pwd)"
+
+cat <<EOF >"$TEST_ROOT/rot13.sh"
#!$SHELL_PATH
tr \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' \
'nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM'
EOF
-chmod +x rot13.sh
+chmod +x "$TEST_ROOT/rot13.sh"
+
+generate_random_characters () {
+ LEN=$1
+ NAME=$2
+ test-genrandom some-seed $LEN |
+ perl -pe "s/./chr((ord($&) % 26) + ord('a'))/sge" >"$TEST_ROOT/$NAME"
+}
+
+file_size () {
+ cat "$1" | wc -c | sed "s/^[ ]*//"
+}
+
+filter_git () {
+ rm -f rot13-filter.log &&
+ git "$@" 2>git-stderr.log &&
+ rm -f git-stderr.log
+}
+
+# Compare two files and ensure that `clean` and `smudge` are each called
+# at least once if specified in the `expect` file. The exact invocation
+# count is not checked because it can vary.
+# cf. http://public-inbox.org/git/xmqqshv18i8i.fsf@gitster.mtv.corp.google.com/
+test_cmp_count () {
+ expect=$1
+ actual=$2
+ for FILE in "$expect" "$actual"
+ do
+ sort "$FILE" | uniq -c | sed "s/^[ ]*//" |
+ sed "s/^\([0-9]\) IN: clean/x IN: clean/" |
+ sed "s/^\([0-9]\) IN: smudge/x IN: smudge/" >"$FILE.tmp" &&
+ mv "$FILE.tmp" "$FILE"
+ done &&
+ test_cmp "$expect" "$actual"
+}
+
+# Compare two files but exclude all `clean` invocations because Git can
+# call `clean` zero or more times.
+# cf. http://public-inbox.org/git/xmqqshv18i8i.fsf@gitster.mtv.corp.google.com/
+test_cmp_exclude_clean () {
+ expect=$1
+ actual=$2
+ for FILE in "$expect" "$actual"
+ do
+ grep -v "IN: clean" "$FILE" >"$FILE.tmp" &&
+ mv "$FILE.tmp" "$FILE"
+ done &&
+ test_cmp "$expect" "$actual"
+}
+
+# Check that the contents of two files are equal and that their rot13 version
+# is equal to the committed content.
+test_cmp_committed_rot13 () {
+ test_cmp "$1" "$2" &&
+ "$TEST_ROOT/rot13.sh" <"$1" >expected &&
+ git cat-file blob :"$2" >actual &&
+ test_cmp expected actual
+}
test_expect_success setup '
git config filter.rot13.smudge ./rot13.sh &&
@@ -31,15 +90,18 @@ test_expect_success setup '
cat test >test.i &&
git add test test.t test.i &&
rm -f test test.t test.i &&
- git checkout -- test test.t test.i
+ git checkout -- test test.t test.i &&
+
+ echo "content-test2" >test2.o &&
+ echo "content-test3 - filename with special characters" >"test3 '\''sq'\'',\$x.o"
'
script='s/^\$Id: \([0-9a-f]*\) \$/\1/p'
test_expect_success check '
- cmp test.o test &&
- cmp test.o test.t &&
+ test_cmp test.o test &&
+ test_cmp test.o test.t &&
# ident should be stripped in the repository
git diff --raw --exit-code :test :test.i &&
@@ -47,10 +109,10 @@ test_expect_success check '
embedded=$(sed -ne "$script" test.i) &&
test "z$id" = "z$embedded" &&
- git cat-file blob :test.t > test.r &&
+ git cat-file blob :test.t >test.r &&
- ./rot13.sh < test.o > test.t &&
- cmp test.r test.t
+ ./rot13.sh <test.o >test.t &&
+ test_cmp test.r test.t
'
# If an expanded ident ever gets into the repository, we want to make sure that
@@ -130,7 +192,7 @@ test_expect_success 'filter shell-escaped filenames' '
# delete the files and check them out again, using a smudge filter
# that will count the args and echo the command-line back to us
- git config filter.argc.smudge "sh ./argc.sh %f" &&
+ test_config filter.argc.smudge "sh ./argc.sh %f" &&
rm "$normal" "$special" &&
git checkout -- "$normal" "$special" &&
@@ -141,7 +203,7 @@ test_expect_success 'filter shell-escaped filenames' '
test_cmp expect "$special" &&
# do the same thing, but with more args in the filter expression
- git config filter.argc.smudge "sh ./argc.sh %f --my-extra-arg" &&
+ test_config filter.argc.smudge "sh ./argc.sh %f --my-extra-arg" &&
rm "$normal" "$special" &&
git checkout -- "$normal" "$special" &&
@@ -154,9 +216,9 @@ test_expect_success 'filter shell-escaped filenames' '
'
test_expect_success 'required filter should filter data' '
- git config filter.required.smudge ./rot13.sh &&
- git config filter.required.clean ./rot13.sh &&
- git config filter.required.required true &&
+ test_config filter.required.smudge ./rot13.sh &&
+ test_config filter.required.clean ./rot13.sh &&
+ test_config filter.required.required true &&
echo "*.r filter=required" >.gitattributes &&
@@ -165,17 +227,17 @@ test_expect_success 'required filter should filter data' '
rm -f test.r &&
git checkout -- test.r &&
- cmp test.o test.r &&
+ test_cmp test.o test.r &&
./rot13.sh <test.o >expected &&
git cat-file blob :test.r >actual &&
- cmp expected actual
+ test_cmp expected actual
'
test_expect_success 'required filter smudge failure' '
- git config filter.failsmudge.smudge false &&
- git config filter.failsmudge.clean cat &&
- git config filter.failsmudge.required true &&
+ test_config filter.failsmudge.smudge false &&
+ test_config filter.failsmudge.clean cat &&
+ test_config filter.failsmudge.required true &&
echo "*.fs filter=failsmudge" >.gitattributes &&
@@ -186,9 +248,9 @@ test_expect_success 'required filter smudge failure' '
'
test_expect_success 'required filter clean failure' '
- git config filter.failclean.smudge cat &&
- git config filter.failclean.clean false &&
- git config filter.failclean.required true &&
+ test_config filter.failclean.smudge cat &&
+ test_config filter.failclean.clean false &&
+ test_config filter.failclean.required true &&
echo "*.fc filter=failclean" >.gitattributes &&
@@ -197,8 +259,8 @@ test_expect_success 'required filter clean failure' '
'
test_expect_success 'filtering large input to small output should use little memory' '
- git config filter.devnull.clean "cat >/dev/null" &&
- git config filter.devnull.required true &&
+ test_config filter.devnull.clean "cat >/dev/null" &&
+ test_config filter.devnull.required true &&
for i in $(test_seq 1 30); do printf "%1048576d" 1; done >30MB &&
echo "30MB filter=devnull" >.gitattributes &&
GIT_MMAP_LIMIT=1m GIT_ALLOC_LIMIT=1m git add 30MB
@@ -207,7 +269,7 @@ test_expect_success 'filtering large input to small output should use little mem
test_expect_success 'filter that does not read is fine' '
test-genrandom foo $((128 * 1024 + 1)) >big &&
echo "big filter=epipe" >.gitattributes &&
- git config filter.epipe.clean "echo xyzzy" &&
+ test_config filter.epipe.clean "echo xyzzy" &&
git add big &&
git cat-file blob :big >actual &&
echo xyzzy >expect &&
@@ -215,20 +277,20 @@ test_expect_success 'filter that does not read is fine' '
'
test_expect_success EXPENSIVE 'filter large file' '
- git config filter.largefile.smudge cat &&
- git config filter.largefile.clean cat &&
+ test_config filter.largefile.smudge cat &&
+ test_config filter.largefile.clean cat &&
for i in $(test_seq 1 2048); do printf "%1048576d" 1; done >2GB &&
echo "2GB filter=largefile" >.gitattributes &&
git add 2GB 2>err &&
- ! test -s err &&
+ test_must_be_empty err &&
rm -f 2GB &&
git checkout -- 2GB 2>err &&
- ! test -s err
+ test_must_be_empty err
'
test_expect_success "filter: clean empty file" '
- git config filter.in-repo-header.clean "echo cleaned && cat" &&
- git config filter.in-repo-header.smudge "sed 1d" &&
+ test_config filter.in-repo-header.clean "echo cleaned && cat" &&
+ test_config filter.in-repo-header.smudge "sed 1d" &&
echo "empty-in-worktree filter=in-repo-header" >>.gitattributes &&
>empty-in-worktree &&
@@ -240,8 +302,8 @@ test_expect_success "filter: clean empty file" '
'
test_expect_success "filter: smudge empty file" '
- git config filter.empty-in-repo.clean "cat >/dev/null" &&
- git config filter.empty-in-repo.smudge "echo smudged && cat" &&
+ test_config filter.empty-in-repo.clean "cat >/dev/null" &&
+ test_config filter.empty-in-repo.smudge "echo smudged && cat" &&
echo "empty-in-repo filter=empty-in-repo" >>.gitattributes &&
echo dead data walking >empty-in-repo &&
@@ -279,4 +341,380 @@ test_expect_success 'diff does not reuse worktree files that need cleaning' '
test_line_count = 0 count
'
+test_expect_success PERL 'required process filter should filter data' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean smudge" &&
+ test_config_global filter.protocol.required true &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "git-stderr.log" >.gitignore &&
+ echo "*.r filter=protocol" >.gitattributes &&
+ git add . &&
+ git commit . -m "test commit 1" &&
+ git branch empty-branch &&
+
+ cp "$TEST_ROOT/test.o" test.r &&
+ cp "$TEST_ROOT/test2.o" test2.r &&
+ mkdir testsubdir &&
+ cp "$TEST_ROOT/test3 '\''sq'\'',\$x.o" "testsubdir/test3 '\''sq'\'',\$x.r" &&
+ >test4-empty.r &&
+
+ S=$(file_size test.r) &&
+ S2=$(file_size test2.r) &&
+ S3=$(file_size "testsubdir/test3 '\''sq'\'',\$x.r") &&
+
+ filter_git add . &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: clean test.r $S [OK] -- OUT: $S . [OK]
+ IN: clean test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ IN: clean test4-empty.r 0 [OK] -- OUT: 0 [OK]
+ IN: clean testsubdir/test3 '\''sq'\'',\$x.r $S3 [OK] -- OUT: $S3 . [OK]
+ STOP
+ EOF
+ test_cmp_count expected.log rot13-filter.log &&
+
+ filter_git commit . -m "test commit 2" &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: clean test.r $S [OK] -- OUT: $S . [OK]
+ IN: clean test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ IN: clean test4-empty.r 0 [OK] -- OUT: 0 [OK]
+ IN: clean testsubdir/test3 '\''sq'\'',\$x.r $S3 [OK] -- OUT: $S3 . [OK]
+ IN: clean test.r $S [OK] -- OUT: $S . [OK]
+ IN: clean test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ IN: clean test4-empty.r 0 [OK] -- OUT: 0 [OK]
+ IN: clean testsubdir/test3 '\''sq'\'',\$x.r $S3 [OK] -- OUT: $S3 . [OK]
+ STOP
+ EOF
+ test_cmp_count expected.log rot13-filter.log &&
+
+ rm -f test2.r "testsubdir/test3 '\''sq'\'',\$x.r" &&
+
+ filter_git checkout --quiet --no-progress . &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: smudge test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ IN: smudge testsubdir/test3 '\''sq'\'',\$x.r $S3 [OK] -- OUT: $S3 . [OK]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ filter_git checkout --quiet --no-progress empty-branch &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: clean test.r $S [OK] -- OUT: $S . [OK]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ filter_git checkout --quiet --no-progress master &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: smudge test.r $S [OK] -- OUT: $S . [OK]
+ IN: smudge test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ IN: smudge test4-empty.r 0 [OK] -- OUT: 0 [OK]
+ IN: smudge testsubdir/test3 '\''sq'\'',\$x.r $S3 [OK] -- OUT: $S3 . [OK]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.r &&
+ test_cmp_committed_rot13 "$TEST_ROOT/test2.o" test2.r &&
+ test_cmp_committed_rot13 "$TEST_ROOT/test3 '\''sq'\'',\$x.o" "testsubdir/test3 '\''sq'\'',\$x.r"
+ )
+'
+
+test_expect_success PERL 'required process filter takes precedence' '
+ test_config_global filter.protocol.clean false &&
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean" &&
+ test_config_global filter.protocol.required true &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+ cp "$TEST_ROOT/test.o" test.r &&
+ S=$(file_size test.r) &&
+
+ # Check that the process filter is invoked here
+ filter_git add . &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: clean test.r $S [OK] -- OUT: $S . [OK]
+ STOP
+ EOF
+ test_cmp_count expected.log rot13-filter.log
+ )
+'
+
+test_expect_success PERL 'required process filter should be used only for "clean" operation' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean" &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+ cp "$TEST_ROOT/test.o" test.r &&
+ S=$(file_size test.r) &&
+
+ filter_git add . &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: clean test.r $S [OK] -- OUT: $S . [OK]
+ STOP
+ EOF
+ test_cmp_count expected.log rot13-filter.log &&
+
+ rm test.r &&
+
+ filter_git checkout --quiet --no-progress . &&
+ # If the filter would be used for "smudge", too, we would see
+ # "IN: smudge test.r 57 [OK] -- OUT: 57 . [OK]" here
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log
+ )
+'
+
+test_expect_success PERL 'required process filter should process multiple packets' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean smudge" &&
+ test_config_global filter.protocol.required true &&
+
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ # Generate data requiring 1, 2, 3 packets
+ S=65516 && # PKTLINE_DATA_MAXLEN -> maximum payload of a single packet
+ generate_random_characters $(($S )) 1pkt_1__.file &&
+ generate_random_characters $(($S +1)) 2pkt_1+1.file &&
+ generate_random_characters $(($S*2-1)) 2pkt_2-1.file &&
+ generate_random_characters $(($S*2 )) 2pkt_2__.file &&
+ generate_random_characters $(($S*2+1)) 3pkt_2+1.file &&
+
+ for FILE in "$TEST_ROOT"/*.file
+ do
+ cp "$FILE" . &&
+ "$TEST_ROOT/rot13.sh" <"$FILE" >"$FILE.rot13"
+ done &&
+
+ echo "*.file filter=protocol" >.gitattributes &&
+ filter_git add *.file .gitattributes &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: clean 1pkt_1__.file $(($S )) [OK] -- OUT: $(($S )) . [OK]
+ IN: clean 2pkt_1+1.file $(($S +1)) [OK] -- OUT: $(($S +1)) .. [OK]
+ IN: clean 2pkt_2-1.file $(($S*2-1)) [OK] -- OUT: $(($S*2-1)) .. [OK]
+ IN: clean 2pkt_2__.file $(($S*2 )) [OK] -- OUT: $(($S*2 )) .. [OK]
+ IN: clean 3pkt_2+1.file $(($S*2+1)) [OK] -- OUT: $(($S*2+1)) ... [OK]
+ STOP
+ EOF
+ test_cmp_count expected.log rot13-filter.log &&
+
+ rm -f *.file &&
+
+ filter_git checkout --quiet --no-progress -- *.file &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: smudge 1pkt_1__.file $(($S )) [OK] -- OUT: $(($S )) . [OK]
+ IN: smudge 2pkt_1+1.file $(($S +1)) [OK] -- OUT: $(($S +1)) .. [OK]
+ IN: smudge 2pkt_2-1.file $(($S*2-1)) [OK] -- OUT: $(($S*2-1)) .. [OK]
+ IN: smudge 2pkt_2__.file $(($S*2 )) [OK] -- OUT: $(($S*2 )) .. [OK]
+ IN: smudge 3pkt_2+1.file $(($S*2+1)) [OK] -- OUT: $(($S*2+1)) ... [OK]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ for FILE in *.file
+ do
+ test_cmp_committed_rot13 "$TEST_ROOT/$FILE" $FILE
+ done
+ )
+'
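
The file sizes in the test above are chosen around the pkt-line payload limit: 65516 bytes of content fit in one packet because the four-digit hex length header counts itself (65520 - 4). A small sketch of the resulting packet-count arithmetic, for illustration only:

    #define PKT_PAYLOAD_MAX 65516   /* 65520 max pkt-line length - 4 byte header */

    /* Content packets needed for a blob of n bytes (flush packet not counted). */
    static unsigned packets_needed(unsigned long n)
    {
            if (!n)
                    return 0;       /* empty content: only the flush packet */
            return (unsigned)((n + PKT_PAYLOAD_MAX - 1) / PKT_PAYLOAD_MAX);
    }

    /* packets_needed(65516) == 1, 65517 -> 2, 131032 -> 2, 131033 -> 3,
       matching the dots logged per file in the expected output above. */
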
+
+test_expect_success PERL 'required process filter with clean error should fail' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean smudge" &&
+ test_config_global filter.protocol.required true &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+
+ cp "$TEST_ROOT/test.o" test.r &&
+ echo "this is going to fail" >clean-write-fail.r &&
+ echo "content-test3-subdir" >test3.r &&
+
+ test_must_fail git add .
+ )
+'
+
+test_expect_success PERL 'process filter should restart after unexpected write failure' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean smudge" &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+
+ cp "$TEST_ROOT/test.o" test.r &&
+ cp "$TEST_ROOT/test2.o" test2.r &&
+ echo "this is going to fail" >smudge-write-fail.o &&
+ cp smudge-write-fail.o smudge-write-fail.r &&
+
+ S=$(file_size test.r) &&
+ S2=$(file_size test2.r) &&
+ SF=$(file_size smudge-write-fail.r) &&
+
+ git add . &&
+ rm -f *.r &&
+
+ rm -f rot13-filter.log &&
+ git checkout --quiet --no-progress . 2>git-stderr.log &&
+
+ grep "smudge write error at" git-stderr.log &&
+ grep "error: external filter" git-stderr.log &&
+
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: smudge smudge-write-fail.r $SF [OK] -- OUT: $SF [WRITE FAIL]
+ START
+ init handshake complete
+ IN: smudge test.r $S [OK] -- OUT: $S . [OK]
+ IN: smudge test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.r &&
+ test_cmp_committed_rot13 "$TEST_ROOT/test2.o" test2.r &&
+
+ # Smudge failed
+ ! test_cmp smudge-write-fail.o smudge-write-fail.r &&
+ "$TEST_ROOT/rot13.sh" <smudge-write-fail.o >expected &&
+ git cat-file blob :smudge-write-fail.r >actual &&
+ test_cmp expected actual
+ )
+'
+
+test_expect_success PERL 'process filter should not be restarted if it signals an error' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean smudge" &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+
+ cp "$TEST_ROOT/test.o" test.r &&
+ cp "$TEST_ROOT/test2.o" test2.r &&
+ echo "this will cause an error" >error.o &&
+ cp error.o error.r &&
+
+ S=$(file_size test.r) &&
+ S2=$(file_size test2.r) &&
+ SE=$(file_size error.r) &&
+
+ git add . &&
+ rm -f *.r &&
+
+ filter_git checkout --quiet --no-progress . &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: smudge error.r $SE [OK] -- OUT: 0 [ERROR]
+ IN: smudge test.r $S [OK] -- OUT: $S . [OK]
+ IN: smudge test2.r $S2 [OK] -- OUT: $S2 . [OK]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.r &&
+ test_cmp_committed_rot13 "$TEST_ROOT/test2.o" test2.r &&
+ test_cmp error.o error.r
+ )
+'
+
+test_expect_success PERL 'process filter abort stops processing of all further files' '
+ test_config_global filter.protocol.process "$TEST_DIRECTORY/t0021/rot13-filter.pl clean smudge" &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+
+ cp "$TEST_ROOT/test.o" test.r &&
+ cp "$TEST_ROOT/test2.o" test2.r &&
+ echo "error this blob and all future blobs" >abort.o &&
+ cp abort.o abort.r &&
+
+ SA=$(file_size abort.r) &&
+
+ git add . &&
+ rm -f *.r &&
+
+ # Note: This test assumes that Git filters files in alphabetical
+ # order ("abort.r" before "test.r").
+ filter_git checkout --quiet --no-progress . &&
+ cat >expected.log <<-EOF &&
+ START
+ init handshake complete
+ IN: smudge abort.r $SA [OK] -- OUT: 0 [ABORT]
+ STOP
+ EOF
+ test_cmp_exclude_clean expected.log rot13-filter.log &&
+
+ test_cmp "$TEST_ROOT/test.o" test.r &&
+ test_cmp "$TEST_ROOT/test2.o" test2.r &&
+ test_cmp abort.o abort.r
+ )
+'
+
+test_expect_success PERL 'invalid process filter must fail (and not hang!)' '
+ test_config_global filter.protocol.process cat &&
+ test_config_global filter.protocol.required true &&
+ rm -rf repo &&
+ mkdir repo &&
+ (
+ cd repo &&
+ git init &&
+
+ echo "*.r filter=protocol" >.gitattributes &&
+
+ cp "$TEST_ROOT/test.o" test.r &&
+ test_must_fail git add . 2>git-stderr.log &&
+ grep "does not support filter protocol version" git-stderr.log
+ )
+'
+
test_done
diff --git a/t/t0021/rot13-filter.pl b/t/t0021/rot13-filter.pl
new file mode 100755
index 0000000000..ae4c50f5c5
--- /dev/null
+++ b/t/t0021/rot13-filter.pl
@@ -0,0 +1,192 @@
+#!/usr/bin/perl
+#
+# Example implementation for the Git filter protocol version 2
+# See Documentation/gitattributes.txt, section "Filter Protocol"
+#
+# The script takes the list of supported protocol capabilities as
+# arguments ("clean", "smudge", etc).
+#
+# This implementation supports special test cases:
+# (1) If data with the pathname "clean-write-fail.r" is processed with
+# a "clean" operation then the write operation will die.
+# (2) If data with the pathname "smudge-write-fail.r" is processed with
+# a "smudge" operation then the write operation will die.
+# (3) If data with the pathname "error.r" is processed with any
+# operation then the filter signals that it cannot or does not want
+# to process the file.
+# (4) If data with the pathname "abort.r" is processed with any
+# operation then the filter signals that it cannot or does not want
+# to process that file or any file that is processed afterwards
+# with the same command.
+#
+
+use strict;
+use warnings;
+
+my $MAX_PACKET_CONTENT_SIZE = 65516;
+my @capabilities = @ARGV;
+
+open my $debug, ">>", "rot13-filter.log" or die "cannot open log file: $!";
+
+sub rot13 {
+ my $str = shift;
+ $str =~ y/A-Za-z/N-ZA-Mn-za-m/;
+ return $str;
+}
+
+sub packet_bin_read {
+ my $buffer;
+ my $bytes_read = read STDIN, $buffer, 4;
+ if ( $bytes_read == 0 ) {
+ # EOF - Git stopped talking to us!
+ print $debug "STOP\n";
+ exit();
+ }
+ elsif ( $bytes_read != 4 ) {
+ die "invalid packet: '$buffer'";
+ }
+ my $pkt_size = hex($buffer);
+ if ( $pkt_size == 0 ) {
+ return ( 1, "" );
+ }
+ elsif ( $pkt_size > 4 ) {
+ my $content_size = $pkt_size - 4;
+ $bytes_read = read STDIN, $buffer, $content_size;
+ if ( $bytes_read != $content_size ) {
+ die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+ }
+ return ( 0, $buffer );
+ }
+ else {
+ die "invalid packet size: $pkt_size";
+ }
+}
+
+sub packet_txt_read {
+ my ( $res, $buf ) = packet_bin_read();
+ unless ( $buf =~ s/\n$// ) {
+ die "A non-binary line MUST be terminated by an LF.";
+ }
+ return ( $res, $buf );
+}
+
+sub packet_bin_write {
+ my $buf = shift;
+ print STDOUT sprintf( "%04x", length($buf) + 4 );
+ print STDOUT $buf;
+ STDOUT->flush();
+}
+
+sub packet_txt_write {
+ packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+ print STDOUT sprintf( "%04x", 0 );
+ STDOUT->flush();
+}
+
+print $debug "START\n";
+$debug->flush();
+
+( packet_txt_read() eq ( 0, "git-filter-client" ) ) || die "bad initialize";
+( packet_txt_read() eq ( 0, "version=2" ) ) || die "bad version";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad version end";
+
+packet_txt_write("git-filter-server");
+packet_txt_write("version=2");
+packet_flush();
+
+( packet_txt_read() eq ( 0, "capability=clean" ) ) || die "bad capability";
+( packet_txt_read() eq ( 0, "capability=smudge" ) ) || die "bad capability";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end";
+
+foreach (@capabilities) {
+ packet_txt_write( "capability=" . $_ );
+}
+packet_flush();
+print $debug "init handshake complete\n";
+$debug->flush();
+
+while (1) {
+ my ($command) = packet_txt_read() =~ /^command=([^=]+)$/;
+ print $debug "IN: $command";
+ $debug->flush();
+
+ my ($pathname) = packet_txt_read() =~ /^pathname=([^=]+)$/;
+ print $debug " $pathname";
+ $debug->flush();
+
+ # Flush
+ packet_bin_read();
+
+ my $input = "";
+ {
+ binmode(STDIN);
+ my $buffer;
+ my $done = 0;
+ while ( !$done ) {
+ ( $done, $buffer ) = packet_bin_read();
+ $input .= $buffer;
+ }
+ print $debug " " . length($input) . " [OK] -- ";
+ $debug->flush();
+ }
+
+ my $output;
+ if ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
+ $output = "";
+ }
+ elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
+ $output = rot13($input);
+ }
+ elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
+ $output = rot13($input);
+ }
+ else {
+ die "bad command '$command'";
+ }
+
+ print $debug "OUT: " . length($output) . " ";
+ $debug->flush();
+
+ if ( $pathname eq "error.r" ) {
+ print $debug "[ERROR]\n";
+ $debug->flush();
+ packet_txt_write("status=error");
+ packet_flush();
+ }
+ elsif ( $pathname eq "abort.r" ) {
+ print $debug "[ABORT]\n";
+ $debug->flush();
+ packet_txt_write("status=abort");
+ packet_flush();
+ }
+ else {
+ packet_txt_write("status=success");
+ packet_flush();
+
+ if ( $pathname eq "${command}-write-fail.r" ) {
+ print $debug "[WRITE FAIL]\n";
+ $debug->flush();
+ die "${command} write error";
+ }
+
+ while ( length($output) > 0 ) {
+ my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE );
+ packet_bin_write($packet);
+ # dots represent the number of packets
+ print $debug ".";
+ if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
+ $output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
+ }
+ else {
+ $output = "";
+ }
+ }
+ packet_flush();
+ print $debug " [OK]\n";
+ $debug->flush();
+ packet_flush();
+ }
+}
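
For comparison with the Perl helpers above, a minimal C sketch of the same pkt-line framing used by the filter protocol: each packet starts with a four-digit hex length that includes the header itself, and "0000" is the flush packet. Illustration only; this is not git's pkt-line.c:

    #include <stdio.h>
    #include <string.h>

    /* Write one binary packet: 4-digit hex length (header included) + payload. */
    static void packet_bin_write(const char *buf, size_t len)
    {
            printf("%04x", (unsigned)(len + 4));
            fwrite(buf, 1, len, stdout);
            fflush(stdout);
    }

    /* Text packets are LF-terminated, e.g. "version=2\n". */
    static void packet_txt_write(const char *s)
    {
            printf("%04x%s\n", (unsigned)(strlen(s) + 5), s);
            fflush(stdout);
    }

    /* A flush packet is the literal four bytes "0000". */
    static void packet_flush(void)
    {
            fputs("0000", stdout);
            fflush(stdout);
    }

The filter side of the version handshake shown in the script would then be packet_txt_write("git-filter-server"); packet_txt_write("version=2"); packet_flush();.
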
diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh
index bf2deee109..444b5a4df8 100755
--- a/t/t0060-path-utils.sh
+++ b/t/t0060-path-utils.sh
@@ -305,8 +305,9 @@ test_git_path GIT_COMMON_DIR=bar config bar/config
test_git_path GIT_COMMON_DIR=bar packed-refs bar/packed-refs
test_git_path GIT_COMMON_DIR=bar shallow bar/shallow
-# In the tests below, the distinction between $PWD and $(pwd) is important:
-# on Windows, $PWD is POSIX style (/c/foo), $(pwd) has drive letter (c:/foo).
+# In the tests below, $(pwd) must be used because it is a native path on
+# Windows and avoids MSYS's path mangling (which simplifies "foo/../bar" and
+# strips the dot from trailing "/.").
test_submodule_relative_url "../" "../foo" "../submodule" "../../submodule"
test_submodule_relative_url "../" "../foo/bar" "../submodule" "../../foo/submodule"
@@ -314,27 +315,29 @@ test_submodule_relative_url "../" "../foo/submodule" "../submodule" "../../foo/s
test_submodule_relative_url "../" "./foo" "../submodule" "../submodule"
test_submodule_relative_url "../" "./foo/bar" "../submodule" "../foo/submodule"
test_submodule_relative_url "../../../" "../foo/bar" "../sub/a/b/c" "../../../../foo/sub/a/b/c"
-test_submodule_relative_url "../" "$PWD/addtest" "../repo" "$(pwd)/repo"
+test_submodule_relative_url "../" "$(pwd)/addtest" "../repo" "$(pwd)/repo"
test_submodule_relative_url "../" "foo/bar" "../submodule" "../foo/submodule"
test_submodule_relative_url "../" "foo" "../submodule" "../submodule"
test_submodule_relative_url "(null)" "../foo/bar" "../sub/a/b/c" "../foo/sub/a/b/c"
+test_submodule_relative_url "(null)" "../foo/bar" "../sub/a/b/c/" "../foo/sub/a/b/c"
+test_submodule_relative_url "(null)" "../foo/bar/" "../sub/a/b/c" "../foo/sub/a/b/c"
test_submodule_relative_url "(null)" "../foo/bar" "../submodule" "../foo/submodule"
test_submodule_relative_url "(null)" "../foo/submodule" "../submodule" "../foo/submodule"
test_submodule_relative_url "(null)" "../foo" "../submodule" "../submodule"
test_submodule_relative_url "(null)" "./foo/bar" "../submodule" "foo/submodule"
test_submodule_relative_url "(null)" "./foo" "../submodule" "submodule"
test_submodule_relative_url "(null)" "//somewhere else/repo" "../subrepo" "//somewhere else/subrepo"
-test_submodule_relative_url "(null)" "$PWD/subsuper_update_r" "../subsubsuper_update_r" "$(pwd)/subsubsuper_update_r"
-test_submodule_relative_url "(null)" "$PWD/super_update_r2" "../subsuper_update_r" "$(pwd)/subsuper_update_r"
-test_submodule_relative_url "(null)" "$PWD/." "../." "$(pwd)/."
-test_submodule_relative_url "(null)" "$PWD" "./." "$(pwd)/."
-test_submodule_relative_url "(null)" "$PWD/addtest" "../repo" "$(pwd)/repo"
-test_submodule_relative_url "(null)" "$PWD" "./å äö" "$(pwd)/å äö"
-test_submodule_relative_url "(null)" "$PWD/." "../submodule" "$(pwd)/submodule"
-test_submodule_relative_url "(null)" "$PWD/submodule" "../submodule" "$(pwd)/submodule"
-test_submodule_relative_url "(null)" "$PWD/home2/../remote" "../bundle1" "$(pwd)/home2/../bundle1"
-test_submodule_relative_url "(null)" "$PWD/submodule_update_repo" "./." "$(pwd)/submodule_update_repo/."
+test_submodule_relative_url "(null)" "$(pwd)/subsuper_update_r" "../subsubsuper_update_r" "$(pwd)/subsubsuper_update_r"
+test_submodule_relative_url "(null)" "$(pwd)/super_update_r2" "../subsuper_update_r" "$(pwd)/subsuper_update_r"
+test_submodule_relative_url "(null)" "$(pwd)/." "../." "$(pwd)/."
+test_submodule_relative_url "(null)" "$(pwd)" "./." "$(pwd)/."
+test_submodule_relative_url "(null)" "$(pwd)/addtest" "../repo" "$(pwd)/repo"
+test_submodule_relative_url "(null)" "$(pwd)" "./å äö" "$(pwd)/å äö"
+test_submodule_relative_url "(null)" "$(pwd)/." "../submodule" "$(pwd)/submodule"
+test_submodule_relative_url "(null)" "$(pwd)/submodule" "../submodule" "$(pwd)/submodule"
+test_submodule_relative_url "(null)" "$(pwd)/home2/../remote" "../bundle1" "$(pwd)/home2/../bundle1"
+test_submodule_relative_url "(null)" "$(pwd)/submodule_update_repo" "./." "$(pwd)/submodule_update_repo/."
test_submodule_relative_url "(null)" "file:///tmp/repo" "../subrepo" "file:///tmp/subrepo"
test_submodule_relative_url "(null)" "foo/bar" "../submodule" "foo/submodule"
test_submodule_relative_url "(null)" "foo" "../submodule" "submodule"
diff --git a/t/t2203-add-intent.sh b/t/t2203-add-intent.sh
index 8f22c43e24..84a9028c43 100755
--- a/t/t2203-add-intent.sh
+++ b/t/t2203-add-intent.sh
@@ -5,10 +5,24 @@ test_description='Intent to add'
. ./test-lib.sh
test_expect_success 'intent to add' '
+ test_commit 1 &&
+ git rm 1.t &&
+ echo hello >1.t &&
echo hello >file &&
echo hello >elif &&
git add -N file &&
- git add elif
+ git add elif &&
+ git add -N 1.t
+'
+
+test_expect_success 'git status' '
+ git status --porcelain | grep -v actual >actual &&
+ cat >expect <<-\EOF &&
+ DA 1.t
+ A elif
+ A file
+ EOF
+ test_cmp expect actual
'
test_expect_success 'check result of "add -N"' '
@@ -43,7 +57,9 @@ test_expect_success 'i-t-a entry is simply ignored' '
git add -N nitfol &&
git commit -m second &&
test $(git ls-tree HEAD -- nitfol | wc -l) = 0 &&
- test $(git diff --name-only HEAD -- nitfol | wc -l) = 1
+ test $(git diff --name-only HEAD -- nitfol | wc -l) = 1 &&
+ test $(git diff --name-only --ita-invisible-in-index HEAD -- nitfol | wc -l) = 0 &&
+ test $(git diff --name-only --ita-invisible-in-index -- nitfol | wc -l) = 1
'
test_expect_success 'can commit with an unrelated i-t-a entry in index' '
@@ -113,5 +129,26 @@ test_expect_success 'cache-tree does skip dir that becomes empty' '
)
'
+test_expect_success 'commit: ita entries ignored in empty initial commit check' '
+ git init empty-initial-commit &&
+ (
+ cd empty-initial-commit &&
+ : >one &&
+ git add -N one &&
+ test_must_fail git commit -m nothing-new-here
+ )
+'
+
+test_expect_success 'commit: ita entries ignored in empty commit check' '
+ git init empty-subsequent-commit &&
+ (
+ cd empty-subsequent-commit &&
+ test_commit one &&
+ : >two &&
+ git add -N two &&
+ test_must_fail git commit -m nothing-new-here
+ )
+'
+
test_done
diff --git a/t/t3501-revert-cherry-pick.sh b/t/t3501-revert-cherry-pick.sh
index 51f3bbb8af..394f0005a1 100755
--- a/t/t3501-revert-cherry-pick.sh
+++ b/t/t3501-revert-cherry-pick.sh
@@ -96,7 +96,7 @@ test_expect_success 'revert forbidden on dirty working tree' '
echo content >extra_file &&
git add extra_file &&
test_must_fail git revert HEAD 2>errors &&
- test_i18ngrep "Your local changes would be overwritten by " errors
+ test_i18ngrep "your local changes would be overwritten by " errors
'
diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh
index 2142c1fa92..e1a6ccc00c 100755
--- a/t/t3903-stash.sh
+++ b/t/t3903-stash.sh
@@ -131,6 +131,26 @@ test_expect_success 'drop middle stash' '
test 1 = $(git show HEAD:file)
'
+test_expect_success 'drop middle stash by index' '
+ git reset --hard &&
+ echo 8 >file &&
+ git stash &&
+ echo 9 >file &&
+ git stash &&
+ git stash drop 1 &&
+ test 2 = $(git stash list | wc -l) &&
+ git stash apply &&
+ test 9 = $(cat file) &&
+ test 1 = $(git show :file) &&
+ test 1 = $(git show HEAD:file) &&
+ git reset --hard &&
+ git stash drop &&
+ git stash apply &&
+ test 3 = $(cat file) &&
+ test 1 = $(git show :file) &&
+ test 1 = $(git show HEAD:file)
+'
+
test_expect_success 'stash pop' '
git reset --hard &&
git stash pop &&
@@ -604,6 +624,21 @@ test_expect_success 'invalid ref of the form stash@{n}, n >= N' '
git stash drop
'
+test_expect_success 'invalid ref of the form "n", n >= N' '
+ git stash clear &&
+ test_must_fail git stash drop 0 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
+ git add file2 &&
+ git stash &&
+ test_must_fail git stash drop 1 &&
+ test_must_fail git stash pop 1 &&
+ test_must_fail git stash apply 1 &&
+ test_must_fail git stash show 1 &&
+ test_must_fail git stash branch tmp 1 &&
+ git stash drop
+'
+
test_expect_success 'stash branch should not drop the stash if the branch exists' '
git stash clear &&
echo foo >file &&
diff --git a/t/t7064-wtstatus-pv2.sh b/t/t7064-wtstatus-pv2.sh
index 3012a4d7c0..e319fa2e84 100755
--- a/t/t7064-wtstatus-pv2.sh
+++ b/t/t7064-wtstatus-pv2.sh
@@ -246,8 +246,8 @@ test_expect_success 'verify --intent-to-add output' '
git add --intent-to-add intent1.add intent2.add &&
cat >expect <<-EOF &&
- 1 AM N... 000000 100644 100644 $_z40 $EMPTY_BLOB intent1.add
- 1 AM N... 000000 100644 100644 $_z40 $EMPTY_BLOB intent2.add
+ 1 .A N... 000000 000000 100644 $_z40 $_z40 intent1.add
+ 1 .A N... 000000 000000 100644 $_z40 $_z40 intent2.add
EOF
git status --porcelain=v2 >actual &&
diff --git a/t/t7513-interpret-trailers.sh b/t/t7513-interpret-trailers.sh
index aee785cffa..4dd1d7c520 100755
--- a/t/t7513-interpret-trailers.sh
+++ b/t/t7513-interpret-trailers.sh
@@ -126,6 +126,305 @@ test_expect_success 'with multiline title in the message' '
test_cmp expected actual
'
+test_expect_success 'with non-trailer lines mixed with Signed-off-by' '
+ cat >patch <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ Signed-off-by: a <a@example.com>
+ this is not a trailer
+ EOF
+ cat >expected <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ Signed-off-by: a <a@example.com>
+ this is not a trailer
+ token: value
+ EOF
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'with non-trailer lines mixed with cherry picked from' '
+ cat >patch <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ (cherry picked from commit x)
+ this is not a trailer
+ EOF
+ cat >expected <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ (cherry picked from commit x)
+ this is not a trailer
+ token: value
+ EOF
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'with non-trailer lines mixed with a configured trailer' '
+ cat >patch <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ My-trailer: x
+ this is not a trailer
+ EOF
+ cat >expected <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ My-trailer: x
+ this is not a trailer
+ token: value
+ EOF
+ test_config trailer.my.key "My-trailer: " &&
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'with non-trailer lines mixed with a non-configured trailer' '
+ cat >patch <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ I-am-not-configured: x
+ this is not a trailer
+ EOF
+ cat >expected <<-\EOF &&
+
+ this is not a trailer
+ this is not a trailer
+ I-am-not-configured: x
+ this is not a trailer
+
+ token: value
+ EOF
+ test_config trailer.my.key "My-trailer: " &&
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'with all non-configured trailers' '
+ cat >patch <<-\EOF &&
+
+ I-am-not-configured: x
+ I-am-also-not-configured: x
+ EOF
+ cat >expected <<-\EOF &&
+
+ I-am-not-configured: x
+ I-am-also-not-configured: x
+ token: value
+ EOF
+ test_config trailer.my.key "My-trailer: " &&
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'with non-trailer lines only' '
+ cat >patch <<-\EOF &&
+
+ this is not a trailer
+ EOF
+ cat >expected <<-\EOF &&
+
+ this is not a trailer
+
+ token: value
+ EOF
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'line with leading whitespace is not trailer' '
+ q_to_tab >patch <<-\EOF &&
+
+ Qtoken: value
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ Qtoken: value
+
+ token: value
+ EOF
+ git interpret-trailers --trailer "token: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'multiline field treated as one trailer for 25% check' '
+ q_to_tab >patch <<-\EOF &&
+
+ Signed-off-by: a <a@example.com>
+ name: value on
+ Qmultiple lines
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ Signed-off-by: a <a@example.com>
+ name: value on
+ Qmultiple lines
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ this is not a trailer
+ name: value
+ EOF
+ git interpret-trailers --trailer "name: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'multiline field treated as atomic for placement' '
+ q_to_tab >patch <<-\EOF &&
+
+ another: trailer
+ name: value on
+ Qmultiple lines
+ another: trailer
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ name: value on
+ Qmultiple lines
+ name: value
+ another: trailer
+ EOF
+ test_config trailer.name.where after &&
+ git interpret-trailers --trailer "name: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'multiline field treated as atomic for replacement' '
+ q_to_tab >patch <<-\EOF &&
+
+ another: trailer
+ name: value on
+ Qmultiple lines
+ another: trailer
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ another: trailer
+ name: value
+ EOF
+ test_config trailer.name.ifexists replace &&
+ git interpret-trailers --trailer "name: value" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'multiline field treated as atomic for difference check' '
+ q_to_tab >patch <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ another: trailer
+ EOF
+ test_config trailer.name.ifexists addIfDifferent &&
+
+ q_to_tab >trailer <<-\EOF &&
+ name: first line
+ Qsecond line
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ another: trailer
+ EOF
+ git interpret-trailers --trailer "$(cat trailer)" patch >actual &&
+ test_cmp expected actual &&
+
+ q_to_tab >trailer <<-\EOF &&
+ name: first line
+ QQQQQsecond line
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ another: trailer
+ name: first line
+ QQQQQsecond line
+ EOF
+ git interpret-trailers --trailer "$(cat trailer)" patch >actual &&
+ test_cmp expected actual &&
+
+ q_to_tab >trailer <<-\EOF &&
+ name: first line *DIFFERENT*
+ Qsecond line
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ another: trailer
+ name: first line *DIFFERENT*
+ Qsecond line
+ EOF
+ git interpret-trailers --trailer "$(cat trailer)" patch >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'multiline field treated as atomic for neighbor check' '
+ q_to_tab >patch <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ another: trailer
+ EOF
+ test_config trailer.name.where after &&
+ test_config trailer.name.ifexists addIfDifferentNeighbor &&
+
+ q_to_tab >trailer <<-\EOF &&
+ name: first line
+ Qsecond line
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ another: trailer
+ EOF
+ git interpret-trailers --trailer "$(cat trailer)" patch >actual &&
+ test_cmp expected actual &&
+
+ q_to_tab >trailer <<-\EOF &&
+ name: first line
+ QQQQQsecond line
+ EOF
+ q_to_tab >expected <<-\EOF &&
+
+ another: trailer
+ name: first line
+ Qsecond line
+ name: first line
+ QQQQQsecond line
+ another: trailer
+ EOF
+ git interpret-trailers --trailer "$(cat trailer)" patch >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'with config setup' '
git config trailer.ack.key "Acked-by: " &&
cat >expected <<-\EOF &&
diff --git a/t/test-lib.sh b/t/test-lib.sh
index b859db61ac..cde7fc7fcf 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -809,7 +809,14 @@ then
return;
base=$(basename "$1")
- symlink_target=$GIT_BUILD_DIR/$base
+ case "$base" in
+ test-*)
+ symlink_target="$GIT_BUILD_DIR/t/helper/$base"
+ ;;
+ *)
+ symlink_target="$GIT_BUILD_DIR/$base"
+ ;;
+ esac
# do not override scripts
if test -x "$symlink_target" &&
test ! -d "$symlink_target" &&
diff --git a/t/valgrind/valgrind.sh b/t/valgrind/valgrind.sh
index 42153036dc..669ebaf68b 100755
--- a/t/valgrind/valgrind.sh
+++ b/t/valgrind/valgrind.sh
@@ -1,11 +1,19 @@
#!/bin/sh
base=$(basename "$0")
+case "$base" in
+test-*)
+ program="$GIT_VALGRIND/../../t/helper/$base"
+ ;;
+*)
+ program="$GIT_VALGRIND/../../$base"
+ ;;
+esac
TOOL_OPTIONS='--leak-check=no'
test -z "$GIT_VALGRIND_ENABLED" &&
-exec "$GIT_VALGRIND"/../../"$base" "$@"
+exec "$program" "$@"
case "$GIT_VALGRIND_MODE" in
memcheck-fast)
@@ -29,4 +37,4 @@ exec valgrind -q --error-exitcode=126 \
--log-fd=4 \
--input-fd=4 \
$GIT_VALGRIND_OPTIONS \
- "$GIT_VALGRIND"/../../"$base" "$@"
+ "$program" "$@"
diff --git a/templates/hooks--pre-receive.sample b/templates/hooks--pre-receive.sample
index a1fd29ec14..a1fd29ec14 100644..100755
--- a/templates/hooks--pre-receive.sample
+++ b/templates/hooks--pre-receive.sample
diff --git a/trailer.c b/trailer.c
index aecaf9232a..f0ecde2d2f 100644
--- a/trailer.c
+++ b/trailer.c
@@ -4,6 +4,7 @@
#include "commit.h"
#include "tempfile.h"
#include "trailer.h"
+#include "list.h"
/*
* Copyright (c) 2013, 2014 Christian Couder <chriscool@tuxfamily.org>
*/
@@ -25,19 +26,40 @@ struct conf_info {
static struct conf_info default_conf_info;
struct trailer_item {
- struct trailer_item *previous;
- struct trailer_item *next;
- const char *token;
- const char *value;
+ struct list_head list;
+ /*
+ * If this is not a trailer line, the line is stored in value
+ * (excluding the terminating newline) and token is NULL.
+ */
+ char *token;
+ char *value;
+};
+
+struct arg_item {
+ struct list_head list;
+ char *token;
+ char *value;
struct conf_info conf;
};
-static struct trailer_item *first_conf_item;
+static LIST_HEAD(conf_head);
static char *separators = ":";
#define TRAILER_ARG_STRING "$ARG"
+static const char *git_generated_prefixes[] = {
+ "Signed-off-by: ",
+ "(cherry picked from commit ",
+ NULL
+};
+
+/* Iterate over the elements of the list. */
+#define list_for_each_dir(pos, head, is_reverse) \
+ for (pos = is_reverse ? (head)->prev : (head)->next; \
+ pos != (head); \
+ pos = is_reverse ? pos->prev : pos->next)
+
static int after_or_end(enum action_where where)
{
return (where == WHERE_AFTER) || (where == WHERE_END);
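
The hunk above swaps the hand-rolled previous/next pointers for an embedded
struct list_head: items are recovered from a node with list_entry(), and the
new list_for_each_dir() macro walks the same circular list in either
direction. As a rough standalone sketch of that kernel-style intrusive-list
idea (deliberately not Git's list.h, just an illustration):

/*
 * Minimal standalone sketch of an intrusive doubly-linked list and of the
 * list_for_each_dir() idea: the node is embedded in the item, and the same
 * ring can be walked forwards or backwards.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_dir(pos, head, is_reverse) \
	for (pos = (is_reverse) ? (head)->prev : (head)->next; \
	     pos != (head); \
	     pos = (is_reverse) ? pos->prev : pos->next)

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct item {
	struct list_head list;	/* embedded node, as in trailer_item */
	const char *token;
};

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct item a = { .token = "Signed-off-by" };
	struct item b = { .token = "Acked-by" };
	struct list_head *pos;

	list_add_tail(&a.list, &head);
	list_add_tail(&b.list, &head);

	list_for_each_dir(pos, &head, 0)	/* forwards */
		printf("%s\n", list_entry(pos, struct item, list)->token);
	list_for_each_dir(pos, &head, 1)	/* backwards */
		printf("%s\n", list_entry(pos, struct item, list)->token);
	return 0;
}
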
@@ -56,21 +78,26 @@ static size_t token_len_without_separator(const char *token, size_t len)
return len;
}
-static int same_token(struct trailer_item *a, struct trailer_item *b)
+static int same_token(struct trailer_item *a, struct arg_item *b)
{
- size_t a_len = token_len_without_separator(a->token, strlen(a->token));
- size_t b_len = token_len_without_separator(b->token, strlen(b->token));
- size_t min_len = (a_len > b_len) ? b_len : a_len;
+ size_t a_len, b_len, min_len;
+
+ if (!a->token)
+ return 0;
+
+ a_len = token_len_without_separator(a->token, strlen(a->token));
+ b_len = token_len_without_separator(b->token, strlen(b->token));
+ min_len = (a_len > b_len) ? b_len : a_len;
return !strncasecmp(a->token, b->token, min_len);
}
-static int same_value(struct trailer_item *a, struct trailer_item *b)
+static int same_value(struct trailer_item *a, struct arg_item *b)
{
return !strcasecmp(a->value, b->value);
}
-static int same_trailer(struct trailer_item *a, struct trailer_item *b)
+static int same_trailer(struct trailer_item *a, struct arg_item *b)
{
return same_token(a, b) && same_value(a, b);
}
@@ -92,11 +119,18 @@ static inline void strbuf_replace(struct strbuf *sb, const char *a, const char *
static void free_trailer_item(struct trailer_item *item)
{
+ free(item->token);
+ free(item->value);
+ free(item);
+}
+
+static void free_arg_item(struct arg_item *item)
+{
free(item->conf.name);
free(item->conf.key);
free(item->conf.command);
- free((char *)item->token);
- free((char *)item->value);
+ free(item->token);
+ free(item->value);
free(item);
}
@@ -111,7 +145,14 @@ static char last_non_space_char(const char *s)
static void print_tok_val(FILE *outfile, const char *tok, const char *val)
{
- char c = last_non_space_char(tok);
+ char c;
+
+ if (!tok) {
+ fprintf(outfile, "%s\n", val);
+ return;
+ }
+
+ c = last_non_space_char(tok);
if (!c)
return;
if (strchr(separators, c))
@@ -120,108 +161,68 @@ static void print_tok_val(FILE *outfile, const char *tok, const char *val)
fprintf(outfile, "%s%c %s\n", tok, separators[0], val);
}
-static void print_all(FILE *outfile, struct trailer_item *first, int trim_empty)
+static void print_all(FILE *outfile, struct list_head *head, int trim_empty)
{
+ struct list_head *pos;
struct trailer_item *item;
- for (item = first; item; item = item->next) {
+ list_for_each(pos, head) {
+ item = list_entry(pos, struct trailer_item, list);
if (!trim_empty || strlen(item->value) > 0)
print_tok_val(outfile, item->token, item->value);
}
}
-static void update_last(struct trailer_item **last)
-{
- if (*last)
- while ((*last)->next != NULL)
- *last = (*last)->next;
-}
-
-static void update_first(struct trailer_item **first)
+static struct trailer_item *trailer_from_arg(struct arg_item *arg_tok)
{
- if (*first)
- while ((*first)->previous != NULL)
- *first = (*first)->previous;
+ struct trailer_item *new = xcalloc(sizeof(*new), 1);
+ new->token = arg_tok->token;
+ new->value = arg_tok->value;
+ arg_tok->token = arg_tok->value = NULL;
+ free_arg_item(arg_tok);
+ return new;
}
static void add_arg_to_input_list(struct trailer_item *on_tok,
- struct trailer_item *arg_tok,
- struct trailer_item **first,
- struct trailer_item **last)
-{
- if (after_or_end(arg_tok->conf.where)) {
- arg_tok->next = on_tok->next;
- on_tok->next = arg_tok;
- arg_tok->previous = on_tok;
- if (arg_tok->next)
- arg_tok->next->previous = arg_tok;
- update_last(last);
- } else {
- arg_tok->previous = on_tok->previous;
- on_tok->previous = arg_tok;
- arg_tok->next = on_tok;
- if (arg_tok->previous)
- arg_tok->previous->next = arg_tok;
- update_first(first);
- }
+ struct arg_item *arg_tok)
+{
+ int aoe = after_or_end(arg_tok->conf.where);
+ struct trailer_item *to_add = trailer_from_arg(arg_tok);
+ if (aoe)
+ list_add(&to_add->list, &on_tok->list);
+ else
+ list_add_tail(&to_add->list, &on_tok->list);
}
static int check_if_different(struct trailer_item *in_tok,
- struct trailer_item *arg_tok,
- int check_all)
+ struct arg_item *arg_tok,
+ int check_all,
+ struct list_head *head)
{
enum action_where where = arg_tok->conf.where;
+ struct list_head *next_head;
do {
- if (!in_tok)
- return 1;
if (same_trailer(in_tok, arg_tok))
return 0;
/*
* if we want to add a trailer after another one,
* we have to check those before this one
*/
- in_tok = after_or_end(where) ? in_tok->previous : in_tok->next;
+ next_head = after_or_end(where) ? in_tok->list.prev
+ : in_tok->list.next;
+ if (next_head == head)
+ break;
+ in_tok = list_entry(next_head, struct trailer_item, list);
} while (check_all);
return 1;
}
-static void remove_from_list(struct trailer_item *item,
- struct trailer_item **first,
- struct trailer_item **last)
-{
- struct trailer_item *next = item->next;
- struct trailer_item *previous = item->previous;
-
- if (next) {
- item->next->previous = previous;
- item->next = NULL;
- } else if (last)
- *last = previous;
-
- if (previous) {
- item->previous->next = next;
- item->previous = NULL;
- } else if (first)
- *first = next;
-}
-
-static struct trailer_item *remove_first(struct trailer_item **first)
-{
- struct trailer_item *item = *first;
- *first = item->next;
- if (item->next) {
- item->next->previous = NULL;
- item->next = NULL;
- }
- return item;
-}
-
-static const char *apply_command(const char *command, const char *arg)
+static char *apply_command(const char *command, const char *arg)
{
struct strbuf cmd = STRBUF_INIT;
struct strbuf buf = STRBUF_INIT;
struct child_process cp = CHILD_PROCESS_INIT;
const char *argv[] = {NULL, NULL};
- const char *result;
+ char *result;
strbuf_addstr(&cmd, command);
if (arg)
@@ -246,7 +247,7 @@ static const char *apply_command(const char *command, const char *arg)
return result;
}
-static void apply_item_command(struct trailer_item *in_tok, struct trailer_item *arg_tok)
+static void apply_item_command(struct trailer_item *in_tok, struct arg_item *arg_tok)
{
if (arg_tok->conf.command) {
const char *arg;
@@ -264,121 +265,108 @@ static void apply_item_command(struct trailer_item *in_tok, struct trailer_item
}
static void apply_arg_if_exists(struct trailer_item *in_tok,
- struct trailer_item *arg_tok,
+ struct arg_item *arg_tok,
struct trailer_item *on_tok,
- struct trailer_item **in_tok_first,
- struct trailer_item **in_tok_last)
+ struct list_head *head)
{
switch (arg_tok->conf.if_exists) {
case EXISTS_DO_NOTHING:
- free_trailer_item(arg_tok);
+ free_arg_item(arg_tok);
break;
case EXISTS_REPLACE:
apply_item_command(in_tok, arg_tok);
- add_arg_to_input_list(on_tok, arg_tok,
- in_tok_first, in_tok_last);
- remove_from_list(in_tok, in_tok_first, in_tok_last);
+ add_arg_to_input_list(on_tok, arg_tok);
+ list_del(&in_tok->list);
free_trailer_item(in_tok);
break;
case EXISTS_ADD:
apply_item_command(in_tok, arg_tok);
- add_arg_to_input_list(on_tok, arg_tok,
- in_tok_first, in_tok_last);
+ add_arg_to_input_list(on_tok, arg_tok);
break;
case EXISTS_ADD_IF_DIFFERENT:
apply_item_command(in_tok, arg_tok);
- if (check_if_different(in_tok, arg_tok, 1))
- add_arg_to_input_list(on_tok, arg_tok,
- in_tok_first, in_tok_last);
+ if (check_if_different(in_tok, arg_tok, 1, head))
+ add_arg_to_input_list(on_tok, arg_tok);
else
- free_trailer_item(arg_tok);
+ free_arg_item(arg_tok);
break;
case EXISTS_ADD_IF_DIFFERENT_NEIGHBOR:
apply_item_command(in_tok, arg_tok);
- if (check_if_different(on_tok, arg_tok, 0))
- add_arg_to_input_list(on_tok, arg_tok,
- in_tok_first, in_tok_last);
+ if (check_if_different(on_tok, arg_tok, 0, head))
+ add_arg_to_input_list(on_tok, arg_tok);
else
- free_trailer_item(arg_tok);
+ free_arg_item(arg_tok);
break;
}
}
-static void apply_arg_if_missing(struct trailer_item **in_tok_first,
- struct trailer_item **in_tok_last,
- struct trailer_item *arg_tok)
+static void apply_arg_if_missing(struct list_head *head,
+ struct arg_item *arg_tok)
{
- struct trailer_item **in_tok;
enum action_where where;
+ struct trailer_item *to_add;
switch (arg_tok->conf.if_missing) {
case MISSING_DO_NOTHING:
- free_trailer_item(arg_tok);
+ free_arg_item(arg_tok);
break;
case MISSING_ADD:
where = arg_tok->conf.where;
- in_tok = after_or_end(where) ? in_tok_last : in_tok_first;
apply_item_command(NULL, arg_tok);
- if (*in_tok) {
- add_arg_to_input_list(*in_tok, arg_tok,
- in_tok_first, in_tok_last);
- } else {
- *in_tok_first = arg_tok;
- *in_tok_last = arg_tok;
- }
- break;
+ to_add = trailer_from_arg(arg_tok);
+ if (after_or_end(where))
+ list_add_tail(&to_add->list, head);
+ else
+ list_add(&to_add->list, head);
}
}
-static int find_same_and_apply_arg(struct trailer_item **in_tok_first,
- struct trailer_item **in_tok_last,
- struct trailer_item *arg_tok)
+static int find_same_and_apply_arg(struct list_head *head,
+ struct arg_item *arg_tok)
{
+ struct list_head *pos;
struct trailer_item *in_tok;
struct trailer_item *on_tok;
- struct trailer_item *following_tok;
enum action_where where = arg_tok->conf.where;
int middle = (where == WHERE_AFTER) || (where == WHERE_BEFORE);
int backwards = after_or_end(where);
- struct trailer_item *start_tok = backwards ? *in_tok_last : *in_tok_first;
+ struct trailer_item *start_tok;
+
+ if (list_empty(head))
+ return 0;
- for (in_tok = start_tok; in_tok; in_tok = following_tok) {
- following_tok = backwards ? in_tok->previous : in_tok->next;
+ start_tok = list_entry(backwards ? head->prev : head->next,
+ struct trailer_item,
+ list);
+
+ list_for_each_dir(pos, head, backwards) {
+ in_tok = list_entry(pos, struct trailer_item, list);
if (!same_token(in_tok, arg_tok))
continue;
on_tok = middle ? in_tok : start_tok;
- apply_arg_if_exists(in_tok, arg_tok, on_tok,
- in_tok_first, in_tok_last);
+ apply_arg_if_exists(in_tok, arg_tok, on_tok, head);
return 1;
}
return 0;
}
-static void process_trailers_lists(struct trailer_item **in_tok_first,
- struct trailer_item **in_tok_last,
- struct trailer_item **arg_tok_first)
+static void process_trailers_lists(struct list_head *head,
+ struct list_head *arg_head)
{
- struct trailer_item *arg_tok;
- struct trailer_item *next_arg;
-
- if (!*arg_tok_first)
- return;
+ struct list_head *pos, *p;
+ struct arg_item *arg_tok;
- for (arg_tok = *arg_tok_first; arg_tok; arg_tok = next_arg) {
+ list_for_each_safe(pos, p, arg_head) {
int applied = 0;
+ arg_tok = list_entry(pos, struct arg_item, list);
- next_arg = arg_tok->next;
- remove_from_list(arg_tok, arg_tok_first, NULL);
+ list_del(pos);
- applied = find_same_and_apply_arg(in_tok_first,
- in_tok_last,
- arg_tok);
+ applied = find_same_and_apply_arg(head, arg_tok);
if (!applied)
- apply_arg_if_missing(in_tok_first,
- in_tok_last,
- arg_tok);
+ apply_arg_if_missing(head, arg_tok);
}
}
@@ -425,7 +413,7 @@ static int set_if_missing(struct conf_info *item, const char *value)
return 0;
}
-static void duplicate_conf(struct conf_info *dst, struct conf_info *src)
+static void duplicate_conf(struct conf_info *dst, const struct conf_info *src)
{
*dst = *src;
dst->name = xstrdup_or_null(src->name);
@@ -433,30 +421,24 @@ static void duplicate_conf(struct conf_info *dst, struct conf_info *src)
dst->command = xstrdup_or_null(src->command);
}
-static struct trailer_item *get_conf_item(const char *name)
+static struct arg_item *get_conf_item(const char *name)
{
- struct trailer_item *item;
- struct trailer_item *previous;
+ struct list_head *pos;
+ struct arg_item *item;
/* Look up item with same name */
- for (previous = NULL, item = first_conf_item;
- item;
- previous = item, item = item->next) {
+ list_for_each(pos, &conf_head) {
+ item = list_entry(pos, struct arg_item, list);
if (!strcasecmp(item->conf.name, name))
return item;
}
	/* Item does not already exist, create it */
- item = xcalloc(sizeof(struct trailer_item), 1);
+ item = xcalloc(sizeof(*item), 1);
duplicate_conf(&item->conf, &default_conf_info);
item->conf.name = xstrdup(name);
- if (!previous)
- first_conf_item = item;
- else {
- previous->next = item;
- item->previous = previous;
- }
+ list_add_tail(&item->list, &conf_head);
return item;
}
@@ -506,7 +488,7 @@ static int git_trailer_default_config(const char *conf_key, const char *value, v
static int git_trailer_config(const char *conf_key, const char *value, void *cb)
{
const char *trailer_item, *variable_name;
- struct trailer_item *item;
+ struct arg_item *item;
struct conf_info *conf;
char *name = NULL;
enum trailer_info_type type;
@@ -564,33 +546,7 @@ static int git_trailer_config(const char *conf_key, const char *value, void *cb)
return 0;
}
-static int parse_trailer(struct strbuf *tok, struct strbuf *val, const char *trailer)
-{
- size_t len;
- struct strbuf seps = STRBUF_INIT;
- strbuf_addstr(&seps, separators);
- strbuf_addch(&seps, '=');
- len = strcspn(trailer, seps.buf);
- strbuf_release(&seps);
- if (len == 0) {
- int l = strlen(trailer);
- while (l > 0 && isspace(trailer[l - 1]))
- l--;
- return error(_("empty trailer token in trailer '%.*s'"), l, trailer);
- }
- if (len < strlen(trailer)) {
- strbuf_add(tok, trailer, len);
- strbuf_trim(tok);
- strbuf_addstr(val, trailer + len + 1);
- strbuf_trim(val);
- } else {
- strbuf_addstr(tok, trailer);
- strbuf_trim(tok);
- }
- return 0;
-}
-
-static const char *token_from_item(struct trailer_item *item, char *tok)
+static const char *token_from_item(struct arg_item *item, char *tok)
{
if (item->conf.key)
return item->conf.key;
@@ -599,94 +555,134 @@ static const char *token_from_item(struct trailer_item *item, char *tok)
return item->conf.name;
}
-static struct trailer_item *new_trailer_item(struct trailer_item *conf_item,
- char *tok, char *val)
-{
- struct trailer_item *new = xcalloc(sizeof(*new), 1);
- new->value = val ? val : xstrdup("");
-
- if (conf_item) {
- duplicate_conf(&new->conf, &conf_item->conf);
- new->token = xstrdup(token_from_item(conf_item, tok));
- free(tok);
- } else {
- duplicate_conf(&new->conf, &default_conf_info);
- new->token = tok;
- }
-
- return new;
-}
-
-static int token_matches_item(const char *tok, struct trailer_item *item, int tok_len)
+static int token_matches_item(const char *tok, struct arg_item *item, int tok_len)
{
if (!strncasecmp(tok, item->conf.name, tok_len))
return 1;
return item->conf.key ? !strncasecmp(tok, item->conf.key, tok_len) : 0;
}
-static struct trailer_item *create_trailer_item(const char *string)
+/*
+ * Return the location of the first separator in line, or -1 if there is no
+ * separator.
+ */
+static int find_separator(const char *line, const char *separators)
{
- struct strbuf tok = STRBUF_INIT;
- struct strbuf val = STRBUF_INIT;
- struct trailer_item *item;
- int tok_len;
+ int loc = strcspn(line, separators);
+ if (!line[loc])
+ return -1;
+ return loc;
+}
- if (parse_trailer(&tok, &val, string))
- return NULL;
+/*
+ * Obtain the token, value, and conf from the given trailer.
+ *
+ * separator_pos must not be 0, since the token cannot be an empty string.
+ *
+ * If separator_pos is -1, interpret the whole trailer as a token.
+ */
+static void parse_trailer(struct strbuf *tok, struct strbuf *val,
+ const struct conf_info **conf, const char *trailer,
+ int separator_pos)
+{
+ struct arg_item *item;
+ int tok_len;
+ struct list_head *pos;
- tok_len = token_len_without_separator(tok.buf, tok.len);
+ if (separator_pos != -1) {
+ strbuf_add(tok, trailer, separator_pos);
+ strbuf_trim(tok);
+ strbuf_addstr(val, trailer + separator_pos + 1);
+ strbuf_trim(val);
+ } else {
+ strbuf_addstr(tok, trailer);
+ strbuf_trim(tok);
+ }
	/* Look up whether the token matches something in the config */
- for (item = first_conf_item; item; item = item->next) {
- if (token_matches_item(tok.buf, item, tok_len))
- return new_trailer_item(item,
- strbuf_detach(&tok, NULL),
- strbuf_detach(&val, NULL));
+ tok_len = token_len_without_separator(tok->buf, tok->len);
+ if (conf)
+ *conf = &default_conf_info;
+ list_for_each(pos, &conf_head) {
+ item = list_entry(pos, struct arg_item, list);
+ if (token_matches_item(tok->buf, item, tok_len)) {
+ char *tok_buf = strbuf_detach(tok, NULL);
+ if (conf)
+ *conf = &item->conf;
+ strbuf_addstr(tok, token_from_item(item, tok_buf));
+ free(tok_buf);
+ break;
+ }
}
+}
- return new_trailer_item(NULL,
- strbuf_detach(&tok, NULL),
- strbuf_detach(&val, NULL));
+static struct trailer_item *add_trailer_item(struct list_head *head, char *tok,
+ char *val)
+{
+ struct trailer_item *new = xcalloc(sizeof(*new), 1);
+ new->token = tok;
+ new->value = val;
+ list_add_tail(&new->list, head);
+ return new;
}
-static void add_trailer_item(struct trailer_item **first,
- struct trailer_item **last,
- struct trailer_item *new)
+static void add_arg_item(struct list_head *arg_head, char *tok, char *val,
+ const struct conf_info *conf)
{
- if (!new)
- return;
- if (!*last) {
- *first = new;
- *last = new;
- } else {
- (*last)->next = new;
- new->previous = *last;
- *last = new;
- }
+ struct arg_item *new = xcalloc(sizeof(*new), 1);
+ new->token = tok;
+ new->value = val;
+ duplicate_conf(&new->conf, conf);
+ list_add_tail(&new->list, arg_head);
}
-static struct trailer_item *process_command_line_args(struct string_list *trailers)
+static void process_command_line_args(struct list_head *arg_head,
+ struct string_list *trailers)
{
- struct trailer_item *arg_tok_first = NULL;
- struct trailer_item *arg_tok_last = NULL;
struct string_list_item *tr;
- struct trailer_item *item;
+ struct arg_item *item;
+ struct strbuf tok = STRBUF_INIT;
+ struct strbuf val = STRBUF_INIT;
+ const struct conf_info *conf;
+ struct list_head *pos;
- /* Add a trailer item for each configured trailer with a command */
- for (item = first_conf_item; item; item = item->next) {
- if (item->conf.command) {
- struct trailer_item *new = new_trailer_item(item, NULL, NULL);
- add_trailer_item(&arg_tok_first, &arg_tok_last, new);
- }
+ /*
+ * In command-line arguments, '=' is accepted (in addition to the
+ * separators that are defined).
+ */
+ char *cl_separators = xstrfmt("=%s", separators);
+
+ /* Add an arg item for each configured trailer with a command */
+ list_for_each(pos, &conf_head) {
+ item = list_entry(pos, struct arg_item, list);
+ if (item->conf.command)
+ add_arg_item(arg_head,
+ xstrdup(token_from_item(item, NULL)),
+ xstrdup(""),
+ &item->conf);
}
- /* Add a trailer item for each trailer on the command line */
+ /* Add an arg item for each trailer on the command line */
for_each_string_list_item(tr, trailers) {
- struct trailer_item *new = create_trailer_item(tr->string);
- add_trailer_item(&arg_tok_first, &arg_tok_last, new);
+ int separator_pos = find_separator(tr->string, cl_separators);
+ if (separator_pos == 0) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, tr->string);
+ strbuf_trim(&sb);
+ error(_("empty trailer token in trailer '%.*s'"),
+ (int) sb.len, sb.buf);
+ strbuf_release(&sb);
+ } else {
+ parse_trailer(&tok, &val, &conf, tr->string,
+ separator_pos);
+ add_arg_item(arg_head,
+ strbuf_detach(&tok, NULL),
+ strbuf_detach(&val, NULL),
+ conf);
+ }
}
- return arg_tok_first;
+ free(cl_separators);
}
static struct strbuf **read_input_file(const char *file)
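
The hunk above splits the old parse_trailer() in two: find_separator()
returns the offset of the first separator in a line (or -1 if there is
none), and the new parse_trailer() trims the token and value around that
offset; on the command line '=' is accepted in addition to the configured
separators (":" by default). A simplified standalone illustration, using
find_separator() exactly as in the hunk but leaving out the config lookup
and the strbuf plumbing:

/*
 * Split a --trailer style argument into a token and a value.
 * The trimming here is deliberately minimal (leading spaces only).
 */
#include <stdio.h>
#include <string.h>

static int find_separator(const char *line, const char *separators)
{
	int loc = strcspn(line, separators);
	if (!line[loc])
		return -1;	/* no separator: whole line is a token */
	return loc;
}

int main(void)
{
	const char *arg = "Signed-off-by= Alice <alice@example.com>";
	int pos = find_separator(arg, "=:");

	if (pos == 0)
		fprintf(stderr, "empty trailer token in trailer '%s'\n", arg);
	else if (pos < 0)
		printf("token only: '%s'\n", arg);
	else
		printf("token: '%.*s', value: '%s'\n",
		       pos, arg, arg + pos + 1 + strspn(arg + pos + 1, " "));
	return 0;
}
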
@@ -734,6 +730,15 @@ static int find_patch_start(struct strbuf **lines, int count)
static int find_trailer_start(struct strbuf **lines, int count)
{
int start, end_of_title, only_spaces = 1;
+ int recognized_prefix = 0, trailer_lines = 0, non_trailer_lines = 0;
+ /*
+ * Number of possible continuation lines encountered. This will be
+ * reset to 0 if we encounter a trailer (since those lines are to be
+ * considered continuations of that trailer), and added to
+ * non_trailer_lines if we encounter a non-trailer (since those lines
+ * are to be considered non-trailers).
+ */
+ int possible_continuation_lines = 0;
/* The first paragraph is the title and cannot be trailers */
for (start = 0; start < count; start++) {
@@ -745,26 +750,71 @@ static int find_trailer_start(struct strbuf **lines, int count)
end_of_title = start;
/*
- * Get the start of the trailers by looking starting from the end
- * for a line with only spaces before lines with one separator.
+	 * Get the start of the trailers by looking, starting from the end,
+	 * for a blank line before a set of non-blank lines that (i) are all
+	 * trailers, or (ii) contain at least one Git-generated trailer and
+	 * consist of at least 25% trailers.
*/
for (start = count - 1; start >= end_of_title; start--) {
- if (lines[start]->buf[0] == comment_line_char)
+ const char **p;
+ int separator_pos;
+
+ if (lines[start]->buf[0] == comment_line_char) {
+ non_trailer_lines += possible_continuation_lines;
+ possible_continuation_lines = 0;
continue;
+ }
if (contains_only_spaces(lines[start]->buf)) {
if (only_spaces)
continue;
- return start + 1;
+ non_trailer_lines += possible_continuation_lines;
+ if (recognized_prefix &&
+ trailer_lines * 3 >= non_trailer_lines)
+ return start + 1;
+ if (trailer_lines && !non_trailer_lines)
+ return start + 1;
+ return count;
}
- if (strcspn(lines[start]->buf, separators) < lines[start]->len) {
- if (only_spaces)
- only_spaces = 0;
- continue;
+ only_spaces = 0;
+
+ for (p = git_generated_prefixes; *p; p++) {
+ if (starts_with(lines[start]->buf, *p)) {
+ trailer_lines++;
+ possible_continuation_lines = 0;
+ recognized_prefix = 1;
+ goto continue_outer_loop;
+ }
+ }
+
+ separator_pos = find_separator(lines[start]->buf, separators);
+ if (separator_pos >= 1 && !isspace(lines[start]->buf[0])) {
+ struct list_head *pos;
+
+ trailer_lines++;
+ possible_continuation_lines = 0;
+ if (recognized_prefix)
+ continue;
+ list_for_each(pos, &conf_head) {
+ struct arg_item *item;
+ item = list_entry(pos, struct arg_item, list);
+ if (token_matches_item(lines[start]->buf, item,
+ separator_pos)) {
+ recognized_prefix = 1;
+ break;
+ }
+ }
+ } else if (isspace(lines[start]->buf[0]))
+ possible_continuation_lines++;
+ else {
+ non_trailer_lines++;
+ non_trailer_lines += possible_continuation_lines;
+ possible_continuation_lines = 0;
}
- return count;
+continue_outer_loop:
+ ;
}
- return only_spaces ? count : 0;
+ return count;
}
/* Get the index of the end of the trailers */
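
Concretely, the new stopping rule accepts a candidate block when it consists
of trailers only, or when it contains a recognized (Git-generated or
configured) trailer and trailer_lines * 3 >= non_trailer_lines, which is the
same as requiring that at least 25% of the block's lines are trailers
(4 * trailer >= trailer + non_trailer). In the t7513 '25% check' test above,
the block has two trailer lines (the Signed-off-by line, plus "name: value on"
whose indented continuation only resets the continuation counter) against six
non-trailer lines: 2 * 3 = 6 >= 6 and a Git-generated prefix was seen, so the
whole block is treated as the trailer block and the new trailer is appended
inside it rather than after a fresh blank line.
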
@@ -802,11 +852,13 @@ static void print_lines(FILE *outfile, struct strbuf **lines, int start, int end
static int process_input_file(FILE *outfile,
struct strbuf **lines,
- struct trailer_item **in_tok_first,
- struct trailer_item **in_tok_last)
+ struct list_head *head)
{
int count = 0;
int patch_start, trailer_start, trailer_end, i;
+ struct strbuf tok = STRBUF_INIT;
+ struct strbuf val = STRBUF_INIT;
+ struct trailer_item *last = NULL;
/* Get the line count */
while (lines[count])
@@ -824,20 +876,43 @@ static int process_input_file(FILE *outfile,
/* Parse trailer lines */
for (i = trailer_start; i < trailer_end; i++) {
- if (lines[i]->buf[0] != comment_line_char) {
- struct trailer_item *new = create_trailer_item(lines[i]->buf);
- add_trailer_item(in_tok_first, in_tok_last, new);
+ int separator_pos;
+ if (lines[i]->buf[0] == comment_line_char)
+ continue;
+ if (last && isspace(lines[i]->buf[0])) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addf(&sb, "%s\n%s", last->value, lines[i]->buf);
+ strbuf_strip_suffix(&sb, "\n");
+ free(last->value);
+ last->value = strbuf_detach(&sb, NULL);
+ continue;
+ }
+ separator_pos = find_separator(lines[i]->buf, separators);
+ if (separator_pos >= 1) {
+ parse_trailer(&tok, &val, NULL, lines[i]->buf,
+ separator_pos);
+ last = add_trailer_item(head,
+ strbuf_detach(&tok, NULL),
+ strbuf_detach(&val, NULL));
+ } else {
+ strbuf_addbuf(&val, lines[i]);
+ strbuf_strip_suffix(&val, "\n");
+ add_trailer_item(head,
+ NULL,
+ strbuf_detach(&val, NULL));
+ last = NULL;
}
}
return trailer_end;
}
-static void free_all(struct trailer_item **first)
+static void free_all(struct list_head *head)
{
- while (*first) {
- struct trailer_item *item = remove_first(first);
- free_trailer_item(item);
+ struct list_head *pos, *p;
+ list_for_each_safe(pos, p, head) {
+ list_del(pos);
+ free_trailer_item(list_entry(pos, struct trailer_item, list));
}
}
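
In the rewritten process_input_file() above, a trailer-block line that starts
with whitespace is folded into the previous item's value with an embedded
newline, and a line with no separator becomes a token-less item
(token == NULL) that print_tok_val() later reproduces verbatim. For example,
the block "name: value on" / "<TAB>multiple lines" / "this is not a trailer"
parses into the items ("name", "value on\n\tmultiple lines") and
(NULL, "this is not a trailer"), which is why the multiline trailers and
non-trailer lines in the t7513 tests above survive a round trip unchanged.
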
@@ -874,9 +949,8 @@ static FILE *create_in_place_tempfile(const char *file)
void process_trailers(const char *file, int in_place, int trim_empty, struct string_list *trailers)
{
- struct trailer_item *in_tok_first = NULL;
- struct trailer_item *in_tok_last = NULL;
- struct trailer_item *arg_tok_first;
+ LIST_HEAD(head);
+ LIST_HEAD(arg_head);
struct strbuf **lines;
int trailer_end;
FILE *outfile = stdout;
@@ -891,15 +965,15 @@ void process_trailers(const char *file, int in_place, int trim_empty, struct str
outfile = create_in_place_tempfile(file);
/* Print the lines before the trailers */
- trailer_end = process_input_file(outfile, lines, &in_tok_first, &in_tok_last);
+ trailer_end = process_input_file(outfile, lines, &head);
- arg_tok_first = process_command_line_args(trailers);
+ process_command_line_args(&arg_head, trailers);
- process_trailers_lists(&in_tok_first, &in_tok_last, &arg_tok_first);
+ process_trailers_lists(&head, &arg_head);
- print_all(outfile, in_tok_first, trim_empty);
+ print_all(outfile, &head, trim_empty);
- free_all(&in_tok_first);
+ free_all(&head);
/* Print the lines after the trailers as is */
print_lines(outfile, lines, trailer_end, INT_MAX);
diff --git a/transport.c b/transport.c
index 079499dbaf..d57e8dec28 100644
--- a/transport.c
+++ b/transport.c
@@ -307,7 +307,9 @@ void transport_update_tracking_ref(struct remote *remote, struct ref *ref, int v
}
}
-static void print_ref_status(char flag, const char *summary, struct ref *to, struct ref *from, const char *msg, int porcelain)
+static void print_ref_status(char flag, const char *summary,
+ struct ref *to, struct ref *from, const char *msg,
+ int porcelain, int summary_width)
{
if (porcelain) {
if (from)
@@ -319,7 +321,7 @@ static void print_ref_status(char flag, const char *summary, struct ref *to, str
else
fprintf(stdout, "%s\n", summary);
} else {
- fprintf(stderr, " %c %-*s ", flag, TRANSPORT_SUMMARY_WIDTH, summary);
+ fprintf(stderr, " %c %-*s ", flag, summary_width, summary);
if (from)
fprintf(stderr, "%s -> %s", prettify_refname(from->name), prettify_refname(to->name));
else
@@ -333,15 +335,16 @@ static void print_ref_status(char flag, const char *summary, struct ref *to, str
}
}
-static void print_ok_ref_status(struct ref *ref, int porcelain)
+static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_width)
{
if (ref->deletion)
- print_ref_status('-', "[deleted]", ref, NULL, NULL, porcelain);
+ print_ref_status('-', "[deleted]", ref, NULL, NULL,
+ porcelain, summary_width);
else if (is_null_oid(&ref->old_oid))
print_ref_status('*',
(starts_with(ref->name, "refs/tags/") ? "[new tag]" :
"[new branch]"),
- ref, ref->peer_ref, NULL, porcelain);
+ ref, ref->peer_ref, NULL, porcelain, summary_width);
else {
struct strbuf quickref = STRBUF_INIT;
char type;
@@ -361,12 +364,14 @@ static void print_ok_ref_status(struct ref *ref, int porcelain)
strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash,
DEFAULT_ABBREV);
- print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg, porcelain);
+ print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg,
+ porcelain, summary_width);
strbuf_release(&quickref);
}
}
-static int print_one_push_status(struct ref *ref, const char *dest, int count, int porcelain)
+static int print_one_push_status(struct ref *ref, const char *dest, int count,
+ int porcelain, int summary_width)
{
if (!count) {
char *url = transport_anonymize_url(dest);
@@ -376,62 +381,87 @@ static int print_one_push_status(struct ref *ref, const char *dest, int count, i
switch(ref->status) {
case REF_STATUS_NONE:
- print_ref_status('X', "[no match]", ref, NULL, NULL, porcelain);
+ print_ref_status('X', "[no match]", ref, NULL, NULL,
+ porcelain, summary_width);
break;
case REF_STATUS_REJECT_NODELETE:
print_ref_status('!', "[rejected]", ref, NULL,
- "remote does not support deleting refs", porcelain);
+ "remote does not support deleting refs",
+ porcelain, summary_width);
break;
case REF_STATUS_UPTODATE:
print_ref_status('=', "[up to date]", ref,
- ref->peer_ref, NULL, porcelain);
+ ref->peer_ref, NULL, porcelain, summary_width);
break;
case REF_STATUS_REJECT_NONFASTFORWARD:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "non-fast-forward", porcelain);
+ "non-fast-forward", porcelain, summary_width);
break;
case REF_STATUS_REJECT_ALREADY_EXISTS:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "already exists", porcelain);
+ "already exists", porcelain, summary_width);
break;
case REF_STATUS_REJECT_FETCH_FIRST:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "fetch first", porcelain);
+ "fetch first", porcelain, summary_width);
break;
case REF_STATUS_REJECT_NEEDS_FORCE:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "needs force", porcelain);
+ "needs force", porcelain, summary_width);
break;
case REF_STATUS_REJECT_STALE:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "stale info", porcelain);
+ "stale info", porcelain, summary_width);
break;
case REF_STATUS_REJECT_SHALLOW:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "new shallow roots not allowed", porcelain);
+ "new shallow roots not allowed",
+ porcelain, summary_width);
break;
case REF_STATUS_REMOTE_REJECT:
print_ref_status('!', "[remote rejected]", ref,
- ref->deletion ? NULL : ref->peer_ref,
- ref->remote_status, porcelain);
+ ref->deletion ? NULL : ref->peer_ref,
+ ref->remote_status, porcelain, summary_width);
break;
case REF_STATUS_EXPECTING_REPORT:
print_ref_status('!', "[remote failure]", ref,
- ref->deletion ? NULL : ref->peer_ref,
- "remote failed to report status", porcelain);
+ ref->deletion ? NULL : ref->peer_ref,
+ "remote failed to report status",
+ porcelain, summary_width);
break;
case REF_STATUS_ATOMIC_PUSH_FAILED:
print_ref_status('!', "[rejected]", ref, ref->peer_ref,
- "atomic push failed", porcelain);
+ "atomic push failed", porcelain, summary_width);
break;
case REF_STATUS_OK:
- print_ok_ref_status(ref, porcelain);
+ print_ok_ref_status(ref, porcelain, summary_width);
break;
}
return 1;
}
+static int measure_abbrev(const struct object_id *oid, int sofar)
+{
+ char hex[GIT_SHA1_HEXSZ + 1];
+ int w = find_unique_abbrev_r(hex, oid->hash, DEFAULT_ABBREV);
+
+ return (w < sofar) ? sofar : w;
+}
+
+int transport_summary_width(const struct ref *refs)
+{
+ int maxw = -1;
+
+ for (; refs; refs = refs->next) {
+ maxw = measure_abbrev(&refs->old_oid, maxw);
+ maxw = measure_abbrev(&refs->new_oid, maxw);
+ }
+ if (maxw < 0)
+ maxw = FALLBACK_DEFAULT_ABBREV;
+ return (2 * maxw + 3);
+}
+
void transport_print_push_status(const char *dest, struct ref *refs,
int verbose, int porcelain, unsigned int *reject_reasons)
{
@@ -439,25 +469,29 @@ void transport_print_push_status(const char *dest, struct ref *refs,
int n = 0;
unsigned char head_sha1[20];
char *head;
+ int summary_width = transport_summary_width(refs);
head = resolve_refdup("HEAD", RESOLVE_REF_READING, head_sha1, NULL);
if (verbose) {
for (ref = refs; ref; ref = ref->next)
if (ref->status == REF_STATUS_UPTODATE)
- n += print_one_push_status(ref, dest, n, porcelain);
+ n += print_one_push_status(ref, dest, n,
+ porcelain, summary_width);
}
for (ref = refs; ref; ref = ref->next)
if (ref->status == REF_STATUS_OK)
- n += print_one_push_status(ref, dest, n, porcelain);
+ n += print_one_push_status(ref, dest, n,
+ porcelain, summary_width);
*reject_reasons = 0;
for (ref = refs; ref; ref = ref->next) {
if (ref->status != REF_STATUS_NONE &&
ref->status != REF_STATUS_UPTODATE &&
ref->status != REF_STATUS_OK)
- n += print_one_push_status(ref, dest, n, porcelain);
+ n += print_one_push_status(ref, dest, n,
+ porcelain, summary_width);
if (ref->status == REF_STATUS_REJECT_NONFASTFORWARD) {
if (head != NULL && !strcmp(head, ref->name))
*reject_reasons |= REJECT_NON_FF_HEAD;
diff --git a/transport.h b/transport.h
index 68669f14d0..b8e4ee8099 100644
--- a/transport.h
+++ b/transport.h
@@ -147,8 +147,7 @@ struct transport {
#define TRANSPORT_PUSH_ATOMIC 8192
#define TRANSPORT_PUSH_OPTIONS 16384
-#define TRANSPORT_SUMMARY_WIDTH (2 * DEFAULT_ABBREV + 3)
-#define TRANSPORT_SUMMARY(x) (int)(TRANSPORT_SUMMARY_WIDTH + strlen(x) - gettext_width(x)), (x)
+extern int transport_summary_width(const struct ref *refs);
/* Returns a transport suitable for the url */
struct transport *transport_get(struct remote *, const char *);
diff --git a/upload-pack.c b/upload-pack.c
index d9e381f291..e0db8b42be 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -398,13 +398,13 @@ static int get_common_commits(void)
if (multi_ack == 2 && got_common
&& !got_other && ok_to_give_up()) {
sent_ready = 1;
- packet_write(1, "ACK %s ready\n", last_hex);
+ packet_write_fmt(1, "ACK %s ready\n", last_hex);
}
if (have_obj.nr == 0 || multi_ack)
- packet_write(1, "NAK\n");
+ packet_write_fmt(1, "NAK\n");
if (no_done && sent_ready) {
- packet_write(1, "ACK %s\n", last_hex);
+ packet_write_fmt(1, "ACK %s\n", last_hex);
return 0;
}
if (stateless_rpc)
@@ -421,20 +421,20 @@ static int get_common_commits(void)
const char *hex = sha1_to_hex(sha1);
if (multi_ack == 2) {
sent_ready = 1;
- packet_write(1, "ACK %s ready\n", hex);
+ packet_write_fmt(1, "ACK %s ready\n", hex);
} else
- packet_write(1, "ACK %s continue\n", hex);
+ packet_write_fmt(1, "ACK %s continue\n", hex);
}
break;
default:
got_common = 1;
memcpy(last_hex, sha1_to_hex(sha1), 41);
if (multi_ack == 2)
- packet_write(1, "ACK %s common\n", last_hex);
+ packet_write_fmt(1, "ACK %s common\n", last_hex);
else if (multi_ack)
- packet_write(1, "ACK %s continue\n", last_hex);
+ packet_write_fmt(1, "ACK %s continue\n", last_hex);
else if (have_obj.nr == 1)
- packet_write(1, "ACK %s\n", last_hex);
+ packet_write_fmt(1, "ACK %s\n", last_hex);
break;
}
continue;
@@ -442,10 +442,10 @@ static int get_common_commits(void)
if (!strcmp(line, "done")) {
if (have_obj.nr > 0) {
if (multi_ack)
- packet_write(1, "ACK %s\n", last_hex);
+ packet_write_fmt(1, "ACK %s\n", last_hex);
return 0;
}
- packet_write(1, "NAK\n");
+ packet_write_fmt(1, "NAK\n");
return -1;
}
die("git upload-pack: expected SHA1 list, got '%s'", line);
@@ -638,8 +638,8 @@ static void send_shallow(struct commit_list *result)
while (result) {
struct object *object = &result->item->object;
if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
- packet_write(1, "shallow %s",
- oid_to_hex(&object->oid));
+ packet_write_fmt(1, "shallow %s",
+ oid_to_hex(&object->oid));
register_shallow(object->oid.hash);
shallow_nr++;
}
@@ -655,8 +655,8 @@ static void send_unshallow(const struct object_array *shallows)
struct object *object = shallows->objects[i].item;
if (object->flags & NOT_SHALLOW) {
struct commit_list *parents;
- packet_write(1, "unshallow %s",
- oid_to_hex(&object->oid));
+ packet_write_fmt(1, "unshallow %s",
+ oid_to_hex(&object->oid));
object->flags &= ~CLIENT_SHALLOW;
/*
* We want to _register_ "object" as shallow, but we
@@ -932,7 +932,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
struct strbuf symref_info = STRBUF_INIT;
format_symref_info(&symref_info, cb_data);
- packet_write(1, "%s %s%c%s%s%s%s%s agent=%s\n",
+ packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n",
oid_to_hex(oid), refname_nons,
0, capabilities,
(allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
@@ -944,11 +944,11 @@ static int send_ref(const char *refname, const struct object_id *oid,
git_user_agent_sanitized());
strbuf_release(&symref_info);
} else {
- packet_write(1, "%s %s\n", oid_to_hex(oid), refname_nons);
+ packet_write_fmt(1, "%s %s\n", oid_to_hex(oid), refname_nons);
}
capabilities = NULL;
if (!peel_ref(refname, peeled.hash))
- packet_write(1, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons);
+ packet_write_fmt(1, "%s %s^{}\n", oid_to_hex(&peeled), refname_nons);
return 0;
}
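
The upload-pack changes above are a mechanical rename: every formatted
control message now goes through packet_write_fmt(), apparently to free the
plain packet_write() name for the buffer-oriented writer added alongside the
pkt-line changes in this series. The framing itself is unchanged pkt-line
output: a 4-digit lowercase-hex length that counts the 4-byte header itself,
followed by the payload. A hypothetical standalone framer, for illustration
only (not Git's pkt-line.c):

#include <stdio.h>
#include <string.h>

/* Emit one pkt-line: "%04x" length (including the header) plus payload. */
static void pkt_line(FILE *out, const char *payload)
{
	size_t len = strlen(payload) + 4;	/* header counts itself */
	fprintf(out, "%04x%s", (unsigned)len, payload);
}

int main(void)
{
	pkt_line(stdout, "NAK\n");			/* "0008NAK\n" */
	pkt_line(stdout, "ACK 1234beef common\n");	/* "0018ACK 1234beef common\n" */
	return 0;
}
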
diff --git a/write_or_die.c b/write_or_die.c
index 073443247a..eab8c8d0b9 100644
--- a/write_or_die.c
+++ b/write_or_die.c
@@ -1,19 +1,6 @@
#include "cache.h"
#include "run-command.h"
-static void check_pipe(int err)
-{
- if (err == EPIPE) {
- if (in_async())
- async_exit(141);
-
- signal(SIGPIPE, SIG_DFL);
- raise(SIGPIPE);
- /* Should never happen, but just in case... */
- exit(141);
- }
-}
-
/*
* Some cases use stdio, but want to flush after the write
* to get error handling (and to get better interactive
diff --git a/wt-status.c b/wt-status.c
index 0bd2781225..a2e9d332d8 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -438,7 +438,7 @@ static void wt_status_collect_changed_cb(struct diff_queue_struct *q,
switch (p->status) {
case DIFF_STATUS_ADDED:
- die("BUG: worktree status add???");
+ d->mode_worktree = p->two->mode;
break;
case DIFF_STATUS_DELETED:
@@ -548,6 +548,7 @@ static void wt_status_collect_changes_worktree(struct wt_status *s)
setup_revisions(0, NULL, &rev, NULL);
rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
DIFF_OPT_SET(&rev.diffopt, DIRTY_SUBMODULES);
+ rev.diffopt.ita_invisible_in_index = 1;
if (!s->show_untracked_files)
DIFF_OPT_SET(&rev.diffopt, IGNORE_UNTRACKED_IN_SUBMODULES);
if (s->ignore_submodule_arg) {
@@ -571,6 +572,7 @@ static void wt_status_collect_changes_index(struct wt_status *s)
setup_revisions(0, NULL, &rev, &opt);
DIFF_OPT_SET(&rev.diffopt, OVERRIDE_SUBMODULE_CONFIG);
+ rev.diffopt.ita_invisible_in_index = 1;
if (s->ignore_submodule_arg) {
handle_ignore_submodules_arg(&rev.diffopt, s->ignore_submodule_arg);
} else {
@@ -606,6 +608,8 @@ static void wt_status_collect_changes_initial(struct wt_status *s)
if (!ce_path_match(ce, &s->pathspec, NULL))
continue;
+ if (ce_intent_to_add(ce))
+ continue;
it = string_list_insert(&s->change, ce->name);
d = it->util;
if (!d) {
@@ -912,6 +916,7 @@ static void wt_longstatus_print_verbose(struct wt_status *s)
init_revisions(&rev, NULL);
DIFF_OPT_SET(&rev.diffopt, ALLOW_TEXTCONV);
+ rev.diffopt.ita_invisible_in_index = 1;
memset(&opt, 0, sizeof(opt));
opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference;