author     Todd Leonhardt <todd.leonhardt@gmail.com>   2019-08-02 23:38:52 -0400
committer  GitHub <noreply@github.com>                 2019-08-02 23:38:52 -0400
commit     a53b76a2b1cbd257eb5b359a93e2e12604dea7c9 (patch)
tree       07380a7ced0b7499f97d90ed9f6510dcb47ed966
parent     54e30144d153b612563a1b8e280da28f10ff22a3 (diff)
parent     f1c87a9c220d3a904570f119c6a80bdef225008a (diff)
download   cmd2-git-a53b76a2b1cbd257eb5b359a93e2e12604dea7c9.tar.gz
Merge pull request #749 from python-cmd2/termination
Added terminators to completion delimiters
-rwxr-xr-x  cmd2/cmd2.py              55
-rwxr-xr-x  cmd2/parsing.py            4
-rwxr-xr-x  tests/test_completion.py  15
3 files changed, 14 insertions, 60 deletions
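
In short, this merge has Cmd.tokens_for_completion() delegate punctuation splitting to the StatementParser and adds the parser's statement terminators to the readline completer delimiters, so a terminator such as ';' ends a completion word just like a quote or redirector does. A minimal standalone sketch of the delimiter string being assembled (the QUOTES, REDIRECTION_CHARS, and terminator values below are assumptions mirroring cmd2's defaults, not imports from the library):

    # Sketch only: build the readline word-break string the way the new code in
    # cmd2.Cmd does, so whitespace, quotes, redirectors, and terminators all
    # delimit completion words. The constant values are assumed defaults.
    import readline

    QUOTES = ['"', "'"]
    REDIRECTION_CHARS = ['|', '<', '>']
    terminators = [';']  # assumed default cmd2 statement terminator

    completer_delims = " \t\n"
    completer_delims += ''.join(QUOTES)
    completer_delims += ''.join(REDIRECTION_CHARS)
    completer_delims += ''.join(terminators)

    readline.set_completer_delims(completer_delims)
    print(readline.get_completer_delims())
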
diff --git a/cmd2/cmd2.py b/cmd2/cmd2.py
index 5ae83f18..514d8a05 100755
--- a/cmd2/cmd2.py
+++ b/cmd2/cmd2.py
@@ -824,56 +824,8 @@ class Cmd(cmd.Cmd):
# Return empty lists since this means the line is malformed.
return [], []
- # We need to treat redirection characters (|, <, >) as word breaks when they are in unquoted strings.
- # Go through each token and further split them on these characters. Each run of redirect characters
- # is treated as a single token.
- raw_tokens = []
-
- for cur_initial_token in initial_tokens:
-
- # Save tokens up to 1 character in length or quoted tokens. No need to parse these.
- if len(cur_initial_token) <= 1 or cur_initial_token[0] in constants.QUOTES:
- raw_tokens.append(cur_initial_token)
- continue
-
- # Iterate over each character in this token
- cur_index = 0
- cur_char = cur_initial_token[cur_index]
-
- # Keep track of the token we are building
- cur_raw_token = ''
-
- while True:
- if cur_char not in constants.REDIRECTION_CHARS:
-
- # Keep appending to cur_raw_token until we hit a redirect char
- while cur_char not in constants.REDIRECTION_CHARS:
- cur_raw_token += cur_char
- cur_index += 1
- if cur_index < len(cur_initial_token):
- cur_char = cur_initial_token[cur_index]
- else:
- break
-
- else:
- redirect_char = cur_char
-
- # Keep appending to cur_raw_token until we hit something other than redirect_char
- while cur_char == redirect_char:
- cur_raw_token += cur_char
- cur_index += 1
- if cur_index < len(cur_initial_token):
- cur_char = cur_initial_token[cur_index]
- else:
- break
-
- # Save the current token
- raw_tokens.append(cur_raw_token)
- cur_raw_token = ''
-
- # Check if we've viewed all characters
- if cur_index >= len(cur_initial_token):
- break
+ # Further split tokens on punctuation characters
+ raw_tokens = self.statement_parser.split_on_punctuation(initial_tokens)
# Save the unquoted tokens
tokens = [utils.strip_quotes(cur_token) for cur_token in raw_tokens]
@@ -2299,10 +2251,11 @@ class Cmd(cmd.Cmd):
readline_settings.completer = readline.get_completer()
readline.set_completer(self.complete)
- # Break words on whitespace, quotes, and redirectors when tab completing
+ # Set the readline word delimiters for completion
completer_delims = " \t\n"
completer_delims += ''.join(constants.QUOTES)
completer_delims += ''.join(constants.REDIRECTION_CHARS)
+ completer_delims += ''.join(self.statement_parser.terminators)
readline_settings.delims = readline.get_completer_delims()
readline.set_completer_delims(completer_delims)
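
The loop removed above survives as StatementParser.split_on_punctuation(). For illustration, a hedged, self-contained re-implementation of the same splitting rule (this is not cmd2's actual method, and the punctuation set is an assumption): quoted or single-character tokens pass through untouched, and within an unquoted token each run of a repeated punctuation character becomes its own token.

    # Illustrative sketch of the splitting rule, not cmd2's implementation.
    from typing import List

    QUOTES = ['"', "'"]
    PUNCTUATION = ['|', '<', '>', ';']  # assumed: redirectors plus the ';' terminator

    def split_on_punctuation(tokens: List[str]) -> List[str]:
        out: List[str] = []
        for tok in tokens:
            # Leave quoted or single-character tokens alone
            if len(tok) <= 1 or tok[0] in QUOTES:
                out.append(tok)
                continue
            cur = ''
            i = 0
            while i < len(tok):
                ch = tok[i]
                if ch in PUNCTUATION:
                    if cur:
                        out.append(cur)
                        cur = ''
                    # Consume a run of the same punctuation character (e.g. '>>')
                    run = ch
                    while i + 1 < len(tok) and tok[i + 1] == ch:
                        run += ch
                        i += 1
                    out.append(run)
                else:
                    cur += ch
                i += 1
            if cur:
                out.append(cur)
        return out

    print(split_on_punctuation(['command', '|', '<', ';>>file']))
    # -> ['command', '|', '<', ';', '>>', 'file']
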
diff --git a/cmd2/parsing.py b/cmd2/parsing.py
index 707140f7..84b2468e 100755
--- a/cmd2/parsing.py
+++ b/cmd2/parsing.py
@@ -379,7 +379,7 @@ class StatementParser:
tokens = shlex_split(line)
# custom lexing
- tokens = self._split_on_punctuation(tokens)
+ tokens = self.split_on_punctuation(tokens)
return tokens
def parse(self, line: str, *, expand: bool = True) -> Statement:
@@ -675,7 +675,7 @@ class StatementParser:
return command, args
- def _split_on_punctuation(self, tokens: List[str]) -> List[str]:
+ def split_on_punctuation(self, tokens: List[str]) -> List[str]:
"""Further splits tokens from a command line using punctuation characters
Punctuation characters are treated as word breaks when they are in
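
With the leading underscore dropped, the splitter becomes part of the parser's public surface and cmd2.Cmd calls it directly rather than carrying its own copy of the logic. A short usage sketch, assuming StatementParser's default construction (the file name is illustrative):

    from cmd2.parsing import StatementParser

    parser = StatementParser()
    print(parser.split_on_punctuation(['command', ';>>out.txt']))
    # By the splitting rules the new test exercises: ['command', ';', '>>', 'out.txt']
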
diff --git a/tests/test_completion.py b/tests/test_completion.py
index dcca1c7d..cf5dcf75 100755
--- a/tests/test_completion.py
+++ b/tests/test_completion.py
@@ -689,26 +689,27 @@ def test_tokens_for_completion_unclosed_quote(cmd2_app):
assert expected_tokens == tokens
assert expected_raw_tokens == raw_tokens
-def test_tokens_for_completion_redirect(cmd2_app):
- text = '>>file'
- line = 'command | < {}'.format(text)
+def test_tokens_for_completion_punctuation(cmd2_app):
+ """Test that redirectors and terminators are word delimiters"""
+ text = 'file'
+ line = 'command | < ;>>{}'.format(text)
endidx = len(line)
begidx = endidx - len(text)
- expected_tokens = ['command', '|', '<', '>>', 'file']
- expected_raw_tokens = ['command', '|', '<', '>>', 'file']
+ expected_tokens = ['command', '|', '<', ';', '>>', 'file']
+ expected_raw_tokens = ['command', '|', '<', ';', '>>', 'file']
tokens, raw_tokens = cmd2_app.tokens_for_completion(line, begidx, endidx)
assert expected_tokens == tokens
assert expected_raw_tokens == raw_tokens
-def test_tokens_for_completion_quoted_redirect(cmd2_app):
+def test_tokens_for_completion_quoted_punctuation(cmd2_app):
+ """Test that quoted punctuation characters are not word delimiters"""
text = '>file'
line = 'command "{}'.format(text)
endidx = len(line)
begidx = endidx - len(text)
- cmd2_app.statement_parser.redirection = True
expected_tokens = ['command', '>file']
expected_raw_tokens = ['command', '">file']
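
For completeness, a runnable version of what the renamed test_tokens_for_completion_punctuation asserts, assuming a plain cmd2.Cmd app with default settings; the comments show the values the test expects:

    import cmd2

    app = cmd2.Cmd()
    text = 'file'
    line = 'command | < ;>>{}'.format(text)
    endidx = len(line)
    begidx = endidx - len(text)

    tokens, raw_tokens = app.tokens_for_completion(line, begidx, endidx)
    print(tokens)      # expected: ['command', '|', '<', ';', '>>', 'file']
    print(raw_tokens)  # expected: ['command', '|', '<', ';', '>>', 'file']
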