author     kotfu <kotfu@kotfu.net>   2018-05-06 00:20:46 -0600
committer  kotfu <kotfu@kotfu.net>   2018-05-06 00:20:46 -0600
commit     a0c0db15103a54dba20fb309956a7b3cf90bc645 (patch)
tree       6b60f99846784c357d9fac31116fb5680607dc86 /cmd2/parsing.py
parent     537542b8492f3c4d1c56296804ae82c123d0efce (diff)
Fix alias expansion when not followed by whitespace
Diffstat (limited to 'cmd2/parsing.py')
-rw-r--r--  cmd2/parsing.py | 27
1 file changed, 20 insertions, 7 deletions
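
For context on the diff below: the old pattern \A(\S+)(\s|\Z) only recognizes a command name when it is followed by whitespace or the end of the string, so a hypothetical alias such as "fake" is never expanded in input like "fake|grep foo". The change builds the second group dynamically so it also accepts redirection characters and terminators, and switches the first group to a non-greedy \S+? so the command name stops at the first such delimiter. The following is a minimal, self-contained sketch of that behaviour; the redirection characters ('|', '>') and terminator (';') used here are placeholder values chosen for illustration, not the real constants.REDIRECTION_CHARS or the parser's configured terminators.

    import re

    # Placeholder delimiter sets for illustration only; the real parser uses
    # constants.REDIRECTION_CHARS and whatever terminators it was given.
    redirection_chars = ['|', '>']
    terminators = [';']

    # Mirror the construction in the diff: escape the literal delimiters,
    # then add the non-literal whitespace and end-of-string alternatives.
    second_group_items = [re.escape(x) for x in redirection_chars + terminators]
    second_group_items.extend([r'\s', r'\Z'])
    new_pattern = re.compile(r'\A(\S+?)({})'.format('|'.join(second_group_items)))

    # The pattern being replaced.
    old_pattern = re.compile(r'\A(\S+)(\s|\Z)')

    for line in ('fake|grep foo', 'fake>out.txt', 'fake arg'):
        old_match = old_pattern.search(line)
        new_match = new_pattern.search(line)
        print('{!r}: old={!r} new={!r}'.format(
            line,
            old_match.group(1) if old_match else None,
            new_match.group(1) if new_match else None,
        ))

Running the sketch shows the difference: for "fake|grep foo" the old pattern captures "fake|grep" and for "fake>out.txt" it captures the whole string, so neither matches a defined alias name, while the new pattern captures "fake" in every case. Plain whitespace-separated input is handled identically by both.
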
diff --git a/cmd2/parsing.py b/cmd2/parsing.py
index 908e9272..eff29843 100644
--- a/cmd2/parsing.py
+++ b/cmd2/parsing.py
@@ -144,13 +144,26 @@ class StatementParser():
# aliases have to be a word, so make a regular expression
# that matches the first word in the line. This regex has two
# parts, the first parenthesis enclosed group matches one
- # or more non-whitespace characters, and the second group
- # matches either a whitespace character or the end of the
- # string. We use \A and \Z to ensure we always match the
- # beginning and end of a string that may have multiple
- # lines
- self.command_pattern = re.compile(r'\A(\S+)(\s|\Z)')
-
+ # or more non-whitespace characters with a non-greedy match
+ # (that's what the '+?' part does). The second group must be
+ # dynamically created because it needs to match either whitespace,
+ # something in REDIRECTION_CHARS, one of the terminators,
+ # or the end of the string. We use \A and \Z to ensure we always
+ # match the beginning and end of a string that may have multiple
+ # lines (if it's a multiline command)
+ second_group_items = []
+ second_group_items.extend(constants.REDIRECTION_CHARS)
+ second_group_items.extend(terminators)
+ # escape each item so it is guaranteed to be treated as a literal
+ second_group_items = [re.escape(x) for x in second_group_items]
+ # add the whitespace and end of string, not escaped because they
+ # are not literals
+ second_group_items.extend([r'\s', r'\Z'])
+ # join them up with a pipe
+ second_group = '|'.join(second_group_items)
+ # build the regular expression
+ expr = r'\A(\S+?)({})'.format(second_group)
+ self.command_pattern = re.compile(expr)

def tokenize(self, line: str) -> List[str]:
"""Lex a string into a list of tokens.