Diffstat (limited to 'tests')
-rw-r--r--               tests/test_asm.py                  4
-rw-r--r--               tests/test_basic.py                2
-rw-r--r--               tests/test_basic_api.py           20
-rw-r--r--               tests/test_bibtex.py             290
-rw-r--r--               tests/test_cfm.py                 32
-rw-r--r--               tests/test_clexer.py             306
-rw-r--r--               tests/test_cpp.py                 12
-rw-r--r--               tests/test_csound.py             490
-rw-r--r--               tests/test_data.py               102
-rw-r--r--               tests/test_examplefiles.py        10
-rw-r--r--               tests/test_ezhil.py              246
-rw-r--r--               tests/test_grammar_notation.py    94
-rw-r--r--               tests/test_html_formatter.py       6
-rw-r--r--               tests/test_idris.py               74
-rw-r--r--               tests/test_irc_formatter.py        2
-rw-r--r--               tests/test_java.py                32
-rw-r--r--               tests/test_julia.py               64
-rw-r--r--               tests/test_kotlin.py             158
-rw-r--r--               tests/test_objectiveclexer.py     84
-rw-r--r--               tests/test_praat.py              230
-rw-r--r--               tests/test_promql.py              18
-rw-r--r--               tests/test_python.py             118
-rw-r--r--               tests/test_qbasiclexer.py         34
-rw-r--r--               tests/test_r.py                   76
-rw-r--r--               tests/test_regexlexer.py           2
-rw-r--r--               tests/test_rtf_formatter.py       32
-rw-r--r--               tests/test_ruby.py               186
-rw-r--r--               tests/test_shell.py              232
-rw-r--r--               tests/test_smarty.py              30
-rw-r--r--               tests/test_sql.py                  2
-rw-r--r--               tests/test_textfmts.py            64
-rwxr-xr-x [-rw-r--r--]  tests/test_usd.py                426
-rw-r--r--               tests/test_util.py                29
-rw-r--r--               tests/test_whiley.py              14
-rw-r--r--               tests/test_yang.py                76
35 files changed, 1794 insertions(+), 1803 deletions(-)
diff --git a/tests/test_asm.py b/tests/test_asm.py
index d351ce30..23e9f344 100644
--- a/tests/test_asm.py
+++ b/tests/test_asm.py
@@ -62,7 +62,7 @@ def test_cpuid(lexer_nasm):
# cpu id, but as a single token. See bug #1517
fragment = 'cpuid'
expected = [
- (Token.Name.Function, u'cpuid'),
- (Token.Text, u'\n'),
+ (Token.Name.Function, 'cpuid'),
+ (Token.Text, '\n'),
]
assert expected == list(lexer_nasm.get_tokens(fragment))
diff --git a/tests/test_basic.py b/tests/test_basic.py
index e3826b28..384be6ec 100644
--- a/tests/test_basic.py
+++ b/tests/test_basic.py
@@ -57,7 +57,7 @@ def test_can_lex_integer(lexer):
def test_can_lex_names(lexer):
- assert_are_tokens_of_type(lexer, u'thingy thingy123 _thingy _123', Name)
+ assert_are_tokens_of_type(lexer, 'thingy thingy123 _thingy _123', Name)
def test_can_recover_after_unterminated_string(lexer):
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 378ea5e4..bbfbb14e 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -173,20 +173,20 @@ def test_formatter_encodings():
# unicode output
fmt = HtmlFormatter()
- tokens = [(Text, u"ä")]
+ tokens = [(Text, "ä")]
out = format(tokens, fmt)
assert type(out) is str
- assert u"ä" in out
+ assert "ä" in out
# encoding option
fmt = HtmlFormatter(encoding="latin1")
- tokens = [(Text, u"ä")]
- assert u"ä".encode("latin1") in format(tokens, fmt)
+ tokens = [(Text, "ä")]
+ assert "ä".encode("latin1") in format(tokens, fmt)
# encoding and outencoding option
fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
- tokens = [(Text, u"ä")]
- assert u"ä".encode("utf8") in format(tokens, fmt)
+ tokens = [(Text, "ä")]
+ assert "ä".encode("utf8") in format(tokens, fmt)
@pytest.mark.parametrize('cls', [getattr(formatters, name)
@@ -307,7 +307,7 @@ class TestFilters:
def test_codetag(self):
lx = lexers.PythonLexer()
lx.add_filter('codetagify')
- text = u'# BUG: text'
+ text = '# BUG: text'
tokens = list(lx.get_tokens(text))
assert '# ' == tokens[0][1]
assert 'BUG' == tokens[1][1]
@@ -316,15 +316,15 @@ class TestFilters:
# ticket #368
lx = lexers.PythonLexer()
lx.add_filter('codetagify')
- text = u'# DEBUG: text'
+ text = '# DEBUG: text'
tokens = list(lx.get_tokens(text))
assert '# DEBUG: text' == tokens[0][1]
def test_symbols(self):
lx = lexers.IsabelleLexer()
lx.add_filter('symbols')
- text = u'lemma "A \\<Longrightarrow> B"'
+ text = 'lemma "A \\<Longrightarrow> B"'
tokens = list(lx.get_tokens(text))
assert 'lemma' == tokens[0][1]
assert 'A ' == tokens[3][1]
- assert u'\U000027f9' == tokens[4][1]
+ assert '\U000027f9' == tokens[4][1]
diff --git a/tests/test_bibtex.py b/tests/test_bibtex.py
index dfa668f2..d7bc02ac 100644
--- a/tests/test_bibtex.py
+++ b/tests/test_bibtex.py
@@ -21,39 +21,39 @@ def lexer():
def test_preamble(lexer):
- data = u'@PREAMBLE{"% some LaTeX code here"}'
+ data = '@PREAMBLE{"% some LaTeX code here"}'
tokens = [
- (Token.Name.Class, u'@PREAMBLE'),
- (Token.Punctuation, u'{'),
- (Token.String, u'"'),
- (Token.String, u'% some LaTeX code here'),
- (Token.String, u'"'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Name.Class, '@PREAMBLE'),
+ (Token.Punctuation, '{'),
+ (Token.String, '"'),
+ (Token.String, '% some LaTeX code here'),
+ (Token.String, '"'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(data)) == tokens
def test_string(lexer):
- data = u'@STRING(SCI = "Science")'
+ data = '@STRING(SCI = "Science")'
tokens = [
- (Token.Name.Class, u'@STRING'),
- (Token.Punctuation, u'('),
- (Token.Name.Attribute, u'SCI'),
- (Token.Text, u' '),
- (Token.Punctuation, u'='),
- (Token.Text, u' '),
- (Token.String, u'"'),
- (Token.String, u'Science'),
- (Token.String, u'"'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Name.Class, '@STRING'),
+ (Token.Punctuation, '('),
+ (Token.Name.Attribute, 'SCI'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '='),
+ (Token.Text, ' '),
+ (Token.String, '"'),
+ (Token.String, 'Science'),
+ (Token.String, '"'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(data)) == tokens
def test_entry(lexer):
- data = u"""
+ data = """
This is a comment.
@ARTICLE{ruckenstein-diffusion,
@@ -65,56 +65,56 @@ def test_entry(lexer):
"""
tokens = [
- (Token.Comment, u'This is a comment.'),
- (Token.Text, u'\n\n'),
- (Token.Name.Class, u'@ARTICLE'),
- (Token.Punctuation, u'{'),
- (Token.Name.Label, u'ruckenstein-diffusion'),
- (Token.Punctuation, u','),
- (Token.Text, u'\n '),
- (Token.Name.Attribute, u'author'),
- (Token.Text, u' '),
- (Token.Punctuation, u'='),
- (Token.Text, u' '),
- (Token.String, u'"'),
- (Token.String, u'Liu, Hongquin'),
- (Token.String, u'"'),
- (Token.Text, u' '),
- (Token.Punctuation, u'#'),
- (Token.Text, u' '),
- (Token.Name.Variable, u'and'),
- (Token.Text, u' '),
- (Token.Punctuation, u'#'),
- (Token.Text, u' '),
- (Token.String, u'"'),
- (Token.String, u'Ruckenstein, Eli'),
- (Token.String, u'"'),
- (Token.Punctuation, u','),
- (Token.Text, u'\n '),
- (Token.Name.Attribute, u'year'),
- (Token.Text, u' '),
- (Token.Punctuation, u'='),
- (Token.Text, u' '),
- (Token.Number, u'1997'),
- (Token.Punctuation, u','),
- (Token.Text, u'\n '),
- (Token.Name.Attribute, u'month'),
- (Token.Text, u' '),
- (Token.Punctuation, u'='),
- (Token.Text, u' '),
- (Token.Name.Variable, u'JAN'),
- (Token.Punctuation, u','),
- (Token.Text, u'\n '),
- (Token.Name.Attribute, u'pages'),
- (Token.Text, u' '),
- (Token.Punctuation, u'='),
- (Token.Text, u' '),
- (Token.String, u'"'),
- (Token.String, u'888-895'),
- (Token.String, u'"'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Comment, 'This is a comment.'),
+ (Token.Text, '\n\n'),
+ (Token.Name.Class, '@ARTICLE'),
+ (Token.Punctuation, '{'),
+ (Token.Name.Label, 'ruckenstein-diffusion'),
+ (Token.Punctuation, ','),
+ (Token.Text, '\n '),
+ (Token.Name.Attribute, 'author'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '='),
+ (Token.Text, ' '),
+ (Token.String, '"'),
+ (Token.String, 'Liu, Hongquin'),
+ (Token.String, '"'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '#'),
+ (Token.Text, ' '),
+ (Token.Name.Variable, 'and'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '#'),
+ (Token.Text, ' '),
+ (Token.String, '"'),
+ (Token.String, 'Ruckenstein, Eli'),
+ (Token.String, '"'),
+ (Token.Punctuation, ','),
+ (Token.Text, '\n '),
+ (Token.Name.Attribute, 'year'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '='),
+ (Token.Text, ' '),
+ (Token.Number, '1997'),
+ (Token.Punctuation, ','),
+ (Token.Text, '\n '),
+ (Token.Name.Attribute, 'month'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '='),
+ (Token.Text, ' '),
+ (Token.Name.Variable, 'JAN'),
+ (Token.Punctuation, ','),
+ (Token.Text, '\n '),
+ (Token.Name.Attribute, 'pages'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '='),
+ (Token.Text, ' '),
+ (Token.String, '"'),
+ (Token.String, '888-895'),
+ (Token.String, '"'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens
@@ -122,9 +122,9 @@ def test_entry(lexer):
def test_comment(lexer):
data = '@COMMENT{test}'
tokens = [
- (Token.Comment, u'@COMMENT'),
- (Token.Comment, u'{test}'),
- (Token.Text, u'\n'),
+ (Token.Comment, '@COMMENT'),
+ (Token.Comment, '{test}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(data)) == tokens
@@ -132,12 +132,12 @@ def test_comment(lexer):
def test_missing_body(lexer):
data = '@ARTICLE xxx'
tokens = [
- (Token.Name.Class, u'@ARTICLE'),
- (Token.Text, u' '),
- (Token.Error, u'x'),
- (Token.Error, u'x'),
- (Token.Error, u'x'),
- (Token.Text, u'\n'),
+ (Token.Name.Class, '@ARTICLE'),
+ (Token.Text, ' '),
+ (Token.Error, 'x'),
+ (Token.Error, 'x'),
+ (Token.Error, 'x'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(data)) == tokens
@@ -145,12 +145,12 @@ def test_missing_body(lexer):
def test_mismatched_brace(lexer):
data = '@PREAMBLE(""}'
tokens = [
- (Token.Name.Class, u'@PREAMBLE'),
- (Token.Punctuation, u'('),
- (Token.String, u'"'),
- (Token.String, u'"'),
- (Token.Error, u'}'),
- (Token.Text, u'\n'),
+ (Token.Name.Class, '@PREAMBLE'),
+ (Token.Punctuation, '('),
+ (Token.String, '"'),
+ (Token.String, '"'),
+ (Token.Error, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(data)) == tokens
@@ -177,64 +177,64 @@ def test_basic_bst():
"""
tokens = [
(Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"),
- (Token.Text, u'\n\n'),
- (Token.Keyword, u'INTEGERS'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Text, u' '),
- (Token.Name.Variable, u'output.state'),
- (Token.Text, u' '),
- (Token.Name.Variable, u'before.all'),
- (Token.Text, u' '),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n\n'),
- (Token.Keyword, u'FUNCTION'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Name.Variable, u'sort.format.title'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u' '),
- (Token.Name.Function, u"'t"),
- (Token.Text, u' '),
- (Token.Name.Variable, u':='),
- (Token.Text, u'\n'),
- (Token.Literal.String, u'"A "'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'#2'),
- (Token.Text, u'\n '),
- (Token.Literal.String, u'"An "'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'#3'),
- (Token.Text, u'\n '),
- (Token.Literal.String, u'"The "'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'#4'),
- (Token.Text, u' '),
- (Token.Name.Variable, u't'),
- (Token.Text, u' '),
- (Token.Name.Variable, u'chop.word'),
- (Token.Text, u'\n '),
- (Token.Name.Variable, u'chop.word'),
- (Token.Text, u'\n'),
- (Token.Name.Variable, u'chop.word'),
- (Token.Text, u'\n'),
- (Token.Name.Variable, u'sortify'),
- (Token.Text, u'\n'),
- (Token.Literal.Number, u'#1'),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'global.max$'),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'substring$'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n\n'),
- (Token.Keyword, u'ITERATE'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Name.Builtin, u'call.type$'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Text, '\n\n'),
+ (Token.Keyword, 'INTEGERS'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Text, ' '),
+ (Token.Name.Variable, 'output.state'),
+ (Token.Text, ' '),
+ (Token.Name.Variable, 'before.all'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n\n'),
+ (Token.Keyword, 'FUNCTION'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Name.Variable, 'sort.format.title'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, ' '),
+ (Token.Name.Function, "'t"),
+ (Token.Text, ' '),
+ (Token.Name.Variable, ':='),
+ (Token.Text, '\n'),
+ (Token.Literal.String, '"A "'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '#2'),
+ (Token.Text, '\n '),
+ (Token.Literal.String, '"An "'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '#3'),
+ (Token.Text, '\n '),
+ (Token.Literal.String, '"The "'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '#4'),
+ (Token.Text, ' '),
+ (Token.Name.Variable, 't'),
+ (Token.Text, ' '),
+ (Token.Name.Variable, 'chop.word'),
+ (Token.Text, '\n '),
+ (Token.Name.Variable, 'chop.word'),
+ (Token.Text, '\n'),
+ (Token.Name.Variable, 'chop.word'),
+ (Token.Text, '\n'),
+ (Token.Name.Variable, 'sortify'),
+ (Token.Text, '\n'),
+ (Token.Literal.Number, '#1'),
+ (Token.Text, ' '),
+ (Token.Name.Builtin, 'global.max$'),
+ (Token.Text, ' '),
+ (Token.Name.Builtin, 'substring$'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n\n'),
+ (Token.Keyword, 'ITERATE'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Name.Builtin, 'call.type$'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens
diff --git a/tests/test_cfm.py b/tests/test_cfm.py
index a1600944..a89e160d 100644
--- a/tests/test_cfm.py
+++ b/tests/test_cfm.py
@@ -19,28 +19,28 @@ def lexer():
def test_basic_comment(lexer):
- fragment = u'<!--- cfcomment --->'
+ fragment = '<!--- cfcomment --->'
expected = [
- (Token.Text, u''),
- (Token.Comment.Multiline, u'<!---'),
- (Token.Comment.Multiline, u' cfcomment '),
- (Token.Comment.Multiline, u'--->'),
- (Token.Text, u'\n'),
+ (Token.Text, ''),
+ (Token.Comment.Multiline, '<!---'),
+ (Token.Comment.Multiline, ' cfcomment '),
+ (Token.Comment.Multiline, '--->'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
def test_nested_comment(lexer):
- fragment = u'<!--- nested <!--- cfcomment ---> --->'
+ fragment = '<!--- nested <!--- cfcomment ---> --->'
expected = [
- (Token.Text, u''),
- (Token.Comment.Multiline, u'<!---'),
- (Token.Comment.Multiline, u' nested '),
- (Token.Comment.Multiline, u'<!---'),
- (Token.Comment.Multiline, u' cfcomment '),
- (Token.Comment.Multiline, u'--->'),
- (Token.Comment.Multiline, u' '),
- (Token.Comment.Multiline, u'--->'),
- (Token.Text, u'\n'),
+ (Token.Text, ''),
+ (Token.Comment.Multiline, '<!---'),
+ (Token.Comment.Multiline, ' nested '),
+ (Token.Comment.Multiline, '<!---'),
+ (Token.Comment.Multiline, ' cfcomment '),
+ (Token.Comment.Multiline, '--->'),
+ (Token.Comment.Multiline, ' '),
+ (Token.Comment.Multiline, '--->'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_clexer.py b/tests/test_clexer.py
index d40ec491..d6561243 100644
--- a/tests/test_clexer.py
+++ b/tests/test_clexer.py
@@ -33,7 +33,7 @@ def test_numbers(lexer):
def test_switch(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
switch (0)
@@ -45,48 +45,48 @@ def test_switch(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'switch'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'case'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'default'),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'switch'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'case'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'default'),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_switch_space_before_colon(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
switch (0)
@@ -98,50 +98,50 @@ def test_switch_space_before_colon(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'switch'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'case'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Text, u' '),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'default'),
- (Token.Text, u' '),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'switch'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'case'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Text, ' '),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'default'),
+ (Token.Text, ' '),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_label(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
foo:
@@ -149,31 +149,31 @@ def test_label(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Punctuation, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Name.Label, 'foo'),
+ (Token.Punctuation, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'goto'),
+ (Token.Text, ' '),
+ (Token.Name, 'foo'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_label_space_before_colon(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
foo :
@@ -181,32 +181,32 @@ def test_label_space_before_colon(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Text, u' '),
- (Token.Punctuation, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Name.Label, 'foo'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'goto'),
+ (Token.Text, ' '),
+ (Token.Name, 'foo'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_label_followed_by_statement(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
foo:return 0;
@@ -214,52 +214,52 @@ def test_label_followed_by_statement(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Punctuation, u':'),
- (Token.Keyword, u'return'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Name.Label, 'foo'),
+ (Token.Punctuation, ':'),
+ (Token.Keyword, 'return'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'goto'),
+ (Token.Text, ' '),
+ (Token.Name, 'foo'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_preproc_file(lexer):
- fragment = u'#include <foo>\n'
+ fragment = '#include <foo>\n'
tokens = [
- (Token.Comment.Preproc, u'#'),
- (Token.Comment.Preproc, u'include'),
- (Token.Text, u' '),
- (Token.Comment.PreprocFile, u'<foo>'),
- (Token.Comment.Preproc, u'\n'),
+ (Token.Comment.Preproc, '#'),
+ (Token.Comment.Preproc, 'include'),
+ (Token.Text, ' '),
+ (Token.Comment.PreprocFile, '<foo>'),
+ (Token.Comment.Preproc, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_preproc_file2(lexer):
- fragment = u'#include "foo.h"\n'
+ fragment = '#include "foo.h"\n'
tokens = [
- (Token.Comment.Preproc, u'#'),
- (Token.Comment.Preproc, u'include'),
- (Token.Text, u' '),
- (Token.Comment.PreprocFile, u'"foo.h"'),
- (Token.Comment.Preproc, u'\n'),
+ (Token.Comment.Preproc, '#'),
+ (Token.Comment.Preproc, 'include'),
+ (Token.Text, ' '),
+ (Token.Comment.PreprocFile, '"foo.h"'),
+ (Token.Comment.Preproc, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_cpp.py b/tests/test_cpp.py
index a3ef33a3..e847079d 100644
--- a/tests/test_cpp.py
+++ b/tests/test_cpp.py
@@ -21,18 +21,18 @@ def lexer():
def test_good_comment(lexer):
- fragment = u'/* foo */\n'
+ fragment = '/* foo */\n'
tokens = [
- (Token.Comment.Multiline, u'/* foo */'),
- (Token.Text, u'\n'),
+ (Token.Comment.Multiline, '/* foo */'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_open_comment(lexer):
- fragment = u'/* foo\n'
+ fragment = '/* foo\n'
tokens = [
- (Token.Comment.Multiline, u'/* foo\n'),
+ (Token.Comment.Multiline, '/* foo\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -52,4 +52,4 @@ def test_guess_c_lexer():
}
'''
lexer = guess_lexer(code)
- assert isinstance(lexer, CLexer)
\ No newline at end of file
+ assert isinstance(lexer, CLexer)
diff --git a/tests/test_csound.py b/tests/test_csound.py
index 0186da7a..26672043 100644
--- a/tests/test_csound.py
+++ b/tests/test_csound.py
@@ -30,12 +30,12 @@ def test_comments(lexer):
// comment
''')
tokens = [
- (Comment.Multiline, u'/*\n * comment\n */'),
- (Text, u'\n'),
- (Comment.Single, u'; comment'),
- (Text, u'\n'),
- (Comment.Single, u'// comment'),
- (Text, u'\n')
+ (Comment.Multiline, '/*\n * comment\n */'),
+ (Text, '\n'),
+ (Comment.Single, '; comment'),
+ (Text, '\n'),
+ (Comment.Single, '// comment'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -48,38 +48,38 @@ def test_instrument_blocks(lexer):
endin
''')
tokens = [
- (Keyword.Declaration, u'instr'),
- (Comment.Multiline, u'/**/'),
- (Name.Function, u'1'),
- (Punctuation, u','),
- (Comment.Multiline, u'/**/'),
- (Name.Function, u'N_a_M_e_'),
- (Punctuation, u','),
- (Comment.Multiline, u'/**/'),
- (Punctuation, u'+'),
- (Name.Function, u'Name'),
- (Comment.Multiline, u'/**/'),
- (Comment.Single, u'//'),
- (Text, u'\n'),
- (Text, u' '),
- (Keyword.Type, u'i'),
- (Name, u'Duration'),
- (Text, u' '),
- (Operator, u'='),
- (Text, u' '),
- (Name.Variable.Instance, u'p3'),
- (Text, u'\n'),
- (Text, u' '),
- (Name.Builtin, u'outc'),
- (Punctuation, u':'),
- (Keyword.Type, u'a'),
- (Punctuation, u'('),
- (Keyword.Type, u'a'),
- (Name, u'Signal'),
- (Punctuation, u')'),
- (Text, u'\n'),
- (Keyword.Declaration, u'endin'),
- (Text, u'\n')
+ (Keyword.Declaration, 'instr'),
+ (Comment.Multiline, '/**/'),
+ (Name.Function, '1'),
+ (Punctuation, ','),
+ (Comment.Multiline, '/**/'),
+ (Name.Function, 'N_a_M_e_'),
+ (Punctuation, ','),
+ (Comment.Multiline, '/**/'),
+ (Punctuation, '+'),
+ (Name.Function, 'Name'),
+ (Comment.Multiline, '/**/'),
+ (Comment.Single, '//'),
+ (Text, '\n'),
+ (Text, ' '),
+ (Keyword.Type, 'i'),
+ (Name, 'Duration'),
+ (Text, ' '),
+ (Operator, '='),
+ (Text, ' '),
+ (Name.Variable.Instance, 'p3'),
+ (Text, '\n'),
+ (Text, ' '),
+ (Name.Builtin, 'outc'),
+ (Punctuation, ':'),
+ (Keyword.Type, 'a'),
+ (Punctuation, '('),
+ (Keyword.Type, 'a'),
+ (Name, 'Signal'),
+ (Punctuation, ')'),
+ (Text, '\n'),
+ (Keyword.Declaration, 'endin'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -91,22 +91,22 @@ def test_user_defined_opcodes(lexer):
endop
''')
tokens = [
- (Keyword.Declaration, u'opcode'),
- (Comment.Multiline, u'/**/'),
- (Name.Function, u'aUDO'),
- (Punctuation, u','),
- (Comment.Multiline, u'/**/'),
- (Keyword.Type, u'i[]'),
- (Punctuation, u','),
- (Comment.Multiline, u'/**/'),
- (Keyword.Type, u'aik'),
- (Comment.Single, u'//'),
- (Text, u'\n'),
- (Text, u' '),
- (Name.Function, u'aUDO'),
- (Text, u'\n'),
- (Keyword.Declaration, u'endop'),
- (Text, u'\n')
+ (Keyword.Declaration, 'opcode'),
+ (Comment.Multiline, '/**/'),
+ (Name.Function, 'aUDO'),
+ (Punctuation, ','),
+ (Comment.Multiline, '/**/'),
+ (Keyword.Type, 'i[]'),
+ (Punctuation, ','),
+ (Comment.Multiline, '/**/'),
+ (Keyword.Type, 'aik'),
+ (Comment.Single, '//'),
+ (Text, '\n'),
+ (Text, ' '),
+ (Name.Function, 'aUDO'),
+ (Text, '\n'),
+ (Keyword.Declaration, 'endop'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -114,27 +114,27 @@ def test_user_defined_opcodes(lexer):
def test_numbers(lexer):
fragment = '123 0123456789'
tokens = [
- (Number.Integer, u'123'),
- (Text, u' '),
- (Number.Integer, u'0123456789'),
- (Text, u'\n')
+ (Number.Integer, '123'),
+ (Text, ' '),
+ (Number.Integer, '0123456789'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
fragment = '0xabcdef0123456789 0XABCDEF'
tokens = [
- (Keyword.Type, u'0x'),
- (Number.Hex, u'abcdef0123456789'),
- (Text, u' '),
- (Keyword.Type, u'0X'),
- (Number.Hex, u'ABCDEF'),
- (Text, u'\n')
+ (Keyword.Type, '0x'),
+ (Number.Hex, 'abcdef0123456789'),
+ (Text, ' '),
+ (Keyword.Type, '0X'),
+ (Number.Hex, 'ABCDEF'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
fragments = ['1e2', '3e+4', '5e-6', '7E8', '9E+0', '1E-2', '3.', '4.56', '.789']
for fragment in fragments:
tokens = [
(Number.Float, fragment),
- (Text, u'\n')
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -142,11 +142,11 @@ def test_numbers(lexer):
def test_quoted_strings(lexer):
fragment = '"characters$MACRO."'
tokens = [
- (String, u'"'),
- (String, u'characters'),
- (Comment.Preproc, u'$MACRO.'),
- (String, u'"'),
- (Text, u'\n')
+ (String, '"'),
+ (String, 'characters'),
+ (Comment.Preproc, '$MACRO.'),
+ (String, '"'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -158,10 +158,10 @@ def test_braced_strings(lexer):
}}
''')
tokens = [
- (String, u'{{'),
- (String, u'\ncharacters$MACRO.\n'),
- (String, u'}}'),
- (Text, u'\n')
+ (String, '{{'),
+ (String, '\ncharacters$MACRO.\n'),
+ (String, '}}'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -171,30 +171,30 @@ def test_escape_sequences(lexer):
escapedCharacter = '\\' + character
fragment = '"' + escapedCharacter + '"'
tokens = [
- (String, u'"'),
+ (String, '"'),
(String.Escape, escapedCharacter),
- (String, u'"'),
- (Text, u'\n')
+ (String, '"'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
fragment = '{{' + escapedCharacter + '}}'
tokens = [
- (String, u'{{'),
+ (String, '{{'),
(String.Escape, escapedCharacter),
- (String, u'}}'),
- (Text, u'\n')
+ (String, '}}'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_operators(lexer):
- fragments = ['+', '-', '~', u'¬', '!', '*', '/', '^', '%', '<<', '>>', '<', '>',
+ fragments = ['+', '-', '~', '¬', '!', '*', '/', '^', '%', '<<', '>>', '<', '>',
'<=', '>=', '==', '!=', '&', '#', '|', '&&', '||', '?', ':', '+=',
'-=', '*=', '/=']
for fragment in fragments:
tokens = [
(Operator, fragment),
- (Text, u'\n')
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -203,7 +203,7 @@ def test_global_value_identifiers(lexer):
for fragment in ['0dbfs', 'A4', 'kr', 'ksmps', 'nchnls', 'nchnls_i', 'sr']:
tokens = [
(Name.Variable.Global, fragment),
- (Text, u'\n')
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -214,13 +214,13 @@ def test_keywords(lexer):
for fragment in fragments:
tokens = [
(Keyword, fragment),
- (Text, u'\n')
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
for fragment in ['return', 'rireturn']:
tokens = [
(Keyword.Pseudo, fragment),
- (Text, u'\n')
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -231,13 +231,13 @@ def test_labels(lexer):
label2:
''')
tokens = [
- (Name.Label, u'aLabel'),
- (Punctuation, u':'),
- (Text, u'\n'),
- (Text, u' '),
- (Name.Label, u'label2'),
- (Punctuation, u':'),
- (Text, u'\n')
+ (Name.Label, 'aLabel'),
+ (Punctuation, ':'),
+ (Text, '\n'),
+ (Text, ' '),
+ (Name.Label, 'label2'),
+ (Punctuation, ':'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -251,11 +251,11 @@ def test_printks_and_prints_escape_sequences(lexer):
fragment = opcode + ' "' + escapedCharacter + '"'
tokens = [
(Name.Builtin, opcode),
- (Text, u' '),
- (String, u'"'),
+ (Text, ' '),
+ (String, '"'),
(String.Escape, escapedCharacter),
- (String, u'"'),
- (Text, u'\n')
+ (String, '"'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -265,64 +265,64 @@ def test_goto_statements(lexer):
fragment = keyword + ' aLabel'
tokens = [
(Keyword, keyword),
- (Text, u' '),
- (Name.Label, u'aLabel'),
- (Text, u'\n')
+ (Text, ' '),
+ (Name.Label, 'aLabel'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
for opcode in ['reinit', 'rigoto', 'tigoto']:
fragment = opcode + ' aLabel'
tokens = [
(Keyword.Pseudo, opcode),
- (Text, u' '),
- (Name.Label, u'aLabel'),
- (Text, u'\n')
+ (Text, ' '),
+ (Name.Label, 'aLabel'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
for opcode in ['cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto', 'cnkgoto']:
fragment = opcode + ' 1==0, aLabel'
tokens = [
(Keyword.Pseudo, opcode),
- (Text, u' '),
- (Number.Integer, u'1'),
- (Operator, u'=='),
- (Number.Integer, u'0'),
- (Punctuation, u','),
- (Text, u' '),
- (Name.Label, u'aLabel'),
- (Text, u'\n')
+ (Text, ' '),
+ (Number.Integer, '1'),
+ (Operator, '=='),
+ (Number.Integer, '0'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Name.Label, 'aLabel'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
fragment = 'timout 0, 0, aLabel'
tokens = [
(Keyword.Pseudo, 'timout'),
- (Text, u' '),
- (Number.Integer, u'0'),
- (Punctuation, u','),
- (Text, u' '),
- (Number.Integer, u'0'),
- (Punctuation, u','),
- (Text, u' '),
- (Name.Label, u'aLabel'),
- (Text, u'\n')
+ (Text, ' '),
+ (Number.Integer, '0'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Number.Integer, '0'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Name.Label, 'aLabel'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
for opcode in ['loop_ge', 'loop_gt', 'loop_le', 'loop_lt']:
fragment = opcode + ' 0, 0, 0, aLabel'
tokens = [
(Keyword.Pseudo, opcode),
- (Text, u' '),
- (Number.Integer, u'0'),
- (Punctuation, u','),
- (Text, u' '),
- (Number.Integer, u'0'),
- (Punctuation, u','),
- (Text, u' '),
- (Number.Integer, u'0'),
- (Punctuation, u','),
- (Text, u' '),
- (Name.Label, u'aLabel'),
- (Text, u'\n')
+ (Text, ' '),
+ (Number.Integer, '0'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Number.Integer, '0'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Number.Integer, '0'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Name.Label, 'aLabel'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -331,10 +331,10 @@ def test_include_directives(lexer):
for character in ['"', '|']:
fragment = '#include/**/' + character + 'file.udo' + character
tokens = [
- (Comment.Preproc, u'#include'),
- (Comment.Multiline, u'/**/'),
- (String, character + u'file.udo' + character),
- (Text, u'\n')
+ (Comment.Preproc, '#include'),
+ (Comment.Multiline, '/**/'),
+ (String, character + 'file.udo' + character),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -342,13 +342,13 @@ def test_include_directives(lexer):
def test_includestr_directives(lexer):
fragment = '#includestr/**/"$MACRO..udo"'
tokens = [
- (Comment.Preproc, u'#includestr'),
- (Comment.Multiline, u'/**/'),
- (String, u'"'),
- (Comment.Preproc, u'$MACRO.'),
- (String, u'.udo'),
- (String, u'"'),
- (Text, u'\n')
+ (Comment.Preproc, '#includestr'),
+ (Comment.Multiline, '/**/'),
+ (String, '"'),
+ (Comment.Preproc, '$MACRO.'),
+ (String, '.udo'),
+ (String, '"'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -362,25 +362,25 @@ def test_object_like_macro_definitions(lexer):
body\\##
''')
tokens = [
- (Comment.Preproc, u'# \tdefine'),
- (Text, u' '),
- (Comment.Preproc, u'MACRO'),
- (Punctuation, u'#'),
- (Comment.Preproc, u'macro_body'),
- (Punctuation, u'#'),
- (Text, u'\n'),
- (Comment.Preproc, u'#define'),
- (Comment.Multiline, u'/**/'),
- (Text, u'\n'),
- (Comment.Preproc, u'MACRO'),
- (Comment.Multiline, u'/**/'),
- (Text, u'\n'),
- (Punctuation, u'#'),
- (Comment.Preproc, u'\\#'),
- (Comment.Preproc, u'macro\nbody'),
- (Comment.Preproc, u'\\#'),
- (Punctuation, u'#'),
- (Text, u'\n')
+ (Comment.Preproc, '# \tdefine'),
+ (Text, ' '),
+ (Comment.Preproc, 'MACRO'),
+ (Punctuation, '#'),
+ (Comment.Preproc, 'macro_body'),
+ (Punctuation, '#'),
+ (Text, '\n'),
+ (Comment.Preproc, '#define'),
+ (Comment.Multiline, '/**/'),
+ (Text, '\n'),
+ (Comment.Preproc, 'MACRO'),
+ (Comment.Multiline, '/**/'),
+ (Text, '\n'),
+ (Punctuation, '#'),
+ (Comment.Preproc, '\\#'),
+ (Comment.Preproc, 'macro\nbody'),
+ (Comment.Preproc, '\\#'),
+ (Punctuation, '#'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -394,39 +394,39 @@ def test_function_like_macro_definitions(lexer):
body\\##
''')
tokens = [
- (Comment.Preproc, u'#define'),
- (Text, u' '),
- (Comment.Preproc, u'MACRO'),
- (Punctuation, u'('),
- (Comment.Preproc, u'ARG1'),
- (Punctuation, u'#'),
- (Comment.Preproc, u'ARG2'),
- (Punctuation, u')'),
- (Text, u' '),
- (Punctuation, u'#'),
- (Comment.Preproc, u'macro_body'),
- (Punctuation, u'#'),
- (Text, u'\n'),
- (Comment.Preproc, u'#define'),
- (Comment.Multiline, u'/**/'),
- (Text, u'\n'),
- (Comment.Preproc, u'MACRO'),
- (Punctuation, u'('),
- (Comment.Preproc, u'ARG1'),
- (Punctuation, u"'"),
- (Comment.Preproc, u'ARG2'),
- (Punctuation, u"'"),
- (Text, u' '),
- (Comment.Preproc, u'ARG3'),
- (Punctuation, u')'),
- (Comment.Multiline, u'/**/'),
- (Text, u'\n'),
- (Punctuation, u'#'),
- (Comment.Preproc, u'\\#'),
- (Comment.Preproc, u'macro\nbody'),
- (Comment.Preproc, u'\\#'),
- (Punctuation, u'#'),
- (Text, u'\n')
+ (Comment.Preproc, '#define'),
+ (Text, ' '),
+ (Comment.Preproc, 'MACRO'),
+ (Punctuation, '('),
+ (Comment.Preproc, 'ARG1'),
+ (Punctuation, '#'),
+ (Comment.Preproc, 'ARG2'),
+ (Punctuation, ')'),
+ (Text, ' '),
+ (Punctuation, '#'),
+ (Comment.Preproc, 'macro_body'),
+ (Punctuation, '#'),
+ (Text, '\n'),
+ (Comment.Preproc, '#define'),
+ (Comment.Multiline, '/**/'),
+ (Text, '\n'),
+ (Comment.Preproc, 'MACRO'),
+ (Punctuation, '('),
+ (Comment.Preproc, 'ARG1'),
+ (Punctuation, "'"),
+ (Comment.Preproc, 'ARG2'),
+ (Punctuation, "'"),
+ (Text, ' '),
+ (Comment.Preproc, 'ARG3'),
+ (Punctuation, ')'),
+ (Comment.Multiline, '/**/'),
+ (Text, '\n'),
+ (Punctuation, '#'),
+ (Comment.Preproc, '\\#'),
+ (Comment.Preproc, 'macro\nbody'),
+ (Comment.Preproc, '\\#'),
+ (Punctuation, '#'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -436,9 +436,9 @@ def test_macro_preprocessor_directives(lexer):
fragment = directive + ' MACRO'
tokens = [
(Comment.Preproc, directive),
- (Text, u' '),
- (Comment.Preproc, u'MACRO'),
- (Text, u'\n')
+ (Text, ' '),
+ (Comment.Preproc, 'MACRO'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -453,18 +453,18 @@ def test_other_preprocessor_directives(lexer):
@@ \t67890
''')
tokens = [
- (Comment.Preproc, u'#else'),
- (Text, u'\n'),
- (Comment.Preproc, u'#end'),
- (Text, u'\n'),
- (Comment.Preproc, u'#endif'),
- (Text, u'\n'),
- (Comment.Preproc, u'###'),
- (Text, u'\n'),
- (Comment.Preproc, u'@ \t12345'),
- (Text, u'\n'),
- (Comment.Preproc, u'@@ \t67890'),
- (Text, u'\n')
+ (Comment.Preproc, '#else'),
+ (Text, '\n'),
+ (Comment.Preproc, '#end'),
+ (Text, '\n'),
+ (Comment.Preproc, '#endif'),
+ (Text, '\n'),
+ (Comment.Preproc, '###'),
+ (Text, '\n'),
+ (Comment.Preproc, '@ \t12345'),
+ (Text, '\n'),
+ (Comment.Preproc, '@@ \t67890'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -472,42 +472,42 @@ def test_other_preprocessor_directives(lexer):
def test_function_like_macros(lexer):
fragment = "$MACRO.(((x#y\\)))' \"(#'x)\\)x\\))\"# {{x\\))x)\\)(#'}});"
tokens = [
- (Comment.Preproc, u'$MACRO.'),
- (Punctuation, u'('),
- (Comment.Preproc, u'('),
- (Comment.Preproc, u'('),
- (Comment.Preproc, u'x#y\\)'),
- (Comment.Preproc, u')'),
- (Comment.Preproc, u')'),
- (Punctuation, u"'"),
- (Comment.Preproc, u' '),
- (String, u'"'),
- (Error, u'('),
- (Error, u'#'),
- (Error, u"'"),
- (String, u'x'),
- (Error, u')'),
- (Comment.Preproc, u'\\)'),
- (String, u'x'),
- (Comment.Preproc, u'\\)'),
- (Error, u')'),
- (String, u'"'),
- (Punctuation, u'#'),
- (Comment.Preproc, u' '),
- (String, u'{{'),
- (String, u'x'),
- (Comment.Preproc, u'\\)'),
- (Error, u')'),
- (String, u'x'),
- (Error, u')'),
- (Comment.Preproc, u'\\)'),
- (Error, u'('),
- (Error, u'#'),
- (Error, u"'"),
- (String, u'}}'),
- (Punctuation, u')'),
- (Comment.Single, u';'),
- (Text, u'\n')
+ (Comment.Preproc, '$MACRO.'),
+ (Punctuation, '('),
+ (Comment.Preproc, '('),
+ (Comment.Preproc, '('),
+ (Comment.Preproc, 'x#y\\)'),
+ (Comment.Preproc, ')'),
+ (Comment.Preproc, ')'),
+ (Punctuation, "'"),
+ (Comment.Preproc, ' '),
+ (String, '"'),
+ (Error, '('),
+ (Error, '#'),
+ (Error, "'"),
+ (String, 'x'),
+ (Error, ')'),
+ (Comment.Preproc, '\\)'),
+ (String, 'x'),
+ (Comment.Preproc, '\\)'),
+ (Error, ')'),
+ (String, '"'),
+ (Punctuation, '#'),
+ (Comment.Preproc, ' '),
+ (String, '{{'),
+ (String, 'x'),
+ (Comment.Preproc, '\\)'),
+ (Error, ')'),
+ (String, 'x'),
+ (Error, ')'),
+ (Comment.Preproc, '\\)'),
+ (Error, '('),
+ (Error, '#'),
+ (Error, "'"),
+ (String, '}}'),
+ (Punctuation, ')'),
+ (Comment.Single, ';'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_data.py b/tests/test_data.py
index 9724d235..22d4ee79 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -29,29 +29,29 @@ def lexer_yaml():
def test_basic_json(lexer_json):
- fragment = u'{"foo": "bar", "foo2": [1, 2, 3]}\n'
+ fragment = '{"foo": "bar", "foo2": [1, 2, 3]}\n'
tokens = [
- (Token.Punctuation, u'{'),
- (Token.Name.Tag, u'"foo"'),
- (Token.Punctuation, u':'),
- (Token.Text, u' '),
- (Token.Literal.String.Double, u'"bar"'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Name.Tag, u'"foo2"'),
- (Token.Punctuation, u':'),
- (Token.Text, u' '),
- (Token.Punctuation, u'['),
- (Token.Literal.Number.Integer, u'1'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Punctuation, u']'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Punctuation, '{'),
+ (Token.Name.Tag, '"foo"'),
+ (Token.Punctuation, ':'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Double, '"bar"'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Name.Tag, '"foo2"'),
+ (Token.Punctuation, ':'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '['),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '3'),
+ (Token.Punctuation, ']'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer_json.get_tokens(fragment)) == tokens
@@ -62,7 +62,7 @@ def test_json_escape_backtracking(lexer_json):
# this test will hang and that's how we know it's broken :(
fragment = r'{"\u00D0000\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\63CD'
tokens = (
- [(Token.Punctuation, u'{'),
+ [(Token.Punctuation, '{'),
(Token.Error, r'"'),
(Token.Error, '\\'),
(Token.Error, r'u'),
@@ -87,27 +87,27 @@ def test_json_escape_backtracking(lexer_json):
def test_basic_bare(lexer_bare):
# This is the same as testBasic for JsonLexer above, except the
# enclosing curly braces are removed.
- fragment = u'"foo": "bar", "foo2": [1, 2, 3]\n'
+ fragment = '"foo": "bar", "foo2": [1, 2, 3]\n'
tokens = [
- (Token.Name.Tag, u'"foo"'),
- (Token.Punctuation, u':'),
- (Token.Text, u' '),
- (Token.Literal.String.Double, u'"bar"'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Name.Tag, u'"foo2"'),
- (Token.Punctuation, u':'),
- (Token.Text, u' '),
- (Token.Punctuation, u'['),
- (Token.Literal.Number.Integer, u'1'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Punctuation, u']'),
- (Token.Text, u'\n'),
+ (Token.Name.Tag, '"foo"'),
+ (Token.Punctuation, ':'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Double, '"bar"'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Name.Tag, '"foo2"'),
+ (Token.Punctuation, ':'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '['),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '3'),
+ (Token.Punctuation, ']'),
+ (Token.Text, '\n'),
]
assert list(lexer_bare.get_tokens(fragment)) == tokens
@@ -139,14 +139,14 @@ def test_closing_curly_in_value(lexer_bare):
def test_yaml(lexer_yaml):
# Bug #1528: This previously parsed 'token # innocent' as a tag
- fragment = u'here: token # innocent: comment\n'
+ fragment = 'here: token # innocent: comment\n'
tokens = [
- (Token.Name.Tag, u'here'),
- (Token.Punctuation, u':'),
- (Token.Text, u' '),
- (Token.Literal.Scalar.Plain, u'token'),
- (Token.Text, u' '),
- (Token.Comment.Single, u'# innocent: comment'),
- (Token.Text, u'\n'),
+ (Token.Name.Tag, 'here'),
+ (Token.Punctuation, ':'),
+ (Token.Text, ' '),
+ (Token.Literal.Scalar.Plain, 'token'),
+ (Token.Text, ' '),
+ (Token.Comment.Single, '# innocent: comment'),
+ (Token.Text, '\n'),
]
assert list(lexer_yaml.get_tokens(fragment)) == tokens
diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py
index 22b5ced0..25b355ec 100644
--- a/tests/test_examplefiles.py
+++ b/tests/test_examplefiles.py
@@ -101,8 +101,8 @@ def test_examplefile(filename):
text = text.strip(b'\n') + b'\n'
try:
text = text.decode('utf-8')
- if text.startswith(u'\ufeff'):
- text = text[len(u'\ufeff'):]
+ if text.startswith('\ufeff'):
+ text = text[len('\ufeff'):]
except UnicodeError:
text = text.decode('latin1')
ntext = []
@@ -113,13 +113,13 @@ def test_examplefile(filename):
ntext.append(val)
assert type != Error, \
'lexer %s generated error token for %s: %r at position %d' % \
- (lx, absfn, val, len(u''.join(ntext)))
+ (lx, absfn, val, len(''.join(ntext)))
tokens.append((type, val))
t2 = time.time()
STATS[os.path.basename(absfn)] = (len(text),
1000 * (t2 - t1), 1000 * (t2 - t1) / len(text))
- if u''.join(ntext) != text:
- print('\n'.join(difflib.unified_diff(u''.join(ntext).splitlines(),
+ if ''.join(ntext) != text:
+ print('\n'.join(difflib.unified_diff(''.join(ntext).splitlines(),
text.splitlines())))
raise AssertionError('round trip failed for ' + absfn)
diff --git a/tests/test_ezhil.py b/tests/test_ezhil.py
index 8047a30a..beca4c65 100644
--- a/tests/test_ezhil.py
+++ b/tests/test_ezhil.py
@@ -19,71 +19,71 @@ def lexer():
def test_sum(lexer):
- fragment = u'1+3\n'
+ fragment = '1+3\n'
tokens = [
- (Number.Integer, u'1'),
- (Operator, u'+'),
- (Number.Integer, u'3'),
- (Text, u'\n'),
+ (Number.Integer, '1'),
+ (Operator, '+'),
+ (Number.Integer, '3'),
+ (Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_gcd_expr(lexer):
- fragment = u'1^3+(5-5)*gcd(a,b)\n'
+ fragment = '1^3+(5-5)*gcd(a,b)\n'
tokens = [
- (Token.Number.Integer, u'1'),
- (Token.Operator, u'^'),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Operator, u'+'),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'5'),
- (Token.Operator, u'-'),
- (Token.Literal.Number.Integer, u'5'),
- (Token.Punctuation, u')'),
- (Token.Operator, u'*'),
- (Token.Name, u'gcd'),
- (Token.Punctuation, u'('),
- (Token.Name, u'a'),
- (Token.Operator, u','),
- (Token.Name, u'b'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n')
+ (Token.Number.Integer, '1'),
+ (Token.Operator, '^'),
+ (Token.Literal.Number.Integer, '3'),
+ (Token.Operator, '+'),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '5'),
+ (Token.Operator, '-'),
+ (Token.Literal.Number.Integer, '5'),
+ (Token.Punctuation, ')'),
+ (Token.Operator, '*'),
+ (Token.Name, 'gcd'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'a'),
+ (Token.Operator, ','),
+ (Token.Name, 'b'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_if_statement(lexer):
- fragment = u"""@( 0 > 3 ) ஆனால்
+ fragment = """@( 0 > 3 ) ஆனால்
பதிப்பி "wont print"
முடி"""
tokens = [
- (Token.Operator, u'@'),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Text, u' '),
- (Token.Operator, u'>'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Keyword, u'ஆனால்'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'பதிப்பி'),
- (Token.Text, u' '),
- (Token.Literal.String, u'"wont print"'),
- (Token.Text, u'\n'),
- (Token.Keyword, u'முடி'),
- (Token.Text, u'\n')
+ (Token.Operator, '@'),
+ (Token.Punctuation, '('),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Text, ' '),
+ (Token.Operator, '>'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '3'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'ஆனால்'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'பதிப்பி'),
+ (Token.Text, ' '),
+ (Token.Literal.String, '"wont print"'),
+ (Token.Text, '\n'),
+ (Token.Keyword, 'முடி'),
+ (Token.Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_function(lexer):
- fragment = u"""# (C) முத்தையா அண்ணாமலை 2013, 2015
+ fragment = """# (C) முத்தையா அண்ணாமலை 2013, 2015
நிரல்பாகம் gcd ( x, y )
மு = max(x,y)
q = min(x,y)
@@ -95,83 +95,83 @@ def test_function(lexer):
முடி\n"""
tokens = [
(Token.Comment.Single,
- u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85'
- u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'),
- (Token.Keyword, u'நிரல்பாகம்'),
- (Token.Text, u' '),
- (Token.Name, u'gcd'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Name, u'x'),
- (Token.Operator, u','),
- (Token.Text, u' '),
- (Token.Name, u'y'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Name, u'\u0bae\u0bc1'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'max'),
- (Token.Punctuation, u'('),
- (Token.Name, u'x'),
- (Token.Operator, u','),
- (Token.Name, u'y'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'min'),
- (Token.Punctuation, u'('),
- (Token.Name, u'x'),
- (Token.Operator, u','),
- (Token.Name, u'y'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u'\n'),
- (Token.Operator, u'@'),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Operator, u'=='),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Keyword, u'ஆனால்'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'பின்கொடு'),
- (Token.Text, u' '),
- (Token.Name, u'\u0bae\u0bc1'),
- (Token.Text, u'\n'),
- (Token.Keyword, u'முடி'),
- (Token.Text, u'\n'),
- (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'),
- (Token.Text, u' '),
- (Token.Name, u'gcd'),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Name, u'\u0bae\u0bc1'),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Operator, u','),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Keyword, u'முடி'), # u'\u0bae\u0bc1\u0b9f\u0bbf'),
- (Token.Text, u'\n')
+ '# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85'
+ '\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'),
+ (Token.Keyword, 'நிரல்பாகம்'),
+ (Token.Text, ' '),
+ (Token.Name, 'gcd'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Text, ' '),
+ (Token.Name, 'x'),
+ (Token.Operator, ','),
+ (Token.Text, ' '),
+ (Token.Name, 'y'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Name, '\u0bae\u0bc1'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Name.Builtin, 'max'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'x'),
+ (Token.Operator, ','),
+ (Token.Name, 'y'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Name, 'q'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Name.Builtin, 'min'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'x'),
+ (Token.Operator, ','),
+ (Token.Name, 'y'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Text, '\n'),
+ (Token.Operator, '@'),
+ (Token.Punctuation, '('),
+ (Token.Text, ' '),
+ (Token.Name, 'q'),
+ (Token.Text, ' '),
+ (Token.Operator, '=='),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'ஆனால்'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'பின்கொடு'),
+ (Token.Text, ' '),
+ (Token.Name, '\u0bae\u0bc1'),
+ (Token.Text, '\n'),
+ (Token.Keyword, 'முடி'),
+ (Token.Text, '\n'),
+ (Token.Keyword, '\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'),
+ (Token.Text, ' '),
+ (Token.Name, 'gcd'),
+ (Token.Punctuation, '('),
+ (Token.Text, ' '),
+ (Token.Name, '\u0bae\u0bc1'),
+ (Token.Text, ' '),
+ (Token.Operator, '-'),
+ (Token.Text, ' '),
+ (Token.Name, 'q'),
+ (Token.Text, ' '),
+ (Token.Operator, ','),
+ (Token.Text, ' '),
+ (Token.Name, 'q'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Keyword, 'முடி'), # '\u0bae\u0bc1\u0b9f\u0bbf'),
+ (Token.Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_grammar_notation.py b/tests/test_grammar_notation.py
index 0d7e6865..9db46e62 100644
--- a/tests/test_grammar_notation.py
+++ b/tests/test_grammar_notation.py
@@ -19,21 +19,21 @@ def lexer_peg():
def test_peg_basic(lexer_peg):
- fragment = u'rule<-("terminal"/nonterminal/[cls])*\n'
+ fragment = 'rule<-("terminal"/nonterminal/[cls])*\n'
tokens = [
- (Token.Name.Class, u'rule'),
- (Token.Operator, u'<-'),
- (Token.Punctuation, u'('),
- (Token.String.Double, u'"terminal"'),
- (Token.Operator, u'/'),
- (Token.Name.Class, u'nonterminal'),
- (Token.Operator, u'/'),
- (Token.Punctuation, u'['),
- (Token.String, u'cls'),
- (Token.Punctuation, u']'),
- (Token.Punctuation, u')'),
- (Token.Operator, u'*'),
- (Token.Text, u'\n'),
+ (Token.Name.Class, 'rule'),
+ (Token.Operator, '<-'),
+ (Token.Punctuation, '('),
+ (Token.String.Double, '"terminal"'),
+ (Token.Operator, '/'),
+ (Token.Name.Class, 'nonterminal'),
+ (Token.Operator, '/'),
+ (Token.Punctuation, '['),
+ (Token.String, 'cls'),
+ (Token.Punctuation, ']'),
+ (Token.Punctuation, ')'),
+ (Token.Operator, '*'),
+ (Token.Text, '\n'),
]
assert list(lexer_peg.get_tokens(fragment)) == tokens
@@ -42,31 +42,31 @@ def test_peg_operators(lexer_peg):
# see for example:
# - https://github.com/gvanrossum/pegen
# - https://nim-lang.org/docs/pegs.html
- fragment = u"rule = 'a' | 'b'\n"
+ fragment = "rule = 'a' | 'b'\n"
tokens = [
- (Token.Name.Class, u'rule'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.String.Single, u"'a'"),
- (Token.Text, u' '),
- (Token.Operator, u'|'),
- (Token.Text, u' '),
- (Token.String.Single, u"'b'"),
- (Token.Text, u'\n'),
+ (Token.Name.Class, 'rule'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.String.Single, "'a'"),
+ (Token.Text, ' '),
+ (Token.Operator, '|'),
+ (Token.Text, ' '),
+ (Token.String.Single, "'b'"),
+ (Token.Text, '\n'),
]
assert list(lexer_peg.get_tokens(fragment)) == tokens
- fragment = u"rule: 'a' ~ 'b'\n"
+ fragment = "rule: 'a' ~ 'b'\n"
tokens = [
- (Token.Name.Class, u'rule'),
- (Token.Operator, u':'),
- (Token.Text, u' '),
- (Token.String.Single, u"'a'"),
- (Token.Text, u' '),
- (Token.Operator, u'~'),
- (Token.Text, u' '),
- (Token.String.Single, u"'b'"),
- (Token.Text, u'\n'),
+ (Token.Name.Class, 'rule'),
+ (Token.Operator, ':'),
+ (Token.Text, ' '),
+ (Token.String.Single, "'a'"),
+ (Token.Text, ' '),
+ (Token.Operator, '~'),
+ (Token.Text, ' '),
+ (Token.String.Single, "'b'"),
+ (Token.Text, '\n'),
]
assert list(lexer_peg.get_tokens(fragment)) == tokens
@@ -76,19 +76,19 @@ def test_peg_modified_strings(lexer_peg):
# - http://textx.github.io/Arpeggio/
# - https://nim-lang.org/docs/pegs.html
# - https://github.com/erikrose/parsimonious
- fragment = u'~"regex" i"insensitive" "multimod"ilx ("not modified")\n'
+ fragment = '~"regex" i"insensitive" "multimod"ilx ("not modified")\n'
tokens = [
# can't handle parsimonious-style regex while ~ is a cut operator
- (Token.Operator, u'~'),
- (Token.String.Double, u'"regex"'),
- (Token.Text, u' '),
- (Token.String.Double, u'i"insensitive"'),
- (Token.Text, u' '),
- (Token.String.Double, u'"multimod"ilx'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.String.Double, u'"not modified"'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Operator, '~'),
+ (Token.String.Double, '"regex"'),
+ (Token.Text, ' '),
+ (Token.String.Double, 'i"insensitive"'),
+ (Token.Text, ' '),
+ (Token.String.Double, '"multimod"ilx'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.String.Double, '"not modified"'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer_peg.get_tokens(fragment)) == tokens
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 0a1b7a9a..7a1c563c 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -214,9 +214,9 @@ def test_get_background_style_defs_uses_multiple_css_prefixes():
def test_unicode_options():
- fmt = HtmlFormatter(title=u'Föö',
- cssclass=u'bär',
- cssstyles=u'div:before { content: \'bäz\' }',
+ fmt = HtmlFormatter(title='Föö',
+ cssclass='bär',
+ cssstyles='div:before { content: \'bäz\' }',
encoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
with os.fdopen(handle, 'w+b') as tfile:
diff --git a/tests/test_idris.py b/tests/test_idris.py
index 8580bde4..42dcb3cb 100644
--- a/tests/test_idris.py
+++ b/tests/test_idris.py
@@ -18,48 +18,48 @@ def lexer():
yield IdrisLexer()
def test_reserved_word(lexer):
- fragment = u'namespace Foobar\n links : String\n links = "abc"'
+ fragment = 'namespace Foobar\n links : String\n links = "abc"'
tokens = [
- (Keyword.Reserved, u'namespace'),
- (Text, u' '),
- (Keyword.Type, u'Foobar'),
- (Text, u'\n'),
- (Text, u' '),
- (Name.Function, u'links'),
- (Text, u' '),
- (Operator.Word, u':'),
- (Text, u' '),
- (Keyword.Type, u'String'),
- (Text, u'\n'),
- (Text, u' '),
- (Text, u' '),
- (Text, u'links'),
- (Text, u' '),
- (Operator.Word, u'='),
- (Text, u' '),
- (Literal.String, u'"'),
- (Literal.String, u'abc'),
- (Literal.String, u'"'),
- (Text, u'\n')
+ (Keyword.Reserved, 'namespace'),
+ (Text, ' '),
+ (Keyword.Type, 'Foobar'),
+ (Text, '\n'),
+ (Text, ' '),
+ (Name.Function, 'links'),
+ (Text, ' '),
+ (Operator.Word, ':'),
+ (Text, ' '),
+ (Keyword.Type, 'String'),
+ (Text, '\n'),
+ (Text, ' '),
+ (Text, ' '),
+ (Text, 'links'),
+ (Text, ' '),
+ (Operator.Word, '='),
+ (Text, ' '),
+ (Literal.String, '"'),
+ (Literal.String, 'abc'),
+ (Literal.String, '"'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_compiler_directive(lexer):
- fragment = u'%link C "object.o"\n%name Vect xs'
+ fragment = '%link C "object.o"\n%name Vect xs'
tokens = [
- (Keyword.Reserved, u'%link'),
- (Text, u' '),
- (Keyword.Type, u'C'),
- (Text, u' '),
- (Literal.String, u'"'),
- (Literal.String, u'object.o'),
- (Literal.String, u'"'),
- (Text, u'\n'),
- (Keyword.Reserved, u'%name'),
- (Text, u' '),
- (Keyword.Type, u'Vect'),
- (Text, u' '),
- (Text, u'xs'),
- (Text, u'\n')
+ (Keyword.Reserved, '%link'),
+ (Text, ' '),
+ (Keyword.Type, 'C'),
+ (Text, ' '),
+ (Literal.String, '"'),
+ (Literal.String, 'object.o'),
+ (Literal.String, '"'),
+ (Text, '\n'),
+ (Keyword.Reserved, '%name'),
+ (Text, ' '),
+ (Keyword.Type, 'Vect'),
+ (Text, ' '),
+ (Text, 'xs'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_irc_formatter.py b/tests/test_irc_formatter.py
index 6dc43b7d..f93bbad4 100644
--- a/tests/test_irc_formatter.py
+++ b/tests/test_irc_formatter.py
@@ -20,4 +20,4 @@ def test_correct_output():
houtfile = StringIO()
hfmt.format(tokensource, houtfile)
- assert u'\x0302lambda\x03 x: \x0302123\x03\n' == houtfile.getvalue()
+ assert '\x0302lambda\x03 x: \x0302123\x03\n' == houtfile.getvalue()
diff --git a/tests/test_java.py b/tests/test_java.py
index 467a3b72..3baec0ad 100644
--- a/tests/test_java.py
+++ b/tests/test_java.py
@@ -19,23 +19,23 @@ def lexer():
def test_enhanced_for(lexer):
- fragment = u'label:\nfor(String var2: var1) {}\n'
+ fragment = 'label:\nfor(String var2: var1) {}\n'
tokens = [
- (Name.Label, u'label:'),
- (Text, u'\n'),
- (Keyword, u'for'),
- (Punctuation, u'('),
- (Name, u'String'),
- (Text, u' '),
- (Name, u'var2'),
- (Punctuation, u':'),
- (Text, u' '),
- (Name, u'var1'),
- (Punctuation, u')'),
- (Text, u' '),
- (Punctuation, u'{'),
- (Punctuation, u'}'),
- (Text, u'\n'),
+ (Name.Label, 'label:'),
+ (Text, '\n'),
+ (Keyword, 'for'),
+ (Punctuation, '('),
+ (Name, 'String'),
+ (Text, ' '),
+ (Name, 'var2'),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name, 'var1'),
+ (Punctuation, ')'),
+ (Text, ' '),
+ (Punctuation, '{'),
+ (Punctuation, '}'),
+ (Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_julia.py b/tests/test_julia.py
index e041377b..14bcdee0 100644
--- a/tests/test_julia.py
+++ b/tests/test_julia.py
@@ -22,38 +22,38 @@ def test_unicode(lexer):
"""
Test that unicode character, √, in an expression is recognized
"""
- fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
+ fragment = 's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
tokens = [
- (Token.Name, u's'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Operator, u'\u221a'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'1'),
- (Token.Operator, u'/'),
- (Token.Name, u'n'),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Operator, u'*'),
- (Token.Text, u' '),
- (Token.Name, u'sum'),
- (Token.Punctuation, u'('),
- (Token.Name, u'count'),
- (Token.Text, u' '),
- (Token.Operator, u'.^'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Text, u' '),
- (Token.Name, u'mu'),
- (Token.Text, u' '),
- (Token.Operator, u'.^'),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Name, 's'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Operator, '\u221a'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Operator, '/'),
+ (Token.Name, 'n'),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Operator, '*'),
+ (Token.Text, ' '),
+ (Token.Name, 'sum'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'count'),
+ (Token.Text, ' '),
+ (Token.Operator, '.^'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Operator, '-'),
+ (Token.Text, ' '),
+ (Token.Name, 'mu'),
+ (Token.Text, ' '),
+ (Token.Operator, '.^'),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_kotlin.py b/tests/test_kotlin.py
index 2f0eb376..9b9e898d 100644
--- a/tests/test_kotlin.py
+++ b/tests/test_kotlin.py
@@ -19,115 +19,115 @@ def lexer():
def test_can_cope_with_backtick_names_in_functions(lexer):
- fragment = u'fun `wo bble`'
+ fragment = 'fun `wo bble`'
tokens = [
- (Keyword, u'fun'),
- (Text, u' '),
- (Name.Function, u'`wo bble`'),
- (Text, u'\n')
+ (Keyword, 'fun'),
+ (Text, ' '),
+ (Name.Function, '`wo bble`'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_can_cope_with_commas_and_dashes_in_backtick_Names(lexer):
- fragment = u'fun `wo,-bble`'
+ fragment = 'fun `wo,-bble`'
tokens = [
- (Keyword, u'fun'),
- (Text, u' '),
- (Name.Function, u'`wo,-bble`'),
- (Text, u'\n')
+ (Keyword, 'fun'),
+ (Text, ' '),
+ (Name.Function, '`wo,-bble`'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_can_cope_with_destructuring(lexer):
- fragment = u'val (a, b) = '
+ fragment = 'val (a, b) = '
tokens = [
- (Keyword, u'val'),
- (Text, u' '),
- (Punctuation, u'('),
- (Name.Property, u'a'),
- (Punctuation, u','),
- (Text, u' '),
- (Name.Property, u'b'),
- (Punctuation, u')'),
- (Text, u' '),
- (Punctuation, u'='),
- (Text, u' '),
- (Text, u'\n')
+ (Keyword, 'val'),
+ (Text, ' '),
+ (Punctuation, '('),
+ (Name.Property, 'a'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Name.Property, 'b'),
+ (Punctuation, ')'),
+ (Text, ' '),
+ (Punctuation, '='),
+ (Text, ' '),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_can_cope_generics_in_destructuring(lexer):
- fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
+ fragment = 'val (a: List<Something>, b: Set<Wobble>) ='
tokens = [
- (Keyword, u'val'),
- (Text, u' '),
- (Punctuation, u'('),
- (Name.Property, u'a'),
- (Punctuation, u':'),
- (Text, u' '),
- (Name.Property, u'List'),
- (Punctuation, u'<'),
- (Name, u'Something'),
- (Punctuation, u'>'),
- (Punctuation, u','),
- (Text, u' '),
- (Name.Property, u'b'),
- (Punctuation, u':'),
- (Text, u' '),
- (Name.Property, u'Set'),
- (Punctuation, u'<'),
- (Name, u'Wobble'),
- (Punctuation, u'>'),
- (Punctuation, u')'),
- (Text, u' '),
- (Punctuation, u'='),
- (Text, u'\n')
+ (Keyword, 'val'),
+ (Text, ' '),
+ (Punctuation, '('),
+ (Name.Property, 'a'),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name.Property, 'List'),
+ (Punctuation, '<'),
+ (Name, 'Something'),
+ (Punctuation, '>'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (Name.Property, 'b'),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name.Property, 'Set'),
+ (Punctuation, '<'),
+ (Name, 'Wobble'),
+ (Punctuation, '>'),
+ (Punctuation, ')'),
+ (Text, ' '),
+ (Punctuation, '='),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_can_cope_with_generics(lexer):
- fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
+ fragment = 'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
tokens = [
- (Keyword, u'inline fun'),
- (Text, u' '),
- (Punctuation, u'<'),
- (Keyword, u'reified'),
- (Text, u' '),
- (Name, u'T'),
- (Text, u' '),
- (Punctuation, u':'),
- (Text, u' '),
- (Name, u'ContractState'),
- (Punctuation, u'>'),
- (Text, u' '),
- (Name.Class, u'VaultService'),
- (Punctuation, u'.'),
- (Name.Function, u'queryBy'),
- (Punctuation, u'('),
- (Punctuation, u')'),
- (Punctuation, u':'),
- (Text, u' '),
- (Name, u'Vault'),
- (Punctuation, u'.'),
- (Name, u'Page'),
- (Punctuation, u'<'),
- (Name, u'T'),
- (Punctuation, u'>'),
- (Text, u' '),
- (Punctuation, u'{'),
- (Text, u'\n')
+ (Keyword, 'inline fun'),
+ (Text, ' '),
+ (Punctuation, '<'),
+ (Keyword, 'reified'),
+ (Text, ' '),
+ (Name, 'T'),
+ (Text, ' '),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name, 'ContractState'),
+ (Punctuation, '>'),
+ (Text, ' '),
+ (Name.Class, 'VaultService'),
+ (Punctuation, '.'),
+ (Name.Function, 'queryBy'),
+ (Punctuation, '('),
+ (Punctuation, ')'),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name, 'Vault'),
+ (Punctuation, '.'),
+ (Name, 'Page'),
+ (Punctuation, '<'),
+ (Name, 'T'),
+ (Punctuation, '>'),
+ (Text, ' '),
+ (Punctuation, '{'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_should_cope_with_multiline_comments(lexer):
- fragment = u'"""\nthis\nis\na\ncomment"""'
+ fragment = '"""\nthis\nis\na\ncomment"""'
tokens = [
- (String, u'"""\nthis\nis\na\ncomment"""'),
- (Text, u'\n')
+ (String, '"""\nthis\nis\na\ncomment"""'),
+ (Text, '\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_objectiveclexer.py b/tests/test_objectiveclexer.py
index 31f833cf..7264bad8 100644
--- a/tests/test_objectiveclexer.py
+++ b/tests/test_objectiveclexer.py
@@ -19,78 +19,78 @@ def lexer():
def test_literal_number_int(lexer):
- fragment = u'@(1);\n'
+ fragment = '@(1);\n'
expected = [
- (Token.Literal, u'@('),
- (Token.Literal.Number.Integer, u'1'),
- (Token.Literal, u')'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
+ (Token.Literal, '@('),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Literal, ')'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
def test_literal_number_expression(lexer):
- fragment = u'@(1+2);\n'
+ fragment = '@(1+2);\n'
expected = [
- (Token.Literal, u'@('),
- (Token.Literal.Number.Integer, u'1'),
- (Token.Operator, u'+'),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Literal, u')'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
+ (Token.Literal, '@('),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Operator, '+'),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Literal, ')'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
def test_literal_number_nested_expression(lexer):
- fragment = u'@(1+(2+3));\n'
+ fragment = '@(1+(2+3));\n'
expected = [
- (Token.Literal, u'@('),
- (Token.Literal.Number.Integer, u'1'),
- (Token.Operator, u'+'),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Operator, u'+'),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Punctuation, u')'),
- (Token.Literal, u')'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
+ (Token.Literal, '@('),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Operator, '+'),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Operator, '+'),
+ (Token.Literal.Number.Integer, '3'),
+ (Token.Punctuation, ')'),
+ (Token.Literal, ')'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
def test_literal_number_bool(lexer):
- fragment = u'@NO;\n'
+ fragment = '@NO;\n'
expected = [
- (Token.Literal.Number, u'@NO'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
+ (Token.Literal.Number, '@NO'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
def test_literal_number_bool_expression(lexer):
- fragment = u'@(YES);\n'
+ fragment = '@(YES);\n'
expected = [
- (Token.Literal, u'@('),
- (Token.Name.Builtin, u'YES'),
- (Token.Literal, u')'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
+ (Token.Literal, '@('),
+ (Token.Name.Builtin, 'YES'),
+ (Token.Literal, ')'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
def test_module_import(lexer):
- fragment = u'@import ModuleA;\n'
+ fragment = '@import ModuleA;\n'
expected = [
- (Token.Keyword, u'@import'),
- (Token.Text, u' '),
- (Token.Name, u'ModuleA'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
+ (Token.Keyword, '@import'),
+ (Token.Text, ' '),
+ (Token.Name, 'ModuleA'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_praat.py b/tests/test_praat.py
index e3997671..cced8983 100644
--- a/tests/test_praat.py
+++ b/tests/test_praat.py
@@ -19,187 +19,195 @@ def lexer():
def test_numeric_assignment(lexer):
- fragment = u'var = -15e4\n'
+ fragment = 'var = -15e4\n'
tokens = [
- (Token.Text, u'var'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Literal.Number, u'15e4'),
- (Token.Text, u'\n'),
+ (Token.Text, 'var'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Operator, '-'),
+ (Token.Literal.Number, '15e4'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def testStringAssignment(lexer):
- fragment = u'var$ = "foo"\n'
+ fragment = 'var$ = "foo"\n'
tokens = [
- (Token.Text, u'var$'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'foo'),
- (Token.Literal.String, u'"'),
- (Token.Text, u'\n'),
+ (Token.Text, 'var$'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, 'foo'),
+ (Token.Literal.String, '"'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_string_escaped_quotes(lexer):
- fragment = u'"it said ""foo"""\n'
+ fragment = '"it said ""foo"""\n'
tokens = [
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'it said '),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'foo'),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'"'),
- (Token.Text, u'\n'),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, 'it said '),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, 'foo'),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, '"'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_function_call(lexer):
- fragment = u'selected("Sound", i+(a*b))\n'
+ fragment = 'selected("Sound", i+(a*b))\n'
tokens = [
- (Token.Name.Function, u'selected'),
- (Token.Punctuation, u'('),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u'Sound'),
- (Token.Literal.String, u'"'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Text, u'i'),
- (Token.Operator, u'+'),
- (Token.Text, u'('),
- (Token.Text, u'a'),
- (Token.Operator, u'*'),
- (Token.Text, u'b'),
- (Token.Text, u')'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Name.Function, 'selected'),
+ (Token.Punctuation, '('),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, 'Sound'),
+ (Token.Literal.String, '"'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Text, 'i'),
+ (Token.Operator, '+'),
+ (Token.Text, '('),
+ (Token.Text, 'a'),
+ (Token.Operator, '*'),
+ (Token.Text, 'b'),
+ (Token.Text, ')'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_broken_unquoted_string(lexer):
- fragment = u'printline string\n... \'interpolated\' string\n'
+ fragment = 'printline string\n... \'interpolated\' string\n'
tokens = [
- (Token.Keyword, u'printline'),
- (Token.Text, u' '),
- (Token.Literal.String, u'string'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'...'),
- (Token.Text, u' '),
- (Token.Literal.String.Interpol, u"'interpolated'"),
- (Token.Text, u' '),
- (Token.Literal.String, u'string'),
- (Token.Text, u'\n'),
+ (Token.Keyword, 'printline'),
+ (Token.Text, ' '),
+ (Token.Literal.String, 'string'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '...'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Interpol, "'interpolated'"),
+ (Token.Text, ' '),
+ (Token.Literal.String, 'string'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_inline_if(lexer):
- fragment = u'var = if true == 1 then -1 else 0 fi'
+ fragment = 'var = if true == 1 then -1 else 0 fi'
tokens = [
- (Token.Text, u'var'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Keyword, u'if'),
- (Token.Text, u' '),
- (Token.Text, u'true'),
- (Token.Text, u' '),
- (Token.Operator, u'=='),
- (Token.Text, u' '),
- (Token.Literal.Number, u'1'),
- (Token.Text, u' '),
- (Token.Keyword, u'then'),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Literal.Number, u'1'),
- (Token.Text, u' '),
- (Token.Keyword, u'else'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'0'),
- (Token.Text, u' '),
- (Token.Keyword, u'fi'),
- (Token.Text, u'\n'),
+ (Token.Text, 'var'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Keyword, 'if'),
+ (Token.Text, ' '),
+ (Token.Text, 'true'),
+ (Token.Text, ' '),
+ (Token.Operator, '=='),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '1'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'then'),
+ (Token.Text, ' '),
+ (Token.Operator, '-'),
+ (Token.Literal.Number, '1'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'else'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '0'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'fi'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolation_boundary(lexer):
- fragment = u'"\'" + "\'"'
+ fragment = '"\'" + "\'"'
tokens = [
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u"'"),
- (Token.Literal.String, u'"'),
- (Token.Text, u' '),
- (Token.Operator, u'+'),
- (Token.Text, u' '),
- (Token.Literal.String, u'"'),
- (Token.Literal.String, u"'"),
- (Token.Literal.String, u'"'),
- (Token.Text, u'\n'),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, "'"),
+ (Token.Literal.String, '"'),
+ (Token.Text, ' '),
+ (Token.Operator, '+'),
+ (Token.Text, ' '),
+ (Token.Literal.String, '"'),
+ (Token.Literal.String, "'"),
+ (Token.Literal.String, '"'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_numeric_indexed(lexer):
- fragment = u"'a[3]'"
+ fragment = "'a[3]'"
tokens = [
- (Token.Literal.String.Interpol, u"'a[3]'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a[3]'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_numeric_hash(lexer):
- fragment = u"'a[\"b\"]'"
+ fragment = "'a[\"b\"]'"
tokens = [
- (Token.Literal.String.Interpol, u"'a[\"b\"]'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a[\"b\"]'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_string_indexed(lexer):
- fragment = u"'a$[3]'"
+ fragment = "'a$[3]'"
tokens = [
- (Token.Literal.String.Interpol, u"'a$[3]'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a$[3]'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_string_hash(lexer):
- fragment = u"'a$[\"b\"]'"
+ fragment = "'a$[\"b\"]'"
tokens = [
- (Token.Literal.String.Interpol, u"'a$[\"b\"]'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a$[\"b\"]'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_numeric_with_precision(lexer):
- fragment = u"'a:3'"
+ fragment = "'a:3'"
tokens = [
- (Token.Literal.String.Interpol, u"'a:3'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a:3'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_indexed_numeric_with_precision(lexer):
- fragment = u"'a[3]:3'"
+ fragment = "'a[3]:3'"
tokens = [
- (Token.Literal.String.Interpol, u"'a[3]:3'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a[3]:3'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
+
def test_interpolated_local_numeric_with_precision(lexer):
- fragment = u"'a.a:3'"
+ fragment = "'a.a:3'"
tokens = [
- (Token.Literal.String.Interpol, u"'a.a:3'"),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Interpol, "'a.a:3'"),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_promql.py b/tests/test_promql.py
index cd02a57a..3b3dce91 100644
--- a/tests/test_promql.py
+++ b/tests/test_promql.py
@@ -19,7 +19,7 @@ def lexer():
def test_metric(lexer):
- fragment = u"go_gc_duration_seconds"
+ fragment = "go_gc_duration_seconds"
tokens = [
(Token.Name.Variable, "go_gc_duration_seconds"),
(Token.Text.Whitespace, "\n"),
@@ -28,7 +28,7 @@ def test_metric(lexer):
def test_metric_one_label(lexer):
- fragment = u'go_gc_duration_seconds{instance="localhost:9090"}'
+ fragment = 'go_gc_duration_seconds{instance="localhost:9090"}'
tokens = [
(Token.Name.Variable, "go_gc_duration_seconds"),
(Token.Punctuation, "{"),
@@ -44,7 +44,7 @@ def test_metric_one_label(lexer):
def test_metric_multiple_labels(lexer):
- fragment = u'go_gc_duration_seconds{instance="localhost:9090",job="alertmanager"}'
+ fragment = 'go_gc_duration_seconds{instance="localhost:9090",job="alertmanager"}'
tokens = [
(Token.Name.Variable, "go_gc_duration_seconds"),
(Token.Punctuation, "{"),
@@ -66,7 +66,7 @@ def test_metric_multiple_labels(lexer):
def test_metric_multiple_labels_with_spaces(lexer):
- fragment = u'go_gc_duration_seconds{ instance="localhost:9090", job="alertmanager" }'
+ fragment = 'go_gc_duration_seconds{ instance="localhost:9090", job="alertmanager" }'
tokens = [
(Token.Name.Variable, "go_gc_duration_seconds"),
(Token.Punctuation, "{"),
@@ -91,7 +91,7 @@ def test_metric_multiple_labels_with_spaces(lexer):
def test_expression_and_comment(lexer):
- fragment = u'go_gc_duration_seconds{instance="localhost:9090"} # single comment\n'
+ fragment = 'go_gc_duration_seconds{instance="localhost:9090"} # single comment\n'
tokens = [
(Token.Name.Variable, "go_gc_duration_seconds"),
(Token.Punctuation, "{"),
@@ -109,7 +109,7 @@ def test_expression_and_comment(lexer):
def test_function_delta(lexer):
- fragment = u'delta(cpu_temp_celsius{host="zeus"}[2h])'
+ fragment = 'delta(cpu_temp_celsius{host="zeus"}[2h])'
tokens = [
(Token.Keyword.Reserved, "delta"),
(Token.Operator, "("),
@@ -131,7 +131,7 @@ def test_function_delta(lexer):
def test_function_sum_with_args(lexer):
- fragment = u"sum by (app, proc) (instance_memory_usage_bytes)\n"
+ fragment = 'sum by (app, proc) (instance_memory_usage_bytes)\n'
tokens = [
(Token.Keyword, "sum"),
(Token.Text.Whitespace, " "),
@@ -153,7 +153,7 @@ def test_function_sum_with_args(lexer):
def test_function_multi_line(lexer):
- fragment = u"""label_replace(
+ fragment = """label_replace(
sum by (instance) (
irate(node_disk_read_bytes_total[2m])
) / 1024 / 1024,
@@ -229,7 +229,7 @@ def test_function_multi_line(lexer):
def test_function_multi_line_with_offset(lexer):
- fragment = u"""label_replace(
+ fragment = """label_replace(
avg by(instance)
(irate(node_cpu_seconds_total{mode = "idle"}[5m] offset 3s)
) * 100,
diff --git a/tests/test_python.py b/tests/test_python.py
index 8e53677b..73993936 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -61,57 +61,57 @@ def test_needs_name(lexer3):
"""
Tests that '@' is recognized as an Operator
"""
- fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
+ fragment = 'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
tokens = [
- (Token.Name, u'S'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Name, u'H'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Text, u' '),
- (Token.Name, u'beta'),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Text, u' '),
- (Token.Name, u'r'),
- (Token.Punctuation, u')'),
- (Token.Operator, u'.'),
- (Token.Name, u'T'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Text, u' '),
- (Token.Name, u'inv'),
- (Token.Punctuation, u'('),
- (Token.Name, u'H'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Text, u' '),
- (Token.Name, u'V'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Text, u' '),
- (Token.Name, u'H'),
- (Token.Operator, u'.'),
- (Token.Name, u'T'),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Name, u'H'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Text, u' '),
- (Token.Name, u'beta'),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Text, u' '),
- (Token.Name, u'r'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Name, 'S'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Name, 'H'),
+ (Token.Text, ' '),
+ (Token.Operator, '@'),
+ (Token.Text, ' '),
+ (Token.Name, 'beta'),
+ (Token.Text, ' '),
+ (Token.Operator, '-'),
+ (Token.Text, ' '),
+ (Token.Name, 'r'),
+ (Token.Punctuation, ')'),
+ (Token.Operator, '.'),
+ (Token.Name, 'T'),
+ (Token.Text, ' '),
+ (Token.Operator, '@'),
+ (Token.Text, ' '),
+ (Token.Name, 'inv'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'H'),
+ (Token.Text, ' '),
+ (Token.Operator, '@'),
+ (Token.Text, ' '),
+ (Token.Name, 'V'),
+ (Token.Text, ' '),
+ (Token.Operator, '@'),
+ (Token.Text, ' '),
+ (Token.Name, 'H'),
+ (Token.Operator, '.'),
+ (Token.Name, 'T'),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Operator, '@'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Name, 'H'),
+ (Token.Text, ' '),
+ (Token.Operator, '@'),
+ (Token.Text, ' '),
+ (Token.Name, 'beta'),
+ (Token.Text, ' '),
+ (Token.Operator, '-'),
+ (Token.Text, ' '),
+ (Token.Name, 'r'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer3.get_tokens(fragment)) == tokens
@@ -121,18 +121,18 @@ def test_pep_515(lexer3):
Tests that the lexer can parse numeric literals with underscores
"""
fragments = (
- (Token.Literal.Number.Integer, u'1_000_000'),
- (Token.Literal.Number.Float, u'1_000.000_001'),
- (Token.Literal.Number.Float, u'1_000e1_000j'),
- (Token.Literal.Number.Hex, u'0xCAFE_F00D'),
- (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'),
- (Token.Literal.Number.Oct, u'0o_777_123'),
+ (Token.Literal.Number.Integer, '1_000_000'),
+ (Token.Literal.Number.Float, '1_000.000_001'),
+ (Token.Literal.Number.Float, '1_000e1_000j'),
+ (Token.Literal.Number.Hex, '0xCAFE_F00D'),
+ (Token.Literal.Number.Bin, '0b_0011_1111_0100_1110'),
+ (Token.Literal.Number.Oct, '0o_777_123'),
)
for token, fragment in fragments:
tokens = [
(token, fragment),
- (Token.Text, u'\n'),
+ (Token.Text, '\n'),
]
assert list(lexer3.get_tokens(fragment)) == tokens
@@ -141,7 +141,7 @@ def test_walrus_operator(lexer3):
"""
Tests that ':=' is recognized as an Operator
"""
- fragment = u'if (a := 2) > 4:'
+ fragment = 'if (a := 2) > 4:'
tokens = [
(Token.Keyword, 'if'),
(Token.Text, ' '),
@@ -191,7 +191,7 @@ def test_fstring(lexer3):
(Token.Literal.String.Interpol, '}'),
(Token.Literal.String.Single, '.'),
(Token.Literal.String.Single, "'"),
- (Token.Text, u'\n')
+ (Token.Text, '\n')
]
), (
"f'He said his name is {name!r}.'\n",
diff --git a/tests/test_qbasiclexer.py b/tests/test_qbasiclexer.py
index fb721437..83077e7e 100644
--- a/tests/test_qbasiclexer.py
+++ b/tests/test_qbasiclexer.py
@@ -19,23 +19,23 @@ def lexer():
def test_keywords_with_dollar(lexer):
- fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
+ fragment = 'DIM x\nx = RIGHT$("abc", 1)\n'
expected = [
- (Token.Keyword.Declaration, u'DIM'),
- (Token.Text.Whitespace, u' '),
- (Token.Name.Variable.Global, u'x'),
- (Token.Text, u'\n'),
- (Token.Name.Variable.Global, u'x'),
- (Token.Text.Whitespace, u' '),
- (Token.Operator, u'='),
- (Token.Text.Whitespace, u' '),
- (Token.Keyword.Reserved, u'RIGHT$'),
- (Token.Punctuation, u'('),
- (Token.Literal.String.Double, u'"abc"'),
- (Token.Punctuation, u','),
- (Token.Text.Whitespace, u' '),
- (Token.Literal.Number.Integer.Long, u'1'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Declaration, 'DIM'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Name.Variable.Global, 'x'),
+ (Token.Text, '\n'),
+ (Token.Name.Variable.Global, 'x'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Operator, '='),
+ (Token.Text.Whitespace, ' '),
+ (Token.Keyword.Reserved, 'RIGHT$'),
+ (Token.Punctuation, '('),
+ (Token.Literal.String.Double, '"abc"'),
+ (Token.Punctuation, ','),
+ (Token.Text.Whitespace, ' '),
+ (Token.Literal.Number.Integer.Long, '1'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_r.py b/tests/test_r.py
index 2814acd7..c243652b 100644
--- a/tests/test_r.py
+++ b/tests/test_r.py
@@ -19,76 +19,76 @@ def lexer():
def test_call(lexer):
- fragment = u'f(1, a)\n'
+ fragment = 'f(1, a)\n'
tokens = [
- (Name.Function, u'f'),
- (Punctuation, u'('),
- (Token.Literal.Number, u'1'),
- (Punctuation, u','),
- (Token.Text, u' '),
- (Token.Name, u'a'),
- (Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Name.Function, 'f'),
+ (Punctuation, '('),
+ (Token.Literal.Number, '1'),
+ (Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Name, 'a'),
+ (Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_name1(lexer):
- fragment = u'._a_2.c'
+ fragment = '._a_2.c'
tokens = [
- (Name, u'._a_2.c'),
- (Token.Text, u'\n'),
+ (Name, '._a_2.c'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_name2(lexer):
# Invalid names are valid if backticks are used
- fragment = u'`.1 blah`'
+ fragment = '`.1 blah`'
tokens = [
- (Name, u'`.1 blah`'),
- (Token.Text, u'\n'),
+ (Name, '`.1 blah`'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_name3(lexer):
# Internal backticks can be escaped
- fragment = u'`.1 \\` blah`'
+ fragment = '`.1 \\` blah`'
tokens = [
- (Name, u'`.1 \\` blah`'),
- (Token.Text, u'\n'),
+ (Name, '`.1 \\` blah`'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_custom_operator(lexer):
- fragment = u'7 % and % 8'
+ fragment = '7 % and % 8'
tokens = [
- (Token.Literal.Number, u'7'),
- (Token.Text, u' '),
- (Token.Operator, u'% and %'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'8'),
- (Token.Text, u'\n'),
+ (Token.Literal.Number, '7'),
+ (Token.Text, ' '),
+ (Token.Operator, '% and %'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '8'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_indexing(lexer):
- fragment = u'a[1]'
+ fragment = 'a[1]'
tokens = [
- (Token.Name, u'a'),
- (Token.Punctuation, u'['),
- (Token.Literal.Number, u'1'),
- (Token.Punctuation, u']'),
- (Token.Text, u'\n'),
+ (Token.Name, 'a'),
+ (Token.Punctuation, '['),
+ (Token.Literal.Number, '1'),
+ (Token.Punctuation, ']'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_dot_name(lexer):
- fragment = u'. <- 1'
+ fragment = '. <- 1'
tokens = [
(Token.Name, '.'),
(Token.Text, ' '),
@@ -101,12 +101,12 @@ def test_dot_name(lexer):
def test_dot_indexing(lexer):
- fragment = u'.[1]'
+ fragment = '.[1]'
tokens = [
- (Token.Name, u'.'),
- (Token.Punctuation, u'['),
- (Token.Literal.Number, u'1'),
- (Token.Punctuation, u']'),
- (Token.Text, u'\n'),
+ (Token.Name, '.'),
+ (Token.Punctuation, '['),
+ (Token.Literal.Number, '1'),
+ (Token.Punctuation, ']'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
index 8e55696c..4029db1d 100644
--- a/tests/test_regexlexer.py
+++ b/tests/test_regexlexer.py
@@ -48,7 +48,7 @@ def test_tuple(lexer):
def test_multiline(lexer):
toks = list(lexer.get_tokens_unprocessed('a\ne'))
assert toks == [
- (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')]
+ (0, Text.Root, 'a'), (1, Text, '\n'), (2, Text.Root, 'e')]
def test_default(lexer):
diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
index 6f60d06e..dc4fffb4 100644
--- a/tests/test_rtf_formatter.py
+++ b/tests/test_rtf_formatter.py
@@ -27,10 +27,10 @@ def _build_message(*args, **kwargs):
result = _escape(kwargs.get('result', ''))
if string is None:
- string = (u"The expected output of '{t}'\n"
- u"\t\tShould be '{expected}'\n"
- u"\t\tActually outputs '{result}'\n"
- u"\t(WARNING: Partial Output of Result!)")
+ string = ("The expected output of '{t}'\n"
+ "\t\tShould be '{expected}'\n"
+ "\t\tActually outputs '{result}'\n"
+ "\t(WARNING: Partial Output of Result!)")
end = -len(_escape(foot))
start = end - len(expected)
@@ -51,31 +51,31 @@ def format_rtf(t):
def test_rtf_header():
- t = u''
+ t = ''
result = format_rtf(t)
expected = r'{\rtf1\ansi\uc0'
- msg = (u"RTF documents are expected to start with '{expected}'\n"
- u"\t\tStarts intead with '{result}'\n"
- u"\t(WARNING: Partial Output of Result!)".format(
+ msg = ("RTF documents are expected to start with '{expected}'\n"
+ "\t\tStarts intead with '{result}'\n"
+ "\t(WARNING: Partial Output of Result!)".format(
expected=expected,
result=result[:len(expected)]))
assert result.startswith(expected), msg
def test_rtf_footer():
- t = u''
+ t = ''
result = format_rtf(t)
expected = ''
- msg = (u"RTF documents are expected to end with '{expected}'\n"
- u"\t\tEnds intead with '{result}'\n"
- u"\t(WARNING: Partial Output of Result!)".format(
+ msg = ("RTF documents are expected to end with '{expected}'\n"
+ "\t\tEnds intead with '{result}'\n"
+ "\t(WARNING: Partial Output of Result!)".format(
expected=_escape(expected),
result=_escape(result[-len(expected):])))
assert result.endswith(expected+foot), msg
def test_ascii_characters():
- t = u'a b c d ~'
+ t = 'a b c d ~'
result = format_rtf(t)
expected = (r'a b c d ~')
msg = _build_message(t=t, result=result, expected=expected)
@@ -83,7 +83,7 @@ def test_ascii_characters():
def test_escape_characters():
- t = u'\\ {{'
+ t = '\\ {{'
result = format_rtf(t)
expected = r'\\ \{\{'
msg = _build_message(t=t, result=result, expected=expected)
@@ -91,7 +91,7 @@ def test_escape_characters():
def test_single_characters():
- t = u'â € ¤ каждой'
+ t = 'â € ¤ каждой'
result = format_rtf(t)
expected = (r'{\u226} {\u8364} {\u164} '
r'{\u1082}{\u1072}{\u1078}{\u1076}{\u1086}{\u1081}')
@@ -100,7 +100,7 @@ def test_single_characters():
def test_double_characters():
- t = u'က 힣 ↕ ↕︎ 鼖'
+ t = 'က 힣 ↕ ↕︎ 鼖'
result = format_rtf(t)
expected = (r'{\u4096} {\u55203} {\u8597} '
r'{\u8597}{\u65038} {\u55422}{\u56859}')
diff --git a/tests/test_ruby.py b/tests/test_ruby.py
index 86a9ee77..24e3bef1 100644
--- a/tests/test_ruby.py
+++ b/tests/test_ruby.py
@@ -19,131 +19,131 @@ def lexer():
def test_range_syntax1(lexer):
- fragment = u'1..3\n'
+ fragment = '1..3\n'
tokens = [
- (Number.Integer, u'1'),
- (Operator, u'..'),
- (Number.Integer, u'3'),
- (Text, u'\n'),
+ (Number.Integer, '1'),
+ (Operator, '..'),
+ (Number.Integer, '3'),
+ (Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_range_syntax2(lexer):
- fragment = u'1...3\n'
+ fragment = '1...3\n'
tokens = [
- (Number.Integer, u'1'),
- (Operator, u'...'),
- (Number.Integer, u'3'),
- (Text, u'\n'),
+ (Number.Integer, '1'),
+ (Operator, '...'),
+ (Number.Integer, '3'),
+ (Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_range_syntax3(lexer):
- fragment = u'1 .. 3\n'
+ fragment = '1 .. 3\n'
tokens = [
- (Number.Integer, u'1'),
- (Text, u' '),
- (Operator, u'..'),
- (Text, u' '),
- (Number.Integer, u'3'),
- (Text, u'\n'),
+ (Number.Integer, '1'),
+ (Text, ' '),
+ (Operator, '..'),
+ (Text, ' '),
+ (Number.Integer, '3'),
+ (Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_interpolation_nested_curly(lexer):
fragment = (
- u'"A#{ (3..5).group_by { |x| x/2}.map '
- u'do |k,v| "#{k}" end.join }" + "Z"\n')
+ '"A#{ (3..5).group_by { |x| x/2}.map '
+ 'do |k,v| "#{k}" end.join }" + "Z"\n')
tokens = [
- (Token.Literal.String.Double, u'"'),
- (Token.Literal.String.Double, u'A'),
- (Token.Literal.String.Interpol, u'#{'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Operator, u'..'),
- (Token.Literal.Number.Integer, u'5'),
- (Token.Punctuation, u')'),
- (Token.Operator, u'.'),
- (Token.Name, u'group_by'),
- (Token.Text, u' '),
- (Token.Literal.String.Interpol, u'{'),
- (Token.Text, u' '),
- (Token.Operator, u'|'),
- (Token.Name, u'x'),
- (Token.Operator, u'|'),
- (Token.Text, u' '),
- (Token.Name, u'x'),
- (Token.Operator, u'/'),
- (Token.Literal.Number.Integer, u'2'),
- (Token.Literal.String.Interpol, u'}'),
- (Token.Operator, u'.'),
- (Token.Name, u'map'),
- (Token.Text, u' '),
- (Token.Keyword, u'do'),
- (Token.Text, u' '),
- (Token.Operator, u'|'),
- (Token.Name, u'k'),
- (Token.Punctuation, u','),
- (Token.Name, u'v'),
- (Token.Operator, u'|'),
- (Token.Text, u' '),
- (Token.Literal.String.Double, u'"'),
- (Token.Literal.String.Interpol, u'#{'),
- (Token.Name, u'k'),
- (Token.Literal.String.Interpol, u'}'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u' '),
- (Token.Keyword, u'end'),
- (Token.Operator, u'.'),
- (Token.Name, u'join'),
- (Token.Text, u' '),
- (Token.Literal.String.Interpol, u'}'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u' '),
- (Token.Operator, u'+'),
- (Token.Text, u' '),
- (Token.Literal.String.Double, u'"'),
- (Token.Literal.String.Double, u'Z'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u'\n'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Literal.String.Double, 'A'),
+ (Token.Literal.String.Interpol, '#{'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '3'),
+ (Token.Operator, '..'),
+ (Token.Literal.Number.Integer, '5'),
+ (Token.Punctuation, ')'),
+ (Token.Operator, '.'),
+ (Token.Name, 'group_by'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Interpol, '{'),
+ (Token.Text, ' '),
+ (Token.Operator, '|'),
+ (Token.Name, 'x'),
+ (Token.Operator, '|'),
+ (Token.Text, ' '),
+ (Token.Name, 'x'),
+ (Token.Operator, '/'),
+ (Token.Literal.Number.Integer, '2'),
+ (Token.Literal.String.Interpol, '}'),
+ (Token.Operator, '.'),
+ (Token.Name, 'map'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'do'),
+ (Token.Text, ' '),
+ (Token.Operator, '|'),
+ (Token.Name, 'k'),
+ (Token.Punctuation, ','),
+ (Token.Name, 'v'),
+ (Token.Operator, '|'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Double, '"'),
+ (Token.Literal.String.Interpol, '#{'),
+ (Token.Name, 'k'),
+ (Token.Literal.String.Interpol, '}'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'end'),
+ (Token.Operator, '.'),
+ (Token.Name, 'join'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Interpol, '}'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, ' '),
+ (Token.Operator, '+'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Double, '"'),
+ (Token.Literal.String.Double, 'Z'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_operator_methods(lexer):
- fragment = u'x.==4\n'
+ fragment = 'x.==4\n'
tokens = [
- (Token.Name, u'x'),
- (Token.Operator, u'.'),
- (Token.Name.Operator, u'=='),
- (Token.Literal.Number.Integer, u'4'),
- (Token.Text, u'\n'),
+ (Token.Name, 'x'),
+ (Token.Operator, '.'),
+ (Token.Name.Operator, '=='),
+ (Token.Literal.Number.Integer, '4'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_escaped_bracestring(lexer):
- fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
+ fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n'
tokens = [
- (Token.Name, u'str'),
- (Token.Operator, u'.'),
- (Token.Name, u'gsub'),
- (Token.Punctuation, u'('),
- (Token.Literal.String.Regex, u'%r{'),
- (Token.Literal.String.Regex, u'\\\\'),
- (Token.Literal.String.Regex, u'\\\\'),
- (Token.Literal.String.Regex, u'}'),
- (Token.Punctuation, u','),
- (Token.Text, u' '),
- (Token.Literal.String.Double, u'"'),
- (Token.Literal.String.Double, u'/'),
- (Token.Literal.String.Double, u'"'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Token.Name, 'str'),
+ (Token.Operator, '.'),
+ (Token.Name, 'gsub'),
+ (Token.Punctuation, '('),
+ (Token.Literal.String.Regex, '%r{'),
+ (Token.Literal.String.Regex, '\\\\'),
+ (Token.Literal.String.Regex, '\\\\'),
+ (Token.Literal.String.Regex, '}'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Literal.String.Double, '"'),
+ (Token.Literal.String.Double, '/'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_shell.py b/tests/test_shell.py
index 79611f16..a39c0f20 100644
--- a/tests/test_shell.py
+++ b/tests/test_shell.py
@@ -34,193 +34,193 @@ def lexer_powershell_session():
def test_curly_no_escape_and_quotes(lexer_bash):
- fragment = u'echo "${a//["b"]/}"\n'
+ fragment = 'echo "${a//["b"]/}"\n'
tokens = [
- (Token.Name.Builtin, u'echo'),
- (Token.Text, u' '),
- (Token.Literal.String.Double, u'"'),
- (Token.String.Interpol, u'${'),
- (Token.Name.Variable, u'a'),
- (Token.Punctuation, u'//['),
- (Token.Literal.String.Double, u'"b"'),
- (Token.Punctuation, u']/'),
- (Token.String.Interpol, u'}'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u'\n'),
+ (Token.Name.Builtin, 'echo'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Double, '"'),
+ (Token.String.Interpol, '${'),
+ (Token.Name.Variable, 'a'),
+ (Token.Punctuation, '//['),
+ (Token.Literal.String.Double, '"b"'),
+ (Token.Punctuation, ']/'),
+ (Token.String.Interpol, '}'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, '\n'),
]
assert list(lexer_bash.get_tokens(fragment)) == tokens
def test_curly_with_escape(lexer_bash):
- fragment = u'echo ${a//[\\"]/}\n'
+ fragment = 'echo ${a//[\\"]/}\n'
tokens = [
- (Token.Name.Builtin, u'echo'),
- (Token.Text, u' '),
- (Token.String.Interpol, u'${'),
- (Token.Name.Variable, u'a'),
- (Token.Punctuation, u'//['),
- (Token.Literal.String.Escape, u'\\"'),
- (Token.Punctuation, u']/'),
- (Token.String.Interpol, u'}'),
- (Token.Text, u'\n'),
+ (Token.Name.Builtin, 'echo'),
+ (Token.Text, ' '),
+ (Token.String.Interpol, '${'),
+ (Token.Name.Variable, 'a'),
+ (Token.Punctuation, '//['),
+ (Token.Literal.String.Escape, '\\"'),
+ (Token.Punctuation, ']/'),
+ (Token.String.Interpol, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer_bash.get_tokens(fragment)) == tokens
def test_parsed_single(lexer_bash):
- fragment = u"a=$'abc\\''\n"
+ fragment = "a=$'abc\\''\n"
tokens = [
- (Token.Name.Variable, u'a'),
- (Token.Operator, u'='),
- (Token.Literal.String.Single, u"$'abc\\''"),
- (Token.Text, u'\n'),
+ (Token.Name.Variable, 'a'),
+ (Token.Operator, '='),
+ (Token.Literal.String.Single, "$'abc\\''"),
+ (Token.Text, '\n'),
]
assert list(lexer_bash.get_tokens(fragment)) == tokens
def test_short_variable_names(lexer_bash):
- fragment = u'x="$"\ny="$_"\nz="$abc"\n'
+ fragment = 'x="$"\ny="$_"\nz="$abc"\n'
tokens = [
# single lone $
- (Token.Name.Variable, u'x'),
- (Token.Operator, u'='),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u'$'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u'\n'),
+ (Token.Name.Variable, 'x'),
+ (Token.Operator, '='),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, '$'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, '\n'),
# single letter shell var
- (Token.Name.Variable, u'y'),
- (Token.Operator, u'='),
- (Token.Literal.String.Double, u'"'),
- (Token.Name.Variable, u'$_'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u'\n'),
+ (Token.Name.Variable, 'y'),
+ (Token.Operator, '='),
+ (Token.Literal.String.Double, '"'),
+ (Token.Name.Variable, '$_'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, '\n'),
# multi-letter user var
- (Token.Name.Variable, u'z'),
- (Token.Operator, u'='),
- (Token.Literal.String.Double, u'"'),
- (Token.Name.Variable, u'$abc'),
- (Token.Literal.String.Double, u'"'),
- (Token.Text, u'\n'),
+ (Token.Name.Variable, 'z'),
+ (Token.Operator, '='),
+ (Token.Literal.String.Double, '"'),
+ (Token.Name.Variable, '$abc'),
+ (Token.Literal.String.Double, '"'),
+ (Token.Text, '\n'),
]
assert list(lexer_bash.get_tokens(fragment)) == tokens
def test_array_nums(lexer_bash):
- fragment = u'a=(1 2 3)\n'
+ fragment = 'a=(1 2 3)\n'
tokens = [
- (Token.Name.Variable, u'a'),
- (Token.Operator, u'='),
- (Token.Operator, u'('),
- (Token.Literal.Number, u'1'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'2'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'3'),
- (Token.Operator, u')'),
- (Token.Text, u'\n'),
+ (Token.Name.Variable, 'a'),
+ (Token.Operator, '='),
+ (Token.Operator, '('),
+ (Token.Literal.Number, '1'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '2'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '3'),
+ (Token.Operator, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer_bash.get_tokens(fragment)) == tokens
def test_end_of_line_nums(lexer_bash):
- fragment = u'a=1\nb=2 # comment\n'
+ fragment = 'a=1\nb=2 # comment\n'
tokens = [
- (Token.Name.Variable, u'a'),
- (Token.Operator, u'='),
- (Token.Literal.Number, u'1'),
- (Token.Text, u'\n'),
- (Token.Name.Variable, u'b'),
- (Token.Operator, u'='),
- (Token.Literal.Number, u'2'),
- (Token.Text, u' '),
- (Token.Comment.Single, u'# comment\n'),
+ (Token.Name.Variable, 'a'),
+ (Token.Operator, '='),
+ (Token.Literal.Number, '1'),
+ (Token.Text, '\n'),
+ (Token.Name.Variable, 'b'),
+ (Token.Operator, '='),
+ (Token.Literal.Number, '2'),
+ (Token.Text, ' '),
+ (Token.Comment.Single, '# comment\n'),
]
assert list(lexer_bash.get_tokens(fragment)) == tokens
def test_newline_in_echo(lexer_session):
- fragment = u'$ echo \\\nhi\nhi\n'
+ fragment = '$ echo \\\nhi\nhi\n'
tokens = [
- (Token.Text, u''),
- (Token.Generic.Prompt, u'$'),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'echo'),
- (Token.Text, u' '),
- (Token.Literal.String.Escape, u'\\\n'),
- (Token.Text, u'hi'),
- (Token.Text, u'\n'),
- (Token.Generic.Output, u'hi\n'),
+ (Token.Text, ''),
+ (Token.Generic.Prompt, '$'),
+ (Token.Text, ' '),
+ (Token.Name.Builtin, 'echo'),
+ (Token.Text, ' '),
+ (Token.Literal.String.Escape, '\\\n'),
+ (Token.Text, 'hi'),
+ (Token.Text, '\n'),
+ (Token.Generic.Output, 'hi\n'),
]
assert list(lexer_session.get_tokens(fragment)) == tokens
def test_msdos_gt_only(lexer_msdos):
- fragment = u'> py\nhi\n'
+ fragment = '> py\nhi\n'
tokens = [
- (Token.Text, u''),
- (Token.Generic.Prompt, u'>'),
- (Token.Text, u' '),
- (Token.Text, u'py'),
- (Token.Text, u'\n'),
- (Token.Generic.Output, u'hi\n'),
+ (Token.Text, ''),
+ (Token.Generic.Prompt, '>'),
+ (Token.Text, ' '),
+ (Token.Text, 'py'),
+ (Token.Text, '\n'),
+ (Token.Generic.Output, 'hi\n'),
]
assert list(lexer_msdos.get_tokens(fragment)) == tokens
def test_powershell_session(lexer_powershell_session):
- fragment = u'PS C:\\> Get-ChildItem\n'
+ fragment = 'PS C:\\> Get-ChildItem\n'
tokens = [
- (Token.Name.Builtin, u''),
- (Token.Generic.Prompt, u'PS C:\\> '),
- (Token.Name.Builtin, u'Get-ChildItem'),
- (Token.Text, u'\n')
+ (Token.Name.Builtin, ''),
+ (Token.Generic.Prompt, 'PS C:\\> '),
+ (Token.Name.Builtin, 'Get-ChildItem'),
+ (Token.Text, '\n')
]
assert list(lexer_powershell_session.get_tokens(fragment)) == tokens
- fragment = u'PS> Get-ChildItem\n'
+ fragment = 'PS> Get-ChildItem\n'
tokens = [
- (Token.Name.Builtin, u''),
- (Token.Generic.Prompt, u'PS> '),
- (Token.Name.Builtin, u'Get-ChildItem'),
- (Token.Text, u'\n')
+ (Token.Name.Builtin, ''),
+ (Token.Generic.Prompt, 'PS> '),
+ (Token.Name.Builtin, 'Get-ChildItem'),
+ (Token.Text, '\n')
]
assert list(lexer_powershell_session.get_tokens(fragment)) == tokens
- fragment = u'PS > Get-ChildItem\n'
+ fragment = 'PS > Get-ChildItem\n'
tokens = [
- (Token.Name.Builtin, u''),
- (Token.Generic.Prompt, u'PS > '),
- (Token.Name.Builtin, u'Get-ChildItem'),
- (Token.Text, u'\n')
+ (Token.Name.Builtin, ''),
+ (Token.Generic.Prompt, 'PS > '),
+ (Token.Name.Builtin, 'Get-ChildItem'),
+ (Token.Text, '\n')
]
assert list(lexer_powershell_session.get_tokens(fragment)) == tokens
def test_powershell_remoting_session(lexer_powershell_session):
- fragment = u'[Long-NetBIOS-Hostname]: PS C:\\> Get-ChildItem\n'
+ fragment = '[Long-NetBIOS-Hostname]: PS C:\\> Get-ChildItem\n'
tokens = [
- (Token.Name.Builtin, u''),
- (Token.Generic.Prompt, u'[Long-NetBIOS-Hostname]: PS C:\\> '),
- (Token.Name.Builtin, u'Get-ChildItem'),
- (Token.Text, u'\n')
+ (Token.Name.Builtin, ''),
+ (Token.Generic.Prompt, '[Long-NetBIOS-Hostname]: PS C:\\> '),
+ (Token.Name.Builtin, 'Get-ChildItem'),
+ (Token.Text, '\n')
]
assert list(lexer_powershell_session.get_tokens(fragment)) == tokens
def test_virtualenv(lexer_session):
- fragment = u'(env) [~/project]$ foo -h\n'
+ fragment = '(env) [~/project]$ foo -h\n'
tokens = [
- (Token.Text, u''),
- (Token.Generic.Prompt.VirtualEnv, u'(env)'),
- (Token.Text, u''),
- (Token.Text, u' '),
- (Token.Text, u''),
- (Token.Generic.Prompt, u'[~/project]$'),
- (Token.Text, u' '),
- (Token.Text, u'foo'),
- (Token.Text, u' '),
- (Token.Text, u'-h'),
- (Token.Text, u'\n'),
+ (Token.Text, ''),
+ (Token.Generic.Prompt.VirtualEnv, '(env)'),
+ (Token.Text, ''),
+ (Token.Text, ' '),
+ (Token.Text, ''),
+ (Token.Generic.Prompt, '[~/project]$'),
+ (Token.Text, ' '),
+ (Token.Text, 'foo'),
+ (Token.Text, ' '),
+ (Token.Text, '-h'),
+ (Token.Text, '\n'),
]
assert list(lexer_session.get_tokens(fragment)) == tokens
diff --git a/tests/test_smarty.py b/tests/test_smarty.py
index 2b45abee..caf23622 100644
--- a/tests/test_smarty.py
+++ b/tests/test_smarty.py
@@ -19,21 +19,21 @@ def lexer():
def test_nested_curly(lexer):
- fragment = u'{templateFunction param={anotherFunction} param2=$something}\n'
+ fragment = '{templateFunction param={anotherFunction} param2=$something}\n'
tokens = [
- (Token.Comment.Preproc, u'{'),
- (Token.Name.Function, u'templateFunction'),
- (Token.Text, u' '),
- (Token.Name.Attribute, u'param'),
- (Token.Operator, u'='),
- (Token.Comment.Preproc, u'{'),
- (Token.Name.Attribute, u'anotherFunction'),
- (Token.Comment.Preproc, u'}'),
- (Token.Text, u' '),
- (Token.Name.Attribute, u'param2'),
- (Token.Operator, u'='),
- (Token.Name.Variable, u'$something'),
- (Token.Comment.Preproc, u'}'),
- (Token.Other, u'\n'),
+ (Token.Comment.Preproc, '{'),
+ (Token.Name.Function, 'templateFunction'),
+ (Token.Text, ' '),
+ (Token.Name.Attribute, 'param'),
+ (Token.Operator, '='),
+ (Token.Comment.Preproc, '{'),
+ (Token.Name.Attribute, 'anotherFunction'),
+ (Token.Comment.Preproc, '}'),
+ (Token.Text, ' '),
+ (Token.Name.Attribute, 'param2'),
+ (Token.Operator, '='),
+ (Token.Name.Variable, '$something'),
+ (Token.Comment.Preproc, '}'),
+ (Token.Other, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_sql.py b/tests/test_sql.py
index ead06de7..bd71d1d0 100644
--- a/tests/test_sql.py
+++ b/tests/test_sql.py
@@ -62,7 +62,7 @@ def test_can_lex_integer(lexer):
def test_can_lex_names(lexer):
_assert_are_tokens_of_type(lexer,
- u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
+ 'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
Name)
diff --git a/tests/test_textfmts.py b/tests/test_textfmts.py
index f4ce9b33..b8e25b51 100644
--- a/tests/test_textfmts.py
+++ b/tests/test_textfmts.py
@@ -19,62 +19,62 @@ def lexer():
def test_http_status_line(lexer):
- fragment = u'HTTP/1.1 200 OK\n'
+ fragment = 'HTTP/1.1 200 OK\n'
tokens = [
- (Token.Keyword.Reserved, u'HTTP'),
- (Token.Operator, u'/'),
- (Token.Number, u'1.1'),
- (Token.Text, u' '),
- (Token.Number, u'200'),
- (Token.Text, u' '),
- (Token.Name.Exception, u'OK'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Reserved, 'HTTP'),
+ (Token.Operator, '/'),
+ (Token.Number, '1.1'),
+ (Token.Text, ' '),
+ (Token.Number, '200'),
+ (Token.Text, ' '),
+ (Token.Name.Exception, 'OK'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_http_status_line_without_reason_phrase(lexer):
- fragment = u'HTTP/1.1 200\n'
+ fragment = 'HTTP/1.1 200\n'
tokens = [
- (Token.Keyword.Reserved, u'HTTP'),
- (Token.Operator, u'/'),
- (Token.Number, u'1.1'),
- (Token.Text, u' '),
- (Token.Number, u'200'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Reserved, 'HTTP'),
+ (Token.Operator, '/'),
+ (Token.Number, '1.1'),
+ (Token.Text, ' '),
+ (Token.Number, '200'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_http_status_line_without_reason_phrase_rfc_7230(lexer):
- fragment = u'HTTP/1.1 200 \n'
+ fragment = 'HTTP/1.1 200 \n'
tokens = [
- (Token.Keyword.Reserved, u'HTTP'),
- (Token.Operator, u'/'),
- (Token.Number, u'1.1'),
- (Token.Text, u' '),
- (Token.Number, u'200'),
- (Token.Text, u' '),
- (Token.Text, u'\n'),
+ (Token.Keyword.Reserved, 'HTTP'),
+ (Token.Operator, '/'),
+ (Token.Number, '1.1'),
+ (Token.Text, ' '),
+ (Token.Number, '200'),
+ (Token.Text, ' '),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_application_xml(lexer):
- fragment = u'GET / HTTP/1.0\nContent-Type: application/xml\n\n<foo>\n'
+ fragment = 'GET / HTTP/1.0\nContent-Type: application/xml\n\n<foo>\n'
tokens = [
- (Token.Name.Tag, u'<foo'),
- (Token.Name.Tag, u'>'),
- (Token.Text, u'\n'),
+ (Token.Name.Tag, '<foo'),
+ (Token.Name.Tag, '>'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment))[-len(tokens):] == tokens
def test_application_calendar_xml(lexer):
- fragment = u'GET / HTTP/1.0\nContent-Type: application/calendar+xml\n\n<foo>\n'
+ fragment = 'GET / HTTP/1.0\nContent-Type: application/calendar+xml\n\n<foo>\n'
tokens = [
- (Token.Name.Tag, u'<foo'),
- (Token.Name.Tag, u'>'),
- (Token.Text, u'\n'),
+ (Token.Name.Tag, '<foo'),
+ (Token.Name.Tag, '>'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment))[-len(tokens):] == tokens
diff --git a/tests/test_usd.py b/tests/test_usd.py
index 954820b5..8edebeab 100644..100755
--- a/tests/test_usd.py
+++ b/tests/test_usd.py
@@ -217,66 +217,66 @@ class Features(_Common):
self.assertEqual(
[
- (Keyword.Token, u"custom"),
- (Whitespace, u" "),
- (Keyword.Type, u"int[]"),
- (Whitespace, u" "),
- (Name.Attribute, u"foo"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Punctuation, u"["),
- (Number, u"8"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"10"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"14"),
- (Punctuation, u"]"),
- (Whitespace, u"\n"),
- (Keyword.Token, u"custom"),
- (Whitespace, u" "),
- (Keyword.Type, u"int[]"),
- (Whitespace, u" "),
- (Name.Attribute, u"foo"),
- (Text, u"."),
- (Name.Keyword.Tokens, u"timeSamples"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Punctuation, u"{"),
- (Whitespace, u"\n "),
- (Number, u"1"),
- (Punctuation, u":"),
- (Whitespace, u" "),
- (Punctuation, u"["),
- (Number, u"8"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"0"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"14"),
- (Punctuation, u"]"),
- (Punctuation, u","),
- (Whitespace, u"\n "),
- (Number, u"2"),
- (Punctuation, u":"),
- (Whitespace, u" "),
- (Punctuation, u"["),
- (Number, u"-8"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"0"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"14"),
- (Punctuation, u"]"),
- (Punctuation, u","),
- (Whitespace, u"\n"),
- (Punctuation, u"}"),
- (Whitespace, u"\n"),
+ (Keyword.Token, "custom"),
+ (Whitespace, " "),
+ (Keyword.Type, "int[]"),
+ (Whitespace, " "),
+ (Name.Attribute, "foo"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Punctuation, "["),
+ (Number, "8"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "10"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "14"),
+ (Punctuation, "]"),
+ (Whitespace, "\n"),
+ (Keyword.Token, "custom"),
+ (Whitespace, " "),
+ (Keyword.Type, "int[]"),
+ (Whitespace, " "),
+ (Name.Attribute, "foo"),
+ (Text, "."),
+ (Name.Keyword.Tokens, "timeSamples"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Punctuation, "{"),
+ (Whitespace, "\n "),
+ (Number, "1"),
+ (Punctuation, ":"),
+ (Whitespace, " "),
+ (Punctuation, "["),
+ (Number, "8"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "0"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "14"),
+ (Punctuation, "]"),
+ (Punctuation, ","),
+ (Whitespace, "\n "),
+ (Number, "2"),
+ (Punctuation, ":"),
+ (Whitespace, " "),
+ (Punctuation, "["),
+ (Number, "-8"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "0"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "14"),
+ (Punctuation, "]"),
+ (Punctuation, ","),
+ (Whitespace, "\n"),
+ (Punctuation, "}"),
+ (Whitespace, "\n"),
],
self._get(timesamples),
)
@@ -292,8 +292,8 @@ class Features(_Common):
self.assertEqual(
[
- (String, u'"""\ncustom int[] foo = [8, 10, 14]\n"""'),
- (Whitespace, u"\n"),
+ (String, '"""\ncustom int[] foo = [8, 10, 14]\n"""'),
+ (Whitespace, "\n"),
],
self._get(code),
)
@@ -304,23 +304,23 @@ class Features(_Common):
self.assertEqual(
[
- (Number, u"8"),
- (Whitespace, u" "),
- (Number, u"8.0123312132"),
- (Punctuation, u","),
- (Whitespace, u" "),
- (Number, u"-4"),
- (Whitespace, u" "),
- (Number, u"-14.123"),
- (Whitespace, u" "),
- (Number, u"1e10"),
- (Whitespace, u" "),
- (Number, u"0.1e10"),
- (Whitespace, u" "),
- (Number, u"10.123e+10"),
- (Whitespace, u" "),
- (Number, u"0.123e-14"),
- (Whitespace, u"\n"),
+ (Number, "8"),
+ (Whitespace, " "),
+ (Number, "8.0123312132"),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (Number, "-4"),
+ (Whitespace, " "),
+ (Number, "-14.123"),
+ (Whitespace, " "),
+ (Number, "1e10"),
+ (Whitespace, " "),
+ (Number, "0.1e10"),
+ (Whitespace, " "),
+ (Number, "10.123e+10"),
+ (Whitespace, " "),
+ (Number, "0.123e-14"),
+ (Whitespace, "\n"),
],
self._get(code),
)
@@ -349,86 +349,86 @@ class Features(_Common):
self.assertEqual(
[
- (Keyword.Tokens, u"def"),
- (Whitespace, u" "),
- (Text, u"Xform"),
- (Whitespace, u" "),
- (String, u'"BottleMedical"'),
- (Whitespace, u" "),
- (Punctuation, u"("),
- (Whitespace, u"\n "),
- (Name.Builtins, u"kind"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (String, u'"prop"'),
- (Whitespace, u"\n "),
- (Keyword.Tokens, u"payload"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (String.Interpol, u"@./BottleMedical_payload.usd@"),
- (Name.Namespace, u"</BottleMedical>"),
- (Whitespace, u"\n "),
- (Keyword.Tokens, u"variants"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Punctuation, u"{"),
- (Whitespace, u"\n "),
- (Keyword.Type, u"string"),
- (Whitespace, u" "),
- (Name.Attribute, u"modelingVariant"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (String, u'"LiquidBottleLg"'),
- (Whitespace, u"\n "),
- (Keyword.Type, u"string"),
- (Whitespace, u" "),
- (Name.Attribute, u"shadingComplexity"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (String, u'"full"'),
- (Whitespace, u"\n "),
- (Punctuation, u"}"),
- (Whitespace, u"\n "),
- (Keyword.Type, u"add"),
- (Text.Whitespace, u" "),
- (Name.Attribute, u"variantSets"),
- (Text.Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Punctuation, u"["),
- (String, u'"modelingVariant"'),
- (Punctuation, u","),
- (Whitespace, u" "),
- (String, u'"shadingComplexity"'),
- (Punctuation, u"]"),
- (Whitespace, u"\n"),
- (Punctuation, u")"),
- (Whitespace, u"\n"),
- (Punctuation, u"{"),
- (Whitespace, u"\n "),
- (Keyword.Tokens, u"variantSet"),
- (Whitespace, u" "),
- (String, u'"modelingVariant"'),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Punctuation, u"{"),
- (Whitespace, u"\n "),
- (String, u'"ALL_VARIANTS"'),
- (Whitespace, u" "),
- (Punctuation, u"{"),
- (Whitespace, u"\n "),
- (Punctuation, u"}"),
- (Whitespace, u"\n "),
- (Punctuation, u"}"),
- (Whitespace, u"\n"),
- (Punctuation, u"}"),
- (Whitespace, u"\n"),
+ (Keyword.Tokens, "def"),
+ (Whitespace, " "),
+ (Text, "Xform"),
+ (Whitespace, " "),
+ (String, '"BottleMedical"'),
+ (Whitespace, " "),
+ (Punctuation, "("),
+ (Whitespace, "\n "),
+ (Name.Builtins, "kind"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (String, '"prop"'),
+ (Whitespace, "\n "),
+ (Keyword.Tokens, "payload"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (String.Interpol, "@./BottleMedical_payload.usd@"),
+ (Name.Namespace, "</BottleMedical>"),
+ (Whitespace, "\n "),
+ (Keyword.Tokens, "variants"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Punctuation, "{"),
+ (Whitespace, "\n "),
+ (Keyword.Type, "string"),
+ (Whitespace, " "),
+ (Name.Attribute, "modelingVariant"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (String, '"LiquidBottleLg"'),
+ (Whitespace, "\n "),
+ (Keyword.Type, "string"),
+ (Whitespace, " "),
+ (Name.Attribute, "shadingComplexity"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (String, '"full"'),
+ (Whitespace, "\n "),
+ (Punctuation, "}"),
+ (Whitespace, "\n "),
+ (Keyword.Type, "add"),
+ (Text.Whitespace, " "),
+ (Name.Attribute, "variantSets"),
+ (Text.Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Punctuation, "["),
+ (String, '"modelingVariant"'),
+ (Punctuation, ","),
+ (Whitespace, " "),
+ (String, '"shadingComplexity"'),
+ (Punctuation, "]"),
+ (Whitespace, "\n"),
+ (Punctuation, ")"),
+ (Whitespace, "\n"),
+ (Punctuation, "{"),
+ (Whitespace, "\n "),
+ (Keyword.Tokens, "variantSet"),
+ (Whitespace, " "),
+ (String, '"modelingVariant"'),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Punctuation, "{"),
+ (Whitespace, "\n "),
+ (String, '"ALL_VARIANTS"'),
+ (Whitespace, " "),
+ (Punctuation, "{"),
+ (Whitespace, "\n "),
+ (Punctuation, "}"),
+ (Whitespace, "\n "),
+ (Punctuation, "}"),
+ (Whitespace, "\n"),
+ (Punctuation, "}"),
+ (Whitespace, "\n"),
],
self._get(code),
)
@@ -438,13 +438,13 @@ class Features(_Common):
code = '"Some \'text"'
self.assertEqual(
- [(String, code), (Whitespace, u"\n")], self._get(code),
+ [(String, code), (Whitespace, "\n")], self._get(code),
)
def test_string_multiple_line(self):
"""Check that different multi-line strings work correctly."""
code1 = textwrap.dedent(
- u'''\
+ '''\
"""
Some text multiline
"""'''
@@ -455,7 +455,7 @@ class Features(_Common):
)
code2 = textwrap.dedent(
- u'''\
+ '''\
"""Some text multiline
"""'''
)
@@ -465,7 +465,7 @@ class Features(_Common):
)
code3 = textwrap.dedent(
- u'''\
+ '''\
"""
Some text multiline"""'''
)
@@ -491,32 +491,32 @@ class EdgeCases(_Common):
self.assertEqual(
[
- (Keyword.Type, u"float[]"),
- (Whitespace, u" "),
- (Name.Attribute, u"primvars:skel:jointWeights"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Punctuation, u"["),
- (Number, u"1"),
- (Punctuation, u"]"),
- (Whitespace, u" "),
- (Punctuation, u"("),
- (Whitespace, u"\n "),
- (Name.Builtins, u"elementSize"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (Number, u"1"),
- (Whitespace, u"\n "),
- (Name.Builtins, u"interpolation"),
- (Whitespace, u" "),
- (Operator, u"="),
- (Whitespace, u" "),
- (String, u'"constant"'),
- (Whitespace, u"\n"),
- (Punctuation, u")"),
- (Whitespace, u"\n"),
+ (Keyword.Type, "float[]"),
+ (Whitespace, " "),
+ (Name.Attribute, "primvars:skel:jointWeights"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Punctuation, "["),
+ (Number, "1"),
+ (Punctuation, "]"),
+ (Whitespace, " "),
+ (Punctuation, "("),
+ (Whitespace, "\n "),
+ (Name.Builtins, "elementSize"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (Number, "1"),
+ (Whitespace, "\n "),
+ (Name.Builtins, "interpolation"),
+ (Whitespace, " "),
+ (Operator, "="),
+ (Whitespace, " "),
+ (String, '"constant"'),
+ (Whitespace, "\n"),
+ (Punctuation, ")"),
+ (Whitespace, "\n"),
],
self._get(code),
)
@@ -527,14 +527,14 @@ class EdgeCases(_Common):
self.assertEqual(
[
- (String.Interpol, u"@firststring@"),
- (Whitespace, u" "),
- (Text, u"something"),
- (Whitespace, u" "),
- (Text, u"else"),
- (Whitespace, u" "),
- (String.Interpol, u"@secondstring@"),
- (Whitespace, u"\n"),
+ (String.Interpol, "@firststring@"),
+ (Whitespace, " "),
+ (Text, "something"),
+ (Whitespace, " "),
+ (Text, "else"),
+ (Whitespace, " "),
+ (String.Interpol, "@secondstring@"),
+ (Whitespace, "\n"),
],
self._get(at_sign),
)
@@ -543,14 +543,14 @@ class EdgeCases(_Common):
self.assertEqual(
[
- (String, u"'firststring'"),
- (Whitespace, u" "),
- (Text, u"something"),
- (Whitespace, u" "),
- (Text, u"else"),
- (Whitespace, u" "),
- (String, u"'secondstring'"),
- (Whitespace, u"\n"),
+ (String, "'firststring'"),
+ (Whitespace, " "),
+ (Text, "something"),
+ (Whitespace, " "),
+ (Text, "else"),
+ (Whitespace, " "),
+ (String, "'secondstring'"),
+ (Whitespace, "\n"),
],
self._get(single),
)
@@ -559,14 +559,14 @@ class EdgeCases(_Common):
self.assertEqual(
[
- (String, u"'firststring'"),
- (Whitespace, u" "),
- (Text, u"something"),
- (Whitespace, u" "),
- (Text, u"else"),
- (Whitespace, u" "),
- (String, u"'secondstring'"),
- (Whitespace, u"\n"),
+ (String, "'firststring'"),
+ (Whitespace, " "),
+ (Text, "something"),
+ (Whitespace, " "),
+ (Text, "else"),
+ (Whitespace, " "),
+ (String, "'secondstring'"),
+ (Whitespace, "\n"),
],
self._get(double),
)
diff --git a/tests/test_util.py b/tests/test_util.py
index 81b3b054..b90397ea 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -127,23 +127,6 @@ def test_xml():
assert not util.looks_like_xml('<html>')
-def test_unirange():
- first_non_bmp = u'\U00010000'
- r = re.compile(util.unirange(0x10000, 0x20000))
- m = r.match(first_non_bmp)
- assert m
- assert m.end() == len(first_non_bmp)
- assert not r.match(u'\uffff')
- assert not r.match(u'xxx')
- # Tests that end is inclusive
- r = re.compile(util.unirange(0x10000, 0x10000) + '+')
- # Tests that the plus works for the entire unicode point, if narrow
- # build
- m = r.match(first_non_bmp * 2)
- assert m
- assert m.end() == len(first_non_bmp) * 2
-
-
def test_format_lines():
lst = ['cat', 'dog']
output = util.format_lines('var', lst)
@@ -173,8 +156,8 @@ def test_duplicates_removed_nonconsecutive():
def test_guess_decode():
# UTF-8 should be decoded as UTF-8
- s = util.guess_decode(u'\xff'.encode('utf-8'))
- assert s == (u'\xff', 'utf-8')
+ s = util.guess_decode('\xff'.encode('utf-8'))
+ assert s == ('\xff', 'utf-8')
# otherwise, it could be latin1 or the locale encoding...
import locale
@@ -186,11 +169,11 @@ def test_guess_decode_from_terminal():
class Term:
encoding = 'utf-7'
- s = util.guess_decode_from_terminal(u'\xff'.encode('utf-7'), Term)
- assert s == (u'\xff', 'utf-7')
+ s = util.guess_decode_from_terminal('\xff'.encode('utf-7'), Term)
+ assert s == ('\xff', 'utf-7')
- s = util.guess_decode_from_terminal(u'\xff'.encode('utf-8'), Term)
- assert s == (u'\xff', 'utf-8')
+ s = util.guess_decode_from_terminal('\xff'.encode('utf-8'), Term)
+ assert s == ('\xff', 'utf-8')
def test_console_ansiformat():
diff --git a/tests/test_whiley.py b/tests/test_whiley.py
index e844dafb..c895e17f 100644
--- a/tests/test_whiley.py
+++ b/tests/test_whiley.py
@@ -19,13 +19,13 @@ def lexer():
def test_whiley_operator(lexer):
- fragment = u'123 \u2200 x\n'
+ fragment = '123 \u2200 x\n'
tokens = [
- (Token.Literal.Number.Integer, u'123'),
- (Token.Text, u' '),
- (Token.Operator, u'\u2200'),
- (Token.Text, u' '),
- (Token.Name, u'x'),
- (Token.Text, u'\n'),
+ (Token.Literal.Number.Integer, '123'),
+ (Token.Text, ' '),
+ (Token.Operator, '\u2200'),
+ (Token.Text, ' '),
+ (Token.Name, 'x'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_yang.py b/tests/test_yang.py
index 1de57d05..e2449b59 100644
--- a/tests/test_yang.py
+++ b/tests/test_yang.py
@@ -20,13 +20,13 @@ def test_namespace_1(lexer):
"""
Namespace `urn:test:std:yang` should not be explicitly highlighted
"""
- fragment = u'namespace urn:test:std:yang;\n'
+ fragment = 'namespace urn:test:std:yang;\n'
tokens = [
- (Token.Keyword, u'namespace'),
- (Token.Text.Whitespace, u' '),
- (Token.Name.Variable, u'urn:test:std:yang'),
- (Token.Punctuation, u';'),
- (Token.Text.Whitespace, u'\n'),
+ (Token.Keyword, 'namespace'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Name.Variable, 'urn:test:std:yang'),
+ (Token.Punctuation, ';'),
+ (Token.Text.Whitespace, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -34,15 +34,15 @@ def test_namespace_2(lexer):
"""
    Namespace-prefix `yang` should be explicitly highlighted
"""
- fragment = u'type yang:counter64;\n'
+ fragment = 'type yang:counter64;\n'
tokens = [
- (Token.Keyword, u'type'),
- (Token.Text.Whitespace, u' '),
- (Token.Name.Namespace, u'yang'),
- (Token.Punctuation, u':'),
- (Token.Name.Variable, u'counter64'),
- (Token.Punctuation, u';'),
- (Token.Text.Whitespace, u'\n'),
+ (Token.Keyword, 'type'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Name.Namespace, 'yang'),
+ (Token.Punctuation, ':'),
+ (Token.Name.Variable, 'counter64'),
+ (Token.Punctuation, ';'),
+ (Token.Text.Whitespace, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -50,13 +50,13 @@ def test_revision_date(lexer):
"""
    Revision-date `2020-03-08` should be explicitly highlighted
"""
- fragment = u'revision 2020-03-08{\n'
+ fragment = 'revision 2020-03-08{\n'
tokens = [
- (Token.Keyword, u'revision'),
- (Token.Text.Whitespace, u' '),
- (Token.Name.Label, u'2020-03-08'),
- (Token.Punctuation, u'{'),
- (Token.Text.Whitespace, u'\n'),
+ (Token.Keyword, 'revision'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Name.Label, '2020-03-08'),
+ (Token.Punctuation, '{'),
+ (Token.Text.Whitespace, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -64,13 +64,13 @@ def test_integer_value(lexer):
"""
Integer value `5` should be explicitly highlighted
"""
- fragment = u'value 5;\n'
+ fragment = 'value 5;\n'
tokens = [
- (Token.Keyword, u'value'),
- (Token.Text.Whitespace, u' '),
- (Token.Number.Integer, u'5'),
- (Token.Punctuation, u';'),
- (Token.Text.Whitespace, u'\n'),
+ (Token.Keyword, 'value'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Number.Integer, '5'),
+ (Token.Punctuation, ';'),
+ (Token.Text.Whitespace, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -78,13 +78,13 @@ def test_string_value(lexer):
"""
String value `"5"` should be not explicitly highlighted
"""
- fragment = u'value "5";\n'
+ fragment = 'value "5";\n'
tokens = [
- (Token.Keyword, u'value'),
- (Token.Text.Whitespace, u' '),
- (Token.String.Double, u'"5"'),
- (Token.Punctuation, u';'),
- (Token.Text.Whitespace, u'\n'),
+ (Token.Keyword, 'value'),
+ (Token.Text.Whitespace, ' '),
+ (Token.String.Double, '"5"'),
+ (Token.Punctuation, ';'),
+ (Token.Text.Whitespace, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
@@ -92,12 +92,12 @@ def test_float_value(lexer):
"""
Float value `1.1` should be explicitly highlighted
"""
- fragment = u'yang-version 1.1;\n'
+ fragment = 'yang-version 1.1;\n'
tokens = [
- (Token.Keyword, u'yang-version'),
- (Token.Text.Whitespace, u' '),
- (Token.Number.Float, u'1.1'),
- (Token.Punctuation, u';'),
- (Token.Text.Whitespace, u'\n'),
+ (Token.Keyword, 'yang-version'),
+ (Token.Text.Whitespace, ' '),
+ (Token.Number.Float, '1.1'),
+ (Token.Punctuation, ';'),
+ (Token.Text.Whitespace, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens