path: root/tests/test_clexer.py
author    Matthäus G. Chajdas <dev@anteru.net>    2020-09-08 20:33:25 +0200
committer Matthäus G. Chajdas <dev@anteru.net>    2020-09-08 20:33:25 +0200
commit    203ef1eff6daebab6f95b0b49e6e6a58168073fb (patch)
tree      7defa199f48a34787f980b6400d8bbaa9380039a /tests/test_clexer.py
parent    e09d4e0cf23d7c6069ddc690942ceb4cd23fd556 (diff)
parent    b2c91c70ee536b0472100d1273818f8bb45529fe (diff)
Merge branch 'master' into bug/angular-html

# Conflicts:
#	tests/test_shell.py
Diffstat (limited to 'tests/test_clexer.py')
-rw-r--r--  tests/test_clexer.py  306
1 file changed, 153 insertions(+), 153 deletions(-)
diff --git a/tests/test_clexer.py b/tests/test_clexer.py
index d40ec491..d6561243 100644
--- a/tests/test_clexer.py
+++ b/tests/test_clexer.py
@@ -33,7 +33,7 @@ def test_numbers(lexer):
def test_switch(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
switch (0)
@@ -45,48 +45,48 @@ def test_switch(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'switch'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'case'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'default'),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'switch'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'case'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'default'),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_switch_space_before_colon(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
switch (0)
@@ -98,50 +98,50 @@ def test_switch_space_before_colon(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'switch'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'case'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Text, u' '),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'default'),
- (Token.Text, u' '),
- (Token.Operator, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'switch'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '('),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'case'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Text, ' '),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'default'),
+ (Token.Text, ' '),
+ (Token.Operator, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_label(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
foo:
@@ -149,31 +149,31 @@ def test_label(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Punctuation, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Name.Label, 'foo'),
+ (Token.Punctuation, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'goto'),
+ (Token.Text, ' '),
+ (Token.Name, 'foo'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_label_space_before_colon(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
foo :
@@ -181,32 +181,32 @@ def test_label_space_before_colon(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Text, u' '),
- (Token.Punctuation, u':'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Name.Label, 'foo'),
+ (Token.Text, ' '),
+ (Token.Punctuation, ':'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'goto'),
+ (Token.Text, ' '),
+ (Token.Name, 'foo'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_label_followed_by_statement(lexer):
- fragment = u'''\
+ fragment = '''\
int main()
{
foo:return 0;
@@ -214,52 +214,52 @@ def test_label_followed_by_statement(lexer):
}
'''
tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Punctuation, u':'),
- (Token.Keyword, u'return'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
+ (Token.Keyword.Type, 'int'),
+ (Token.Text, ' '),
+ (Token.Name.Function, 'main'),
+ (Token.Punctuation, '('),
+ (Token.Punctuation, ')'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '{'),
+ (Token.Text, '\n'),
+ (Token.Name.Label, 'foo'),
+ (Token.Punctuation, ':'),
+ (Token.Keyword, 'return'),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '0'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Text, ' '),
+ (Token.Keyword, 'goto'),
+ (Token.Text, ' '),
+ (Token.Name, 'foo'),
+ (Token.Punctuation, ';'),
+ (Token.Text, '\n'),
+ (Token.Punctuation, '}'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
def test_preproc_file(lexer):
- fragment = u'#include <foo>\n'
+ fragment = '#include <foo>\n'
tokens = [
- (Token.Comment.Preproc, u'#'),
- (Token.Comment.Preproc, u'include'),
- (Token.Text, u' '),
- (Token.Comment.PreprocFile, u'<foo>'),
- (Token.Comment.Preproc, u'\n'),
+ (Token.Comment.Preproc, '#'),
+ (Token.Comment.Preproc, 'include'),
+ (Token.Text, ' '),
+ (Token.Comment.PreprocFile, '<foo>'),
+ (Token.Comment.Preproc, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_preproc_file2(lexer):
- fragment = u'#include "foo.h"\n'
+ fragment = '#include "foo.h"\n'
tokens = [
- (Token.Comment.Preproc, u'#'),
- (Token.Comment.Preproc, u'include'),
- (Token.Text, u' '),
- (Token.Comment.PreprocFile, u'"foo.h"'),
- (Token.Comment.Preproc, u'\n'),
+ (Token.Comment.Preproc, '#'),
+ (Token.Comment.Preproc, 'include'),
+ (Token.Text, ' '),
+ (Token.Comment.PreprocFile, '"foo.h"'),
+ (Token.Comment.Preproc, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
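
The change itself is mechanical: every u'' prefix is dropped, since Python 3 treats u'...' and '...' identically. For context, the imports and the lexer fixture these tests depend on sit above the first hunk and are outside this diff. Below is a minimal sketch of that assumed setup; the fixture body is reconstructed from the test signatures rather than shown in the diff, while CLexer, Token, and get_tokens are real Pygments APIs:

# Sketch of the test-module setup the hunks above assume; the fixture
# body is an assumption inferred from the test signatures, not part of
# this diff.
import textwrap

import pytest

from pygments.lexers import CLexer
from pygments.token import Token


@pytest.fixture(scope='module')
def lexer():
    yield CLexer()

get_tokens() yields (token_type, text) pairs for the whole fragment, which is why each test spells out an explicit list of such pairs, and textwrap.dedent() normalizes any common leading indentation the triple-quoted fragments pick up from the surrounding test code before lexing.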