author     Matthäus G. Chajdas <dev@anteru.net>    2020-09-08 20:33:25 +0200
committer  Matthäus G. Chajdas <dev@anteru.net>    2020-09-08 20:33:25 +0200
commit     203ef1eff6daebab6f95b0b49e6e6a58168073fb (patch)
tree       7defa199f48a34787f980b6400d8bbaa9380039a /tests/test_bibtex.py
parent     e09d4e0cf23d7c6069ddc690942ceb4cd23fd556 (diff)
parent     b2c91c70ee536b0472100d1273818f8bb45529fe (diff)
download   pygments-git-bug/angular-html.tar.gz
Merge branch 'master' into bug/angular-html
# Conflicts:
# tests/test_shell.py
Diffstat (limited to 'tests/test_bibtex.py')
-rw-r--r--  tests/test_bibtex.py  290
1 file changed, 145 insertions, 145 deletions
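The change to the test file brought in by this merge is purely mechanical: every legacy `u'...'` string prefix is dropped. In Python 3 the prefix is accepted only for backwards compatibility and has no effect, since all string literals are unicode `str` by default, so the token assertions are unaffected. A minimal illustration (not part of this commit):

    # Python 3: the u prefix is redundant; both literals are the same str value,
    # which is why this 145-insertion/145-deletion diff changes no behaviour.
    assert u'@PREAMBLE' == '@PREAMBLE'
    assert type(u'@PREAMBLE') is str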
diff --git a/tests/test_bibtex.py b/tests/test_bibtex.py
index dfa668f2..d7bc02ac 100644
--- a/tests/test_bibtex.py
+++ b/tests/test_bibtex.py
@@ -21,39 +21,39 @@ def lexer():
 
 
 def test_preamble(lexer):
-    data = u'@PREAMBLE{"% some LaTeX code here"}'
+    data = '@PREAMBLE{"% some LaTeX code here"}'
     tokens = [
-        (Token.Name.Class, u'@PREAMBLE'),
-        (Token.Punctuation, u'{'),
-        (Token.String, u'"'),
-        (Token.String, u'% some LaTeX code here'),
-        (Token.String, u'"'),
-        (Token.Punctuation, u'}'),
-        (Token.Text, u'\n'),
+        (Token.Name.Class, '@PREAMBLE'),
+        (Token.Punctuation, '{'),
+        (Token.String, '"'),
+        (Token.String, '% some LaTeX code here'),
+        (Token.String, '"'),
+        (Token.Punctuation, '}'),
+        (Token.Text, '\n'),
     ]
     assert list(lexer.get_tokens(data)) == tokens
 
 
 def test_string(lexer):
-    data = u'@STRING(SCI = "Science")'
+    data = '@STRING(SCI = "Science")'
     tokens = [
-        (Token.Name.Class, u'@STRING'),
-        (Token.Punctuation, u'('),
-        (Token.Name.Attribute, u'SCI'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'='),
-        (Token.Text, u' '),
-        (Token.String, u'"'),
-        (Token.String, u'Science'),
-        (Token.String, u'"'),
-        (Token.Punctuation, u')'),
-        (Token.Text, u'\n'),
+        (Token.Name.Class, '@STRING'),
+        (Token.Punctuation, '('),
+        (Token.Name.Attribute, 'SCI'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '='),
+        (Token.Text, ' '),
+        (Token.String, '"'),
+        (Token.String, 'Science'),
+        (Token.String, '"'),
+        (Token.Punctuation, ')'),
+        (Token.Text, '\n'),
    ]
     assert list(lexer.get_tokens(data)) == tokens
 
 
 def test_entry(lexer):
-    data = u"""
+    data = """
     This is a comment.
 
     @ARTICLE{ruckenstein-diffusion,
@@ -65,56 +65,56 @@ def test_entry(lexer):
     """
     tokens = [
-        (Token.Comment, u'This is a comment.'),
-        (Token.Text, u'\n\n'),
-        (Token.Name.Class, u'@ARTICLE'),
-        (Token.Punctuation, u'{'),
-        (Token.Name.Label, u'ruckenstein-diffusion'),
-        (Token.Punctuation, u','),
-        (Token.Text, u'\n    '),
-        (Token.Name.Attribute, u'author'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'='),
-        (Token.Text, u' '),
-        (Token.String, u'"'),
-        (Token.String, u'Liu, Hongquin'),
-        (Token.String, u'"'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'#'),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u'and'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'#'),
-        (Token.Text, u' '),
-        (Token.String, u'"'),
-        (Token.String, u'Ruckenstein, Eli'),
-        (Token.String, u'"'),
-        (Token.Punctuation, u','),
-        (Token.Text, u'\n    '),
-        (Token.Name.Attribute, u'year'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'='),
-        (Token.Text, u' '),
-        (Token.Number, u'1997'),
-        (Token.Punctuation, u','),
-        (Token.Text, u'\n    '),
-        (Token.Name.Attribute, u'month'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'='),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u'JAN'),
-        (Token.Punctuation, u','),
-        (Token.Text, u'\n    '),
-        (Token.Name.Attribute, u'pages'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'='),
-        (Token.Text, u' '),
-        (Token.String, u'"'),
-        (Token.String, u'888-895'),
-        (Token.String, u'"'),
-        (Token.Text, u'\n'),
-        (Token.Punctuation, u'}'),
-        (Token.Text, u'\n'),
+        (Token.Comment, 'This is a comment.'),
+        (Token.Text, '\n\n'),
+        (Token.Name.Class, '@ARTICLE'),
+        (Token.Punctuation, '{'),
+        (Token.Name.Label, 'ruckenstein-diffusion'),
+        (Token.Punctuation, ','),
+        (Token.Text, '\n    '),
+        (Token.Name.Attribute, 'author'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '='),
+        (Token.Text, ' '),
+        (Token.String, '"'),
+        (Token.String, 'Liu, Hongquin'),
+        (Token.String, '"'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '#'),
+        (Token.Text, ' '),
+        (Token.Name.Variable, 'and'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '#'),
+        (Token.Text, ' '),
+        (Token.String, '"'),
+        (Token.String, 'Ruckenstein, Eli'),
+        (Token.String, '"'),
+        (Token.Punctuation, ','),
+        (Token.Text, '\n    '),
+        (Token.Name.Attribute, 'year'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '='),
+        (Token.Text, ' '),
+        (Token.Number, '1997'),
+        (Token.Punctuation, ','),
+        (Token.Text, '\n    '),
+        (Token.Name.Attribute, 'month'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '='),
+        (Token.Text, ' '),
+        (Token.Name.Variable, 'JAN'),
+        (Token.Punctuation, ','),
+        (Token.Text, '\n    '),
+        (Token.Name.Attribute, 'pages'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '='),
+        (Token.Text, ' '),
+        (Token.String, '"'),
+        (Token.String, '888-895'),
+        (Token.String, '"'),
+        (Token.Text, '\n'),
+        (Token.Punctuation, '}'),
+        (Token.Text, '\n'),
     ]
     assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens
 
 
@@ -122,9 +122,9 @@ def test_entry(lexer):
 def test_comment(lexer):
     data = '@COMMENT{test}'
     tokens = [
-        (Token.Comment, u'@COMMENT'),
-        (Token.Comment, u'{test}'),
-        (Token.Text, u'\n'),
+        (Token.Comment, '@COMMENT'),
+        (Token.Comment, '{test}'),
+        (Token.Text, '\n'),
     ]
     assert list(lexer.get_tokens(data)) == tokens
 
@@ -132,12 +132,12 @@ def test_comment(lexer):
 def test_missing_body(lexer):
     data = '@ARTICLE xxx'
     tokens = [
-        (Token.Name.Class, u'@ARTICLE'),
-        (Token.Text, u' '),
-        (Token.Error, u'x'),
-        (Token.Error, u'x'),
-        (Token.Error, u'x'),
-        (Token.Text, u'\n'),
+        (Token.Name.Class, '@ARTICLE'),
+        (Token.Text, ' '),
+        (Token.Error, 'x'),
+        (Token.Error, 'x'),
+        (Token.Error, 'x'),
+        (Token.Text, '\n'),
     ]
     assert list(lexer.get_tokens(data)) == tokens
 
@@ -145,12 +145,12 @@ def test_missing_body(lexer):
 def test_mismatched_brace(lexer):
     data = '@PREAMBLE(""}'
     tokens = [
-        (Token.Name.Class, u'@PREAMBLE'),
-        (Token.Punctuation, u'('),
-        (Token.String, u'"'),
-        (Token.String, u'"'),
-        (Token.Error, u'}'),
-        (Token.Text, u'\n'),
+        (Token.Name.Class, '@PREAMBLE'),
+        (Token.Punctuation, '('),
+        (Token.String, '"'),
+        (Token.String, '"'),
+        (Token.Error, '}'),
+        (Token.Text, '\n'),
     ]
     assert list(lexer.get_tokens(data)) == tokens
 
@@ -177,64 +177,64 @@ def test_basic_bst():
     """
     tokens = [
         (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"),
-        (Token.Text, u'\n\n'),
-        (Token.Keyword, u'INTEGERS'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'{'),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u'output.state'),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u'before.all'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'}'),
-        (Token.Text, u'\n\n'),
-        (Token.Keyword, u'FUNCTION'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'{'),
-        (Token.Name.Variable, u'sort.format.title'),
-        (Token.Punctuation, u'}'),
-        (Token.Text, u'\n'),
-        (Token.Punctuation, u'{'),
-        (Token.Text, u' '),
-        (Token.Name.Function, u"'t"),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u':='),
-        (Token.Text, u'\n'),
-        (Token.Literal.String, u'"A "'),
-        (Token.Text, u' '),
-        (Token.Literal.Number, u'#2'),
-        (Token.Text, u'\n    '),
-        (Token.Literal.String, u'"An "'),
-        (Token.Text, u' '),
-        (Token.Literal.Number, u'#3'),
-        (Token.Text, u'\n      '),
-        (Token.Literal.String, u'"The "'),
-        (Token.Text, u' '),
-        (Token.Literal.Number, u'#4'),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u't'),
-        (Token.Text, u' '),
-        (Token.Name.Variable, u'chop.word'),
-        (Token.Text, u'\n    '),
-        (Token.Name.Variable, u'chop.word'),
-        (Token.Text, u'\n'),
-        (Token.Name.Variable, u'chop.word'),
-        (Token.Text, u'\n'),
-        (Token.Name.Variable, u'sortify'),
-        (Token.Text, u'\n'),
-        (Token.Literal.Number, u'#1'),
-        (Token.Text, u' '),
-        (Token.Name.Builtin, u'global.max$'),
-        (Token.Text, u' '),
-        (Token.Name.Builtin, u'substring$'),
-        (Token.Text, u'\n'),
-        (Token.Punctuation, u'}'),
-        (Token.Text, u'\n\n'),
-        (Token.Keyword, u'ITERATE'),
-        (Token.Text, u' '),
-        (Token.Punctuation, u'{'),
-        (Token.Name.Builtin, u'call.type$'),
-        (Token.Punctuation, u'}'),
-        (Token.Text, u'\n'),
+        (Token.Text, '\n\n'),
+        (Token.Keyword, 'INTEGERS'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '{'),
+        (Token.Text, ' '),
+        (Token.Name.Variable, 'output.state'),
+        (Token.Text, ' '),
+        (Token.Name.Variable, 'before.all'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '}'),
+        (Token.Text, '\n\n'),
+        (Token.Keyword, 'FUNCTION'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '{'),
+        (Token.Name.Variable, 'sort.format.title'),
+        (Token.Punctuation, '}'),
+        (Token.Text, '\n'),
+        (Token.Punctuation, '{'),
+        (Token.Text, ' '),
+        (Token.Name.Function, "'t"),
+        (Token.Text, ' '),
+        (Token.Name.Variable, ':='),
+        (Token.Text, '\n'),
+        (Token.Literal.String, '"A "'),
+        (Token.Text, ' '),
+        (Token.Literal.Number, '#2'),
+        (Token.Text, '\n    '),
+        (Token.Literal.String, '"An "'),
+        (Token.Text, ' '),
+        (Token.Literal.Number, '#3'),
+        (Token.Text, '\n      '),
+        (Token.Literal.String, '"The "'),
+        (Token.Text, ' '),
+        (Token.Literal.Number, '#4'),
+        (Token.Text, ' '),
+        (Token.Name.Variable, 't'),
+        (Token.Text, ' '),
+        (Token.Name.Variable, 'chop.word'),
+        (Token.Text, '\n    '),
+        (Token.Name.Variable, 'chop.word'),
+        (Token.Text, '\n'),
+        (Token.Name.Variable, 'chop.word'),
+        (Token.Text, '\n'),
+        (Token.Name.Variable, 'sortify'),
+        (Token.Text, '\n'),
+        (Token.Literal.Number, '#1'),
+        (Token.Text, ' '),
+        (Token.Name.Builtin, 'global.max$'),
+        (Token.Text, ' '),
+        (Token.Name.Builtin, 'substring$'),
+        (Token.Text, '\n'),
+        (Token.Punctuation, '}'),
+        (Token.Text, '\n\n'),
+        (Token.Keyword, 'ITERATE'),
+        (Token.Text, ' '),
+        (Token.Punctuation, '{'),
+        (Token.Name.Builtin, 'call.type$'),
+        (Token.Punctuation, '}'),
+        (Token.Text, '\n'),
     ]
     assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens
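All of these tests drive a lexer through the same `get_tokens` API asserted above, which yields `(tokentype, value)` pairs and always emits a trailing `'\n'`. A minimal sketch of that pattern outside the pytest fixture, using the first test case as input (the import path is assumed from the Pygments package layout and is not shown in this diff):

    from pygments.lexers import BibTeXLexer
    from pygments.token import Token

    lexer = BibTeXLexer()
    # get_tokens() yields (tokentype, value) pairs; the lexer guarantees
    # the output ends with a newline token, matching the tests above.
    tokens = list(lexer.get_tokens('@PREAMBLE{"% some LaTeX code here"}'))
    assert tokens[0] == (Token.Name.Class, '@PREAMBLE')
    assert tokens[-1] == (Token.Text, '\n')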