| author | Georg Brandl <georg@python.org> | 2020-09-06 12:02:11 +0200 |
|---|---|---|
| committer | Georg Brandl <georg@python.org> | 2020-09-06 12:02:11 +0200 |
| commit | 5509d51a8c3b8a0d7c3542e47a8de2edb0d61277 (patch) | |
| tree | b2481a6872c48078e5ea8e0b57450c7e8f0a05bf | |
| parent | 603e8eddbf413deeb452ff110431da001e795438 (diff) | |
| download | pygments-git-5509d51a8c3b8a0d7c3542e47a8de2edb0d61277.tar.gz | |

all: fixup some file headers
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | pygments/lexers/_usd_builtins.py | 10 |
| -rw-r--r-- | pygments/lexers/arrow.py | 4 |
| -rw-r--r-- | pygments/lexers/bare.py | 4 |
| -rw-r--r-- | pygments/lexers/devicetree.py | 4 |
| -rw-r--r-- | pygments/lexers/gdscript.py | 31 |
| -rw-r--r-- | pygments/lexers/pointless.py | 9 |
| -rw-r--r-- | pygments/lexers/tnt.py | 35 |
| -rw-r--r-- | pygments/lexers/webidl.py | 5 |

8 files changed, 50 insertions, 52 deletions
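All eight files are converged on the standard Pygments module header. For reference, a sketch of the layout the added lines in the diff below follow; the module name `pygments.lexers.example` and its description are placeholders, and the tilde underline is expected to match the length of the dotted module path (several hunks exist only to fix that underline or the copyright years):

```python
# -*- coding: utf-8 -*-
"""
    pygments.lexers.example
    ~~~~~~~~~~~~~~~~~~~~~~~

    Lexer for the (hypothetical) Example language.

    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
```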
```diff
diff --git a/pygments/lexers/_usd_builtins.py b/pygments/lexers/_usd_builtins.py
index 0c7316a6..edcbde75 100644
--- a/pygments/lexers/_usd_builtins.py
+++ b/pygments/lexers/_usd_builtins.py
@@ -1,7 +1,13 @@
-#!/usr/bin/env python
 # -*- coding: utf-8 -*-
+"""
+    pygments.lexers._usd_builtins
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-"""A collection of known USD-related keywords, attributes, and types."""
+    A collection of known USD-related keywords, attributes, and types.
+
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
 
 COMMON_ATTRIBUTES = [
     "extent",
diff --git a/pygments/lexers/arrow.py b/pygments/lexers/arrow.py
index 560d6de6..0f57b145 100644
--- a/pygments/lexers/arrow.py
+++ b/pygments/lexers/arrow.py
@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*-
 """
     pygments.lexers.arrow
-    ~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~
 
     Lexer for Arrow.
 
-    :copyright: Copyright 2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/pygments/lexers/bare.py b/pygments/lexers/bare.py
index 8466361e..d63a13e8 100644
--- a/pygments/lexers/bare.py
+++ b/pygments/lexers/bare.py
@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*-
 """
     pygments.lexers.bare
-    ~~~~~~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~
 
     Lexer for the BARE schema.
 
-    :copyright: Copyright 2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/pygments/lexers/devicetree.py b/pygments/lexers/devicetree.py
index 0fc26de3..50fa79c2 100644
--- a/pygments/lexers/devicetree.py
+++ b/pygments/lexers/devicetree.py
@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*-
 """
     pygments.lexers.devicetree
-    ~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Lexers for Devicetree language.
 
-    :copyright: Copyright 2019-2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
diff --git a/pygments/lexers/gdscript.py b/pygments/lexers/gdscript.py
index 8dec78fb..7d9b9e6e 100644
--- a/pygments/lexers/gdscript.py
+++ b/pygments/lexers/gdscript.py
@@ -1,36 +1,23 @@
 # -*- coding: utf-8 -*-
 """
     pygments.lexers.gdscript
-    ~~~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~
 
     Lexer for GDScript.
 
-    :copyright: Copyright 2xxx by The Godot Engine Community
-    :license: BSD, see LICENSE for details.
+    Modified by Daniel J. Ramirez <djrmuv@gmail.com> based on the original
+    python.py.
 
-    modified by Daniel J. Ramirez <djrmuv@gmail.com> based on the original python.py pygment
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
 """
 
 import re
 
-from pygments.lexer import (
-    RegexLexer,
-    include,
-    bygroups,
-    default,
-    words,
-    combined,
-)
-from pygments.token import (
-    Text,
-    Comment,
-    Operator,
-    Keyword,
-    Name,
-    String,
-    Number,
-    Punctuation,
-)
+from pygments.lexer import RegexLexer, include, bygroups, default, words, \
+    combined
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
 
 __all__ = ["GDScriptLexer"]
diff --git a/pygments/lexers/pointless.py b/pygments/lexers/pointless.py
index a0a5cefa..9acc3ffb 100644
--- a/pygments/lexers/pointless.py
+++ b/pygments/lexers/pointless.py
@@ -1,16 +1,17 @@
 # -*- coding: utf-8 -*-
 """
     pygments.lexers.pointless
-    ~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Lexers for Pointless.
 
-    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-from pygments.lexer import RegexLexer, bygroups, words
-from pygments.token import *
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+    Punctuation, String, Text
 
 __all__ = ['PointlessLexer']
diff --git a/pygments/lexers/tnt.py b/pygments/lexers/tnt.py
index 13c2b5d7..f62f3ab9 100644
--- a/pygments/lexers/tnt.py
+++ b/pygments/lexers/tnt.py
@@ -5,16 +5,19 @@
 
     Lexer for Typographic Number Theory.
 
-    :copyright: Copyright 2019-2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+
 import re
+
 from pygments.lexer import Lexer
 from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
-     Punctuation, Error
+    Punctuation, Error
 
 __all__ = ['TNTLexer']
 
+
 class TNTLexer(Lexer):
     """
     Lexer for Typographic Number Theory, as described in the book
@@ -77,18 +80,18 @@ class TNTLexer(Lexer):
 
     def term(self, start, text):
         """Tokenize a term."""
-        if text[start] == 'S': # S...S(...) or S...0
+        if text[start] == 'S':  # S...S(...) or S...0
             end = start+1
             while text[end] == 'S':
                 end += 1
             self.cur.append((start, Number.Integer, text[start:end]))
             return self.term(end, text)
-        if text[start] == '0': # the singleton 0
+        if text[start] == '0':  # the singleton 0
             self.cur.append((start, Number.Integer, text[start]))
             return start+1
-        if text[start] in self.VARIABLES: # a''...
+        if text[start] in self.VARIABLES:  # a''...
             return self.variable(start, text)
-        if text[start] == '(': # (...+...)
+        if text[start] == '(':  # (...+...)
             self.cur.append((start, Punctuation, text[start]))
             start = self.term(start+1, text)
             assert text[start] in self.OPERATORS
@@ -97,26 +100,26 @@ class TNTLexer(Lexer):
             assert text[start] == ')'
             self.cur.append((start, Punctuation, text[start]))
             return start+1
-        raise AssertionError # no matches
+        raise AssertionError  # no matches
 
     def formula(self, start, text):
         """Tokenize a formula."""
-        if text[start] in '[]': # fantasy push or pop
+        if text[start] in '[]':  # fantasy push or pop
             self.cur.append((start, Keyword, text[start]))
             return start+1
-        if text[start] in self.NEGATORS: # ~<...>
+        if text[start] in self.NEGATORS:  # ~<...>
             end = start+1
             while text[end] in self.NEGATORS:
                 end += 1
             self.cur.append((start, Operator, text[start:end]))
             return self.formula(end, text)
-        if text[start] in self.QUANTIFIERS: # Aa:<...>
+        if text[start] in self.QUANTIFIERS:  # Aa:<...>
             self.cur.append((start, Keyword.Declaration, text[start]))
             start = self.variable(start+1, text)
             assert text[start] == ':'
             self.cur.append((start, Punctuation, text[start]))
             return self.formula(start+1, text)
-        if text[start] == '<': # <...&...>
+        if text[start] == '<':  # <...&...>
             self.cur.append((start, Punctuation, text[start]))
             start = self.formula(start+1, text)
             assert text[start] in self.LOGIC
@@ -136,9 +139,9 @@ class TNTLexer(Lexer):
         """Tokenize a rule."""
         match = self.RULES.match(text, start)
         assert match is not None
-        groups = sorted(match.regs[1:]) # exclude whole match
+        groups = sorted(match.regs[1:])  # exclude whole match
         for group in groups:
-            if group[0] >= 0: # this group matched
+            if group[0] >= 0:  # this group matched
                 self.cur.append((start, Keyword, text[start:group[0]]))
                 self.cur.append((group[0], Number.Integer,
                                  text[group[0]:group[1]]))
@@ -169,7 +172,7 @@ class TNTLexer(Lexer):
         """Mark everything from ``start`` to the end of the line as Error."""
         end = start
         try:
-            while text[end] != '\n': # there's whitespace in rules
+            while text[end] != '\n':  # there's whitespace in rules
                 end += 1
         except IndexError:
             end = len(text)
@@ -186,7 +189,7 @@ class TNTLexer(Lexer):
             # try line number
             while text[end] in self.NUMBERS:
                 end += 1
-            if end != start: # actual number present
+            if end != start:  # actual number present
                 self.cur.append((start, Number.Integer, text[start:end]))
             # whitespace is required after a line number
             orig = len(self.cur)
@@ -210,7 +213,7 @@ class TNTLexer(Lexer):
             orig = len(self.cur)
             try:
                 start = end = self.formula(start, text)
-            except AssertionError: # not well-formed
+            except AssertionError:  # not well-formed
                 del self.cur[orig:]
                 while text[end] not in self.WHITESPACE:
                     end += 1
diff --git a/pygments/lexers/webidl.py b/pygments/lexers/webidl.py
index df1f48ed..ef8518ce 100644
--- a/pygments/lexers/webidl.py
+++ b/pygments/lexers/webidl.py
@@ -1,11 +1,11 @@
 # -*- coding: utf-8 -*-
 """
     pygments.lexers.webidl
-    ~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~
 
     Lexers for Web IDL, including some extensions.
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
@@ -36,6 +36,7 @@
 _identifier = r'_?[A-Za-z][\w-]*'
 _keyword_suffix = r'(?![\w-])'
 _string = r'"[^"]*"'
+
 
 class WebIDLLexer(RegexLexer):
     """
     For Web IDL.
```
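Beyond the header normalization, the remaining hunks in gdscript.py, pointless.py, and tnt.py are small style cleanups: imports collapsed onto backslash-continued lines, the wildcard token import replaced with explicit names, and two spaces placed before inline `#` comments per PEP 8. A condensed illustration, with lines adapted from the diff above (the `zero_token` name is just a placeholder):

```python
# Wildcard imports become explicit names, as in pointless.py:
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
    Punctuation, String, Text

# Inline comments take two spaces before the "#", as in the tnt.py hunks:
zero_token = 0  # the singleton 0
```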