diff options
author | Matthäus G. Chajdas <dev@anteru.net> | 2019-11-10 13:56:53 +0100 |
---|---|---|
committer | Matthäus G. Chajdas <dev@anteru.net> | 2019-11-10 13:56:53 +0100 |
commit | 1dd3124a9770e11b6684e5dd1e6bc15a0aa3bc67 (patch) | |
tree | 87a171383266dd1f64196589af081bc2f8e497c3 /pygments/lexers/zig.py | |
parent | f1c080e184dc1bbc36eaa7cd729ff3a499de568a (diff) | |
download | pygments-master.tar.gz |
Diffstat (limited to 'pygments/lexers/zig.py')
-rw-r--r-- | pygments/lexers/zig.py | 126 |
1 file changed, 0 insertions, 126 deletions
# -*- coding: utf-8 -*-
"""
    pygments.lexers.zig
    ~~~~~~~~~~~~~~~~~~~

    Lexers for Zig.

    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
from pygments.lexer import RegexLexer, bygroups, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation, Error, Whitespace

__all__ = ['ZigLexer']


class ZigLexer(RegexLexer):
    """
    For `Zig <http://www.ziglang.org>`_ source code.

    grammar: https://ziglang.org/documentation/master/#Grammar
    """
    name = 'Zig'
    aliases = ['zig']
    filenames = ['*.zig']
    mimetypes = ['text/zig']

    # Primitive type names. NOTE: the original had `'c_void' 'i8'` with a
    # missing comma — Python's implicit string concatenation turned it into
    # the bogus keyword 'c_voidi8', so neither 'c_void' nor 'i8' was
    # highlighted as a type. Fixed by adding the comma.
    type_keywords = (
        words(('bool', 'f16', 'f32', 'f64', 'f128', 'void', 'noreturn',
               'type', 'anyerror', 'promise',
               'i0', 'u0', 'isize', 'usize', 'comptime_int', 'comptime_float',
               'c_short', 'c_ushort', 'c_int', 'c_uint', 'c_long', 'c_ulong',
               'c_longlong', 'c_ulonglong', 'c_longdouble', 'c_void',
               'i8', 'u8', 'i16', 'u16', 'i32', 'u32', 'i64', 'u64',
               'i128', 'u128'), suffix=r'\b'),
        Keyword.Type)

    # Storage/declaration qualifiers.
    storage_keywords = (
        words(('const', 'var', 'extern', 'packed', 'export', 'pub', 'noalias',
               'inline', 'comptime', 'nakedcc', 'stdcallcc', 'volatile',
               'allowzero', 'align', 'linksection', 'threadlocal'),
              suffix=r'\b'),
        Keyword.Reserved)

    # Aggregate-type introducers.
    structure_keywords = (
        words(('struct', 'enum', 'union', 'error'), suffix=r'\b'),
        Keyword)

    # Statement-level keywords (control transfer, error handling, async).
    statement_keywords = (
        words(('break', 'return', 'continue', 'asm', 'defer', 'errdefer',
               'unreachable', 'try', 'catch', 'async', 'await', 'suspend',
               'resume', 'cancel'), suffix=r'\b'),
        Keyword)

    # Conditional / boolean-combining keywords.
    conditional_keywords = (
        words(('if', 'else', 'switch', 'and', 'or', 'orelse'), suffix=r'\b'),
        Keyword)

    # Loop keywords.
    repeat_keywords = (
        words(('while', 'for'), suffix=r'\b'),
        Keyword)

    # Miscellaneous keywords.
    other_keywords = (
        words(('fn', 'usingnamespace', 'test'), suffix=r'\b'),
        Keyword)

    # Literal constants.
    constant_keywords = (
        words(('true', 'false', 'null', 'undefined'), suffix=r'\b'),
        Keyword.Constant)

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),

            # Keywords — order matters: more specific keyword groups are
            # tried before the generic identifier rule below.
            statement_keywords,
            storage_keywords,
            structure_keywords,
            repeat_keywords,
            type_keywords,
            constant_keywords,
            conditional_keywords,
            other_keywords,

            # Floats (hex floats first so the '0x' prefix is not consumed
            # by the integer rules).
            (r'0x[0-9a-fA-F]+\.[0-9a-fA-F]+([pP][\-+]?[0-9a-fA-F]+)?',
             Number.Float),
            (r'0x[0-9a-fA-F]+\.?[pP][\-+]?[0-9a-fA-F]+', Number.Float),
            (r'[0-9]+\.[0-9]+([eE][-+]?[0-9]+)?', Number.Float),
            (r'[0-9]+\.?[eE][-+]?[0-9]+', Number.Float),

            # Integers
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),

            # Identifiers ('@name' is a builtin call in Zig).
            (r'@[a-zA-Z_]\w*', Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),

            # Character literals
            (r'\'\\\'\'', String.Escape),
            (r'\'\\(|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}'
             r'|[nr\\t\'"])\'', String.Escape),
            (r'\'[^\\\']\'', String),

            # Strings ('\\' lines are Zig multiline string literals;
            # the 'c' prefix marks C-compatible, NUL-terminated strings).
            (r'\\\\[^\n]*', String.Heredoc),
            (r'c\\\\[^\n]*', String.Heredoc),
            (r'c?"', String, 'string'),

            # Operators, Punctuation
            (r'[+%=><|^!?/\-*&~:]', Operator),
            (r'[{}()\[\],.;]', Punctuation)
        ],
        'string': [
            (r'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}'
             r'|[nr\\t\'"])', String.Escape),
            (r'[^\\"\n]+', String),
            (r'"', String, '#pop')
        ]
    }

    # NOTE: the original defined a get_tokens_unprocessed() override that
    # merely forwarded every (index, token, value) triple to the superclass
    # unchanged; it has been removed as a pure no-op.