author    Matthäus G. Chajdas <dev@anteru.net>    2020-09-08 19:45:20 +0200
committer Matthäus G. Chajdas <dev@anteru.net>    2020-09-08 19:45:20 +0200
commit    98f816ae5ca7d98f388ace349a29b154fa9dc9e1 (patch)
tree      d0494ebb40613432ef5c157c69c722393164710e /pygments/lexers/robotframework.py
parent    b6d1d68de705e1cfc28a4188f792b29c545bf7ed (diff)
parent    945ed5ef268e2f3c7bbea42dfae1f8f844096f61 (diff)
download  pygments-git-98f816ae5ca7d98f388ace349a29b154fa9dc9e1.tar.gz
Merge branch 'master' into bug/angular-html
Diffstat (limited to 'pygments/lexers/robotframework.py')
-rw-r--r--  pygments/lexers/robotframework.py  35
1 file changed, 14 insertions, 21 deletions
diff --git a/pygments/lexers/robotframework.py b/pygments/lexers/robotframework.py
index ddaddb22..cd808292 100644
--- a/pygments/lexers/robotframework.py
+++ b/pygments/lexers/robotframework.py
@@ -5,7 +5,7 @@
Lexer for Robot Framework.
- :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -98,16 +98,13 @@ class VariableTokenizer:
before = string[:var.start]
yield before, orig_token
yield var.identifier + '{', SYNTAX
- for value, token in self.tokenize(var.base, VARIABLE):
- yield value, token
+ yield from self.tokenize(var.base, VARIABLE)
yield '}', SYNTAX
if var.index:
yield '[', SYNTAX
- for value, token in self.tokenize(var.index, VARIABLE):
- yield value, token
+ yield from self.tokenize(var.index, VARIABLE)
yield ']', SYNTAX
- for value, token in self.tokenize(string[var.end:], orig_token):
- yield value, token
+ yield from self.tokenize(string[var.end:], orig_token)
class RowTokenizer:
@@ -123,6 +120,7 @@ class RowTokenizer:
'metadata': settings,
'variables': variables, 'variable': variables,
'testcases': testcases, 'testcase': testcases,
+ 'tasks': testcases, 'task': testcases,
'keywords': keywords, 'keyword': keywords,
'userkeywords': keywords, 'userkeyword': keywords}
@@ -137,9 +135,8 @@ class RowTokenizer:
elif index == 0 and value.startswith('*'):
self._table = self._start_table(value)
heading = True
- for value, token in self._tokenize(value, index, commented,
- separator, heading):
- yield value, token
+ yield from self._tokenize(value, index, commented,
+ separator, heading)
self._table.end_row()
def _start_table(self, header):
@@ -154,8 +151,7 @@ class RowTokenizer:
elif heading:
yield value, HEADING
else:
- for value, token in self._table.tokenize(value, index):
- yield value, token
+ yield from self._table.tokenize(value, index)
class RowSplitter:
@@ -165,14 +161,12 @@ class RowSplitter:
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
or self._split_from_spaces)
- for value in splitter(row):
- yield value
+ yield from splitter(row)
yield '\n'
def _split_from_spaces(self, row):
yield '' # Start with (pseudo)separator similarly as with pipes
- for value in self._space_splitter.split(row):
- yield value
+ yield from self._space_splitter.split(row)
def _split_from_pipes(self, row):
_, separator, rest = self._pipe_splitter.split(row, 1)
@@ -215,11 +209,11 @@ class Comment(Tokenizer):
class Setting(Tokenizer):
_tokens = (SETTING, ARGUMENT)
_keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
- 'suitepostcondition', 'testsetup', 'testprecondition',
- 'testteardown', 'testpostcondition', 'testtemplate')
+ 'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition',
+ 'testteardown','taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate')
_import_settings = ('library', 'resource', 'variables')
_other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
- 'testtimeout')
+ 'testtimeout','tasktimeout')
_custom_tokenizer = None
def __init__(self, template_setter=None):
@@ -332,8 +326,7 @@ class _Table:
self._tokenizer = self._prev_tokenizer
yield value, SYNTAX
else:
- for value_and_token in self._tokenize(value, index):
- yield value_and_token
+ yield from self._tokenize(value, index)
self._prev_values_on_row.append(value)
def _continues(self, value, index):
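
Most hunks in this diff replace hand-written re-yield loops with yield from delegation (PEP 380); the tokenizer output is unchanged. A minimal standalone sketch of the pattern, using hypothetical helper names that are not taken from the lexer itself:

    def _pairs(values):
        # Stand-in for a nested tokenizer yielding (value, token) pairs.
        for value in values:
            yield value, 'TOKEN'

    def tokenize_loop(values):
        # Pre-refactor style: explicit loop that re-yields every pair.
        for value, token in _pairs(values):
            yield value, token

    def tokenize_delegate(values):
        # Post-refactor style: generator delegation with yield from.
        yield from _pairs(values)

    assert list(tokenize_loop(['a', 'b'])) == list(tokenize_delegate(['a', 'b']))

The remaining hunks extend the table and setting name maps so that task sections ('tasks'/'task') and task-related settings are tokenized the same way as their test-case counterparts.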