diff options
author | Ezio Melotti <ezio.melotti@gmail.com> | 2012-02-13 16:28:54 +0200 |
---|---|---|
committer | Ezio Melotti <ezio.melotti@gmail.com> | 2012-02-13 16:28:54 +0200 |
commit | f117443cb8afa3b2d91b4fef861db17866d6b6df (patch) | |
tree | 4355ba11b5406084325b960dbd4dc464ad8fa202 | |
parent | 4b92cc3f7924e455b7e41cf1a66034a44ede0cc0 (diff) | |
download | cpython-git-f117443cb8afa3b2d91b4fef861db17866d6b6df.tar.gz |
#13993: HTMLParser is now able to handle broken end tags.
-rw-r--r-- | Lib/HTMLParser.py | 34 | ||||
-rw-r--r-- | Lib/test/test_htmlparser.py | 44 | ||||
-rw-r--r-- | Misc/NEWS | 2 |
3 files changed, 69 insertions, 11 deletions
diff --git a/Lib/HTMLParser.py b/Lib/HTMLParser.py index 516bc70147..6cc9ff13bf 100644 --- a/Lib/HTMLParser.py +++ b/Lib/HTMLParser.py @@ -23,6 +23,9 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*') +# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state +# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' @@ -243,7 +246,7 @@ class HTMLParser(markupbase.ParserBase): # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state def parse_bogus_comment(self, i, report=1): rawdata = self.rawdata - if rawdata[i:i+2] != '<!': + if rawdata[i:i+2] not in ('<!', '</'): self.error('unexpected call to parse_comment()') pos = rawdata.find('>', i+2) if pos == -1: @@ -353,23 +356,38 @@ class HTMLParser(markupbase.ParserBase): match = endendtag.search(rawdata, i+1) # > if not match: return -1 - j = match.end() + gtpos = match.end() match = endtagfind.match(rawdata, i) # </ + tag + > if not match: if self.cdata_elem is not None: - self.handle_data(rawdata[i:j]) - return j - self.error("bad end tag: %r" % (rawdata[i:j],)) + self.handle_data(rawdata[i:gtpos]) + return gtpos + # find the name: w3.org/TR/html5/tokenization.html#tag-name-state + namematch = tagfind_tolerant.match(rawdata, i+2) + if not namematch: + # w3.org/TR/html5/tokenization.html#end-tag-open-state + if rawdata[i:i+3] == '</>': + return i+3 + else: + return self.parse_bogus_comment(i) + tagname = namematch.group().lower() + # consume and ignore other stuff between the name and the > + # Note: this is not 100% correct, since we might have things like + # </tag attr=">">, but looking for > after the name should cover + # most of the cases and is much simpler + gtpos = rawdata.find('>', namematch.end()) + self.handle_endtag(tagname) + return gtpos+1 
elem = match.group(1).lower() # script or style if self.cdata_elem is not None: if elem != self.cdata_elem: - self.handle_data(rawdata[i:j]) - return j + self.handle_data(rawdata[i:gtpos]) + return gtpos self.handle_endtag(elem) self.clear_cdata_mode() - return j + return gtpos # Overridable -- finish processing of start+end tag: <tag.../> def handle_startendtag(self, tag, attrs): diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py index 29a721cf45..c15bb660e2 100644 --- a/Lib/test/test_htmlparser.py +++ b/Lib/test/test_htmlparser.py @@ -202,12 +202,12 @@ text self._run_check(["<!--abc-->", ""], output) def test_starttag_junk_chars(self): - self._parse_error("</>") - self._parse_error("</$>") + self._run_check("</>", []) + self._run_check("</$>", [('comment', '$')]) self._parse_error("</") self._parse_error("</a") self._parse_error("<a<a>") - self._parse_error("</a<a>") + self._run_check("</a<a>", [('endtag', 'a<a')]) self._parse_error("<!") self._parse_error("<a") self._parse_error("<a foo='bar'") @@ -232,6 +232,44 @@ text ("endtag", "p"), ]) + def test_invalid_end_tags(self): + # A collection of broken end tags. <br> is used as separator. 
+ # see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state + # and #13993 + html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>' + '</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>') + expected = [('starttag', 'br', []), + # < is part of the name, / is discarded, p is an attribute + ('endtag', 'label<'), + ('starttag', 'br', []), + # text and attributes are discarded + ('endtag', 'div'), + ('starttag', 'br', []), + # comment because the first char after </ is not a-zA-Z + ('comment', '<h4'), + ('starttag', 'br', []), + # attributes are discarded + ('endtag', 'li'), + ('starttag', 'br', []), + # everything till ul (included) is discarded + ('endtag', 'li'), + ('starttag', 'br', []), + # </> is ignored + ('starttag', 'br', [])] + self._run_check(html, expected) + + def test_broken_invalid_end_tag(self): + # This is technically wrong (the "> shouldn't be included in the 'data') + # but is probably not worth fixing (in addition to all the cases of + # the previous test, it would require full attribute parsing). + # see #13993 + html = '<b>This</b attr=">"> confuses the parser' + expected = [('starttag', 'b', []), + ('data', 'This'), + ('endtag', 'b'), + ('data', '"> confuses the parser')] + self._run_check(html, expected) + def test_get_starttag_text(self): s = """<foo:bar \n one="1"\ttwo=2 >""" self._run_check_extra(s, [ @@ -90,6 +90,8 @@ Core and Builtins Library ------- +- Issue #13993: HTMLParser is now able to handle broken end tags. + - Issue #13960: HTMLParser is now able to handle broken comments. - Issue #9750: Fix sqlite3.Connection.iterdump on tables and fields |