Commit b8147452 authored by Ezio Melotti's avatar Ezio Melotti

#19480: HTMLParser now accepts all valid start-tag names as defined by the HTML5 standard.

parent a6912197
...@@ -22,9 +22,12 @@ charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]') ...@@ -22,9 +22,12 @@ charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]') starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>') piclose = re.compile('>')
commentclose = re.compile(r'--\s*>') commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
# note: if you change tagfind/attrfind remember to update locatestarttagend too
tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
# this regex is currently unused, but left for backward compatibility
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
attrfind = re.compile( attrfind = re.compile(
...@@ -32,7 +35,7 @@ attrfind = re.compile( ...@@ -32,7 +35,7 @@ attrfind = re.compile(
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r""" locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name <[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name (?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator (?:\s*=+\s* # value indicator
...@@ -373,14 +376,14 @@ class HTMLParser(markupbase.ParserBase): ...@@ -373,14 +376,14 @@ class HTMLParser(markupbase.ParserBase):
self.handle_data(rawdata[i:gtpos]) self.handle_data(rawdata[i:gtpos])
return gtpos return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2) namematch = tagfind.match(rawdata, i+2)
if not namematch: if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state # w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>': if rawdata[i:i+3] == '</>':
return i+3 return i+3
else: else:
return self.parse_bogus_comment(i) return self.parse_bogus_comment(i)
tagname = namematch.group().lower() tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the > # consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like # Note: this is not 100% correct, since we might have things like
# </tag attr=">">, but looking for > after the name should cover # </tag attr=">">, but looking for > after the name should cover
......
...@@ -206,8 +206,7 @@ text ...@@ -206,8 +206,7 @@ text
self._run_check("</$>", [('comment', '$')]) self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')]) self._run_check("</", [('data', '</')])
self._run_check("</a", [('data', '</a')]) self._run_check("</a", [('data', '</a')])
# XXX this might be wrong self._run_check("<a<a>", [('starttag', 'a<a', [])])
self._run_check("<a<a>", [('data', '<a'), ('starttag', 'a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')]) self._run_check("</a<a>", [('endtag', 'a<a')])
self._run_check("<!", [('data', '<!')]) self._run_check("<!", [('data', '<!')])
self._run_check("<a", [('data', '<a')]) self._run_check("<a", [('data', '<a')])
...@@ -215,6 +214,11 @@ text ...@@ -215,6 +214,11 @@ text
self._run_check("<a foo='bar", [('data', "<a foo='bar")]) self._run_check("<a foo='bar", [('data', "<a foo='bar")])
self._run_check("<a foo='>'", [('data', "<a foo='>'")]) self._run_check("<a foo='>'", [('data', "<a foo='>'")])
self._run_check("<a foo='>", [('data', "<a foo='>")]) self._run_check("<a foo='>", [('data', "<a foo='>")])
self._run_check("<a$>", [('starttag', 'a$', [])])
self._run_check("<a$b>", [('starttag', 'a$b', [])])
self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
self._run_check("<a$b >", [('starttag', 'a$b', [])])
self._run_check("<a$b />", [('startendtag', 'a$b', [])])
def test_valid_doctypes(self): def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html # from http://www.w3.org/QA/2002/04/valid-dtd-list.html
......
...@@ -12,6 +12,9 @@ Core and Builtins ...@@ -12,6 +12,9 @@ Core and Builtins
Library Library
------- -------
- Issue #19480: HTMLParser now accepts all valid start-tag names as defined
by the HTML5 standard.
- Issue #17827: Add the missing documentation for ``codecs.encode`` and - Issue #17827: Add the missing documentation for ``codecs.encode`` and
``codecs.decode``. ``codecs.decode``.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment