Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

[py2.7.3] Fix for markup parser issues

  • Loading branch information...
commit c28b7ed18a1914eeeeb62d2514f775fe4552e2ae 1 parent 6f39e5f
@mattbasta authored
View
4 tests/resources/markup/markuptester/bad.xml
@@ -1,4 +0,0 @@
-<?xml version="1.0"?>
-<xml>
- <thisisa<broken<>>>
-</xml>
View
7 tests/resources/markup/markuptester/bad_script.xml
@@ -1,7 +0,0 @@
-<?xml version="1.0"?>
-<xml>
- <!-- There should be comments in the script tag. -->
- <testscript>
- <whatever
- </testscript>
-</xml>
View
14 tests/test_markup_markuptester.py
@@ -28,7 +28,7 @@ def _test_xul_raw(data, path, should_fail=False, type_=None):
if should_fail:
assert err.failed()
else:
- assert not err.failed()
+ assert not err.failed(fail_on_warnings=False)
return err
@@ -225,18 +225,10 @@ def test_lp_remote():
True, PACKAGE_THEME)
-def test_invalid_markup():
- "Tests an markup file that is simply broken."
-
- # Test for the banned test element
+def test_banned_markup():
+ "Tests that banned elements are flagged."
_test_xul("tests/resources/markup/markuptester/bad_banned.xml", True)
- result = _test_xul("tests/resources/markup/markuptester/bad.xml", True)
- assert result.warnings
- result = _test_xul("tests/resources/markup/markuptester/bad_script.xml",
- False)
- assert result.notices
-
def test_bad_encoding():
"""Test that bad encodings don't cause the parser to fail."""
View
472 validator/python/HTMLParser.py
@@ -0,0 +1,472 @@
+"""A parser for HTML and XHTML."""
+
+# This file is based on sgmllib.py, but the API is slightly different.
+
+# XXX There should be a way to distinguish between PCDATA (parsed
+# character data -- the normal case), RCDATA (replaceable character
+# data -- only char and entity references and end tags are special)
+# and CDATA (character data -- only end tags are special).
+
+
+import markupbase
+import re
+
# Regular expressions used for parsing

# A character that interrupts a run of ordinary text data ('<' or '&').
interesting_normal = re.compile('[&<]')
# '&' followed by a letter or '#': possibly the start of an (incomplete)
# entity or character reference.
incomplete = re.compile('&[a-zA-Z#]')

# Named entity reference, e.g. '&amp;'; the trailing char shows whether the
# closing ';' was present.
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
# Numeric character reference, decimal or hex, e.g. '&#38;' or '&#x26;'.
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')

starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Tag name at the start of a start tag, plus any following whitespace.
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')

# One attribute: name, optional '=value' (quoted or bare), trailing space.
attrfind = re.compile(
    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')

# Matches from '<' + tag name up to (but not including) the closing '>' or
# '/>' of a start tag; used to decide whether a whole start tag is buffered.
locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:[\s/]*                          # optional whitespace before attribute name
    (?:(?<=['"\s/])[^\s/>][^\s/=>]*  # attribute name
      (?:\s*=+\s*                    # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |"[^"]*"                   # LIT-enclosed value
          |(?!['"])[^>\s]*           # bare value
         )
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+
+
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Carries the message plus an optional (lineno, offset) position;
    either element of the position may be None.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        # Unpack the (lineno, offset) pair onto separate attributes.
        self.lineno, self.offset = position

    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # The stored offset is 0-based; report a 1-based column.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
+
+
class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # Elements whose content is CDATA: nothing but the matching end tag
    # is recognized as markup inside them (see set_cdata_mode()).
    CDATA_CONTENT_ELEMENTS = ("script", "style")


    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        # Which characters stop a run of plain data; swapped out while
        # inside a CDATA element.
        self.interesting = interesting_normal
        self.cdata_elem = None
        markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        """Raise HTMLParseError annotated with the current (line, column)."""
        raise HTMLParseError(message, self.getpos())

    # Raw text of the most recent start tag; double underscore makes this
    # name-mangled (_HTMLParser__starttag_text), private to this class.
    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        # Inside <script>/<style> only the matching end tag is
        # "interesting"; everything else passes through as data.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None
+
+ # Internal -- handle data as far as reasonable. May leave state
+ # and data to be processed by a subsequent call. If 'end' is
+ # true, force handling all data as if followed by EOF marker.
+ def goahead(self, end):
+ rawdata = self.rawdata
+ i = 0
+ n = len(rawdata)
+ while i < n:
+ match = self.interesting.search(rawdata, i) # < or &
+ if match:
+ j = match.start()
+ else:
+ if self.cdata_elem:
+ break
+ j = n
+ if i < j: self.handle_data(rawdata[i:j])
+ i = self.updatepos(i, j)
+ if i == n: break
+ startswith = rawdata.startswith
+ if startswith('<', i):
+ if starttagopen.match(rawdata, i): # < + letter
+ k = self.parse_starttag(i)
+ elif startswith("</", i):
+ k = self.parse_endtag(i)
+ elif startswith("<!--", i):
+ k = self.parse_comment(i)
+ elif startswith("<?", i):
+ k = self.parse_pi(i)
+ elif startswith("<!", i):
+ k = self.parse_html_declaration(i)
+ elif (i + 1) < n:
+ self.handle_data("<")
+ k = i + 1
+ else:
+ break
+ if k < 0:
+ if not end:
+ break
+ k = rawdata.find('>', i + 1)
+ if k < 0:
+ k = rawdata.find('<', i + 1)
+ if k < 0:
+ k = i + 1
+ else:
+ k += 1
+ self.handle_data(rawdata[i:k])
+ i = self.updatepos(i, k)
+ elif startswith("&#", i):
+ match = charref.match(rawdata, i)
+ if match:
+ name = match.group()[2:-1]
+ self.handle_charref(name)
+ k = match.end()
+ if not startswith(';', k-1):
+ k = k - 1
+ i = self.updatepos(i, k)
+ continue
+ else:
+ if ";" in rawdata[i:]: #bail by consuming &#
+ self.handle_data(rawdata[0:2])
+ i = self.updatepos(i, 2)
+ break
+ elif startswith('&', i):
+ match = entityref.match(rawdata, i)
+ if match:
+ name = match.group(1)
+ self.handle_entityref(name)
+ k = match.end()
+ if not startswith(';', k-1):
+ k = k - 1
+ i = self.updatepos(i, k)
+ continue
+ match = incomplete.match(rawdata, i)
+ if match:
+ # match.group() will contain at least 2 chars
+ if end and match.group() == rawdata[i:]:
+ self.error("EOF in middle of entity or char ref")
+ # incomplete
+ break
+ elif (i + 1) < n:
+ # not the end of the buffer, and can't be confused
+ # with some other construct
+ self.handle_data("&")
+ i = self.updatepos(i, i + 1)
+ else:
+ break
+ else:
+ assert 0, "interesting.search() lied"
+ # end while
+ if end and i < n and not self.cdata_elem:
+ self.handle_data(rawdata[i:n])
+ i = self.updatepos(i, n)
+ self.rawdata = rawdata[i:]
+
+ # Internal -- parse html declarations, return length or -1 if not terminated
+ # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
+ # See also parse_declaration in _markupbase
+ def parse_html_declaration(self, i):
+ rawdata = self.rawdata
+ if rawdata[i:i+2] != '<!':
+ self.error('unexpected call to parse_html_declaration()')
+ if rawdata[i:i+4] == '<!--':
+ # this case is actually already handled in goahead()
+ return self.parse_comment(i)
+ elif rawdata[i:i+3] == '<![':
+ return self.parse_marked_section(i)
+ elif rawdata[i:i+9].lower() == '<!doctype':
+ # find the closing >
+ gtpos = rawdata.find('>', i+9)
+ if gtpos == -1:
+ return -1
+ self.handle_decl(rawdata[i+2:gtpos])
+ return gtpos+1
+ else:
+ return self.parse_bogus_comment(i)
+
+ # Internal -- parse bogus comment, return length or -1 if not terminated
+ # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
+ def parse_bogus_comment(self, i, report=1):
+ rawdata = self.rawdata
+ if rawdata[i:i+2] not in ('<!', '</'):
+ self.error('unexpected call to parse_comment()')
+ pos = rawdata.find('>', i+2)
+ if pos == -1:
+ return -1
+ if report:
+ self.handle_comment(rawdata[i+2:pos])
+ return pos + 1
+
+ # Internal -- parse processing instr, return end or -1 if not terminated
+ def parse_pi(self, i):
+ rawdata = self.rawdata
+ assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
+ match = piclose.search(rawdata, i+2) # >
+ if not match:
+ return -1
+ j = match.start()
+ self.handle_pi(rawdata[i+2: j])
+ j = match.end()
+ return j
+
    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag at index i; returns the end index, or -1 if
        the whole tag is not buffered yet.  Dispatches to
        handle_starttag()/handle_startendtag() and records the raw tag
        text for get_starttag_text()."""
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            # Incomplete tag: wait for more data.
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()

        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                # Attribute with no '=value' part.
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip matching surrounding quotes.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Junk before the '>': compute the position of the junk, then
            # tolerantly re-emit the whole tag as plain data instead of
            # raising.  NOTE(review): lineno/offset are computed but not
            # used afterwards — apparently residue from a removed error
            # call; confirm before relying on them.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos
+
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        """Return the index just past a complete start tag at i, or -1 if
        more data is needed to decide."""
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                # NOTE(review): since next == "/", the startswith("/", j)
                # test above always succeeds, so this error branch looks
                # unreachable — kept as in the original.
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")
+
    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        """Parse an end tag at index i; returns the index just past it,
        or -1 if no '>' has been buffered yet.  Malformed end tags fall
        back to HTML5-style tolerant handling."""
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            # Not a well-formed end tag.
            if self.cdata_elem is not None:
                # Inside CDATA, anything but the exact closer is data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1

        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                # Some other end tag inside CDATA: treat as data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos

        self.handle_endtag(elem)
        self.clear_cdata_mode()
        return gtpos
+
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        """Default: treat <tag/> as a start tag immediately followed by
        the matching end tag."""
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    # Overridable -- handle unknown declarations (called from markupbase)
    def unknown_decl(self, data):
        pass
+
    # Internal -- helper to remove special character quoting
    # Lazily-built map of entity name -> unicode char, cached on the
    # class so it is shared by all instances.
    entitydefs = None
    def unescape(self, s):
        """Replace '&name;', '&#NNN;' and '&#xNN;' references in *s* with
        the characters they denote; unknown references pass through
        unchanged.  (Python 2 only: uses unichr/htmlentitydefs.)"""
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return unichr(c)
            except ValueError:
                # Malformed numeric reference: emit it verbatim.
                return '&#'+s+';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser supports apos,
                # which is not part of HTML 4
                import htmlentitydefs
                if HTMLParser.entitydefs is None:
                    entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
                    for k, v in htmlentitydefs.name2codepoint.iteritems():
                        entitydefs[k] = unichr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    # Unknown entity name: emit it verbatim.
                    return '&'+s+';'

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
View
6 validator/python/NOTICE.md
@@ -0,0 +1,6 @@
+Notice
+======
+
+The files in this directory are extracted from the CPython standard library.
+They are included solely for compatibility purposes, and their respective
+copyrights and licenses are held and controlled by the original developer(s).
View
108 validator/testcases/markup/markuptester.py
@@ -1,10 +1,8 @@
import re
import sys
import types
-try:
- import HTMLParser as htmlparser
-except ImportError: # pragma: no cover
- import html.parser as htmlparser
+
+from validator.python.HTMLParser import HTMLParser
import validator.testcases.scripting as scripting
import validator.unicodehelper as unicodehelper
@@ -38,11 +36,11 @@
_markedsectionclose = re.compile(r']\s*]\s*>')
-class MarkupParser(htmlparser.HTMLParser):
+class MarkupParser(HTMLParser):
"""Parse and analyze the various components of markup files."""
def __init__(self, err, strict=True, debug=False):
- htmlparser.HTMLParser.__init__(self)
+ HTMLParser.__init__(self)
self.err = err
self.is_jetpack = "is_jetpack" in err.metadata # Cache this value.
self.line = 0
@@ -138,24 +136,6 @@ def _feed_parser(self, line):
if "markup" in self.reported:
return
- if ("script" in self.xml_state or
- self.debug and "testscript" in self.xml_state):
- if "script_comments" in self.reported or not self.strict:
- return
- self.err.notice(
- err_id=("markup", "_feed", "missing_script_comments"),
- notice="Missing comments in <script> tag",
- description="Markup parsing errors occurred while trying "
- "to parse the file. This would likely be "
- "mitigated by wrapping <script> tag contents "
- "in HTML comment tags (<!-- -->)",
- filename=self.filename,
- line=self.line,
- context=self.context,
- tier=2)
- self.reported.add("script_comments")
- return
-
if self.strict:
self.err.warning(
err_id=("markup", "_feed", "parse_error"),
@@ -574,6 +554,7 @@ def handle_data(self, data):
def handle_comment(self, data):
self._save_to_buffer(data)
+ # This code comes from markupbase.
def parse_marked_section(self, i, report=0):
rawdata = self.rawdata
@@ -625,82 +606,3 @@ def _is_url_local(self, url):
if url.startswith("chrome://"):
return True
return not REMOTE_URL_PATTERN.match(url)
-
- # Code to fix for Python issue 670664
-
- def parse_starttag(self, i):
- self.__starttag_text = None
- endpos = self.check_for_whole_start_tag(i)
- if endpos < 0:
- return endpos
- rawdata = self.rawdata
- self.__starttag_text = rawdata[i:endpos]
-
- # Now parse the data between i+1 and j into a tag and attrs
- attrs = []
- match = htmlparser.tagfind.match(rawdata, i+1)
- assert match, 'unexpected call to parse_starttag()'
- k = match.end()
- self.lasttag = tag = rawdata[i+1:k].lower()
-
- while k < endpos:
- m = htmlparser.attrfind.match(rawdata, k)
- if not m:
- break
- attrname, rest, attrvalue = m.group(1, 2, 3)
- if not rest:
- attrvalue = None
- elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
- attrvalue[:1] == '"' == attrvalue[-1:]:
- attrvalue = attrvalue[1:-1]
- attrvalue = self.unescape(attrvalue)
- attrs.append((attrname.lower(), attrvalue))
- k = m.end()
-
- end = rawdata[k:endpos].strip()
- if end not in (">", "/>"):
- lineno, offset = self.getpos()
- if "\n" in self.__starttag_text:
- lineno = lineno + self.__starttag_text.count("\n")
- offset = len(self.__starttag_text) \
- - self.__starttag_text.rfind("\n")
- else:
- offset = offset + len(self.__starttag_text)
- self.error("junk characters in start tag: %r"
- % (rawdata[k:endpos][:20],))
- if end.endswith('/>'):
- # XHTML-style empty tag: <span attr="value" />
- self.handle_startendtag(tag, attrs)
- else:
- self.handle_starttag(tag, attrs)
- if tag in self.CDATA_CONTENT_ELEMENTS:
- self.set_cdata_mode(tag)
- return endpos
-
- def parse_endtag(self, i):
- rawdata = self.rawdata
- assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
- match = htmlparser.endendtag.search(rawdata, i+1) # >
- if not match:
- return -1
- j = match.end()
- match = htmlparser.endtagfind.match(rawdata, i) # </ + tag + >
- if not match:
- if self.cdata_tag is not None:
- self.handle_data(rawdata[i:j])
- return j
- self.error("bad end tag: %r" % (rawdata[i:j],))
- tag = match.group(1).strip()
-
- if self.cdata_tag is not None and tag.lower() != self.cdata_tag:
- self.handle_data(rawdata[i:j])
- return j
-
- self.handle_endtag(tag.lower())
- self.clear_cdata_mode()
- return j
-
- def set_cdata_mode(self, tag):
- self.interesting = htmlparser.interesting_cdata
- self.cdata_tag = None
-
Please sign in to comment.
Something went wrong with that request. Please try again.