From 41c90aeba6bc917f4d7303803e7c9829cf3dfaa9 Mon Sep 17 00:00:00 2001
From: Geoffrey Sneddon
Date: Tue, 8 Apr 2014 00:01:25 +0100
Subject: [PATCH] Fix #146; fix all pep8 1.5.4 failures

---
 html5lib/html5parser.py               | 3 +--
 html5lib/sanitizer.py                 | 2 +-
 html5lib/serializer/htmlserializer.py | 4 ++--
 html5lib/tests/test_tokenizer.py      | 6 +++---
 html5lib/treebuilders/dom.py          | 2 +-
 5 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/html5lib/html5parser.py b/html5lib/html5parser.py
index 9be3e134..5b9ce7d7 100644
--- a/html5lib/html5parser.py
+++ b/html5lib/html5parser.py
@@ -1216,8 +1216,7 @@ def startTagIsIndex(self, token):
             attributes["name"] = "isindex"
             self.processStartTag(impliedTagToken("input", "StartTag",
                                                  attributes=attributes,
-                                                 selfClosing=
-                                                 token["selfClosing"]))
+                                                 selfClosing=token["selfClosing"]))
             self.processEndTag(impliedTagToken("label"))
             self.processStartTag(impliedTagToken("hr", "StartTag"))
             self.processEndTag(impliedTagToken("form"))
diff --git a/html5lib/sanitizer.py b/html5lib/sanitizer.py
index 71dc5212..469d9b40 100644
--- a/html5lib/sanitizer.py
+++ b/html5lib/sanitizer.py
@@ -245,7 +245,7 @@ def sanitize_css(self, style):
             elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                 'padding']:
                 for keyword in value.split():
-                    if not keyword in self.acceptable_css_keywords and \
+                    if keyword not in self.acceptable_css_keywords and \
                             not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                         break
                 else:
diff --git a/html5lib/serializer/htmlserializer.py b/html5lib/serializer/htmlserializer.py
index 412a5a22..4a891ff5 100644
--- a/html5lib/serializer/htmlserializer.py
+++ b/html5lib/serializer/htmlserializer.py
@@ -35,7 +35,7 @@
                 v = utils.surrogatePairToCodepoint(v)
             else:
                 v = ord(v)
-            if not v in encode_entity_map or k.islower():
+            if v not in encode_entity_map or k.islower():
                 # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
                 encode_entity_map[v] = k
 
@@ -291,7 +291,7 @@ def serialize(self, treewalker, encoding=None):
             elif type == "Entity":
                 name = token["name"]
                 key = name + ";"
-                if not key in entities:
+                if key not in entities:
                     self.serializeError(_("Entity %s not recognized" % name))
                 if self.resolve_entities and key not in xmlEntities:
                     data = entities[key]
diff --git a/html5lib/tests/test_tokenizer.py b/html5lib/tests/test_tokenizer.py
index 7d7b5258..90315ab3 100644
--- a/html5lib/tests/test_tokenizer.py
+++ b/html5lib/tests/test_tokenizer.py
@@ -68,8 +68,8 @@ def processParseError(self, token):
 def concatenateCharacterTokens(tokens):
     outputTokens = []
     for token in tokens:
-        if not "ParseError" in token and token[0] == "Character":
-            if (outputTokens and not "ParseError" in outputTokens[-1] and
+        if "ParseError" not in token and token[0] == "Character":
+            if (outputTokens and "ParseError" not in outputTokens[-1] and
                 outputTokens[-1][0] == "Character"):
                 outputTokens[-1][1] += token[1]
             else:
@@ -112,7 +112,7 @@ def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
         # Sort the tokens into two groups; non-parse errors and parse errors
         tokens = {"expected": [[], []], "received": [[], []]}
         for tokenType, tokenList in zip(list(tokens.keys()),
-                                    (expectedTokens, receivedTokens)):
+                                        (expectedTokens, receivedTokens)):
             for token in tokenList:
                 if token != "ParseError":
                     tokens[tokenType][0].append(token)
diff --git a/html5lib/treebuilders/dom.py b/html5lib/treebuilders/dom.py
index 61e5ed79..234233b7 100644
--- a/html5lib/treebuilders/dom.py
+++ b/html5lib/treebuilders/dom.py
@@ -158,7 +158,7 @@ def insertText(self, data, parent=None):
             else:
                 # HACK: allow text nodes as children of the document node
                 if hasattr(self.dom, '_child_node_types'):
-                    if not Node.TEXT_NODE in self.dom._child_node_types:
+                    if Node.TEXT_NODE not in self.dom._child_node_types:
                         self.dom._child_node_types = list(self.dom._child_node_types)
                         self.dom._child_node_types.append(Node.TEXT_NODE)
                 self.dom.appendChild(self.dom.createTextNode(data))
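
Reviewer note (not part of the patch): apart from the two re-wrapped continuation lines, every hunk rewrites a membership test from "not x in y" to "x not in y", which appears to be the check pep8 1.5 introduced as E713. The two spellings are equivalent at runtime, since "not x in y" parses as "not (x in y)", so the change is purely stylistic. A minimal, self-contained sketch of that equivalence (illustrative names only, not taken from the repository):

    # "not keyword in allowed" parses as "not (keyword in allowed)",
    # which is exactly what "keyword not in allowed" means, so the
    # pep8-preferred spelling cannot change behaviour.
    allowed = {"bold", "italic", "none"}
    keyword = "underline"
    assert (not keyword in allowed) == (keyword not in allowed)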