diff --git a/.appveyor.yml b/.appveyor.yml
index 3a032f75..b394474d 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -4,8 +4,6 @@ environment:
     PATH: "C:\\Python27\\Scripts\\;%PATH%"
     PYTEST_COMMAND: "coverage run -m pytest"
   matrix:
-    - TOXENV: py27-base
-    - TOXENV: py27-optional
     - TOXENV: py35-base
     - TOXENV: py35-optional
     - TOXENV: py36-base
@@ -21,7 +19,7 @@ test_script:
   - tox
 
 after_test:
-  - python debug-info.py
+  - C:\\Python35\\python.exe debug-info.py
 
 on_success:
   - codecov
diff --git a/.travis.yml b/.travis.yml
index 3d87fe5f..b50061d7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,10 +1,8 @@
 language: python
 python:
   - "pypy3"
-  - "pypy"
   - "3.6"
   - "3.5"
-  - "2.7"
 
 cache: pip
 
@@ -14,7 +12,6 @@ env:
   matrix:
     - TOXENV=optional
     - TOXENV=base
-    - TOXENV=six19-optional
 
 matrix:
   include:
@@ -24,9 +21,6 @@ matrix:
     - python: "3.7"
       dist: xenial  # required for Python >= 3.7
       env: TOXENV=base
-    - python: "3.7"
-      dist: xenial  # required for Python >= 3.7
-      env: TOXENV=six19-optional
 
 install:
   - pip install tox codecov
diff --git a/CHANGES.rst b/CHANGES.rst
index 82605a21..3ea3d84d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,6 +1,11 @@
 Change Log
 ----------
 
+UNRELEASED
+~~~~~~~~~~
+
+* Remove support for end-of-life Python 2.7.
+
 1.0.1
 ~~~~~
 
diff --git a/README.rst b/README.rst
index 095e5f92..b9b2ef4c 100644
--- a/README.rst
+++ b/README.rst
@@ -91,8 +91,7 @@ More documentation is available at https://html5lib.readthedocs.io/.
 Installation
 ------------
 
-html5lib works on CPython 2.7+, CPython 3.4+ and PyPy.  To install it,
-use:
+html5lib works on CPython 3.5+ and PyPy 3. To install it, use:
 
 .. code-block:: bash
 
@@ -128,8 +127,8 @@ Please report any bugs on the `issue tracker
 Tests
 -----
 
-Unit tests require the ``pytest`` and ``mock`` libraries and can be
-run using the ``py.test`` command in the root directory.
+Unit tests require the ``pytest`` library and can be run using the ``py.test``
+command in the root directory.
 
 Test data are contained in a separate `html5lib-tests
 <https://github.com/html5lib/html5lib-tests>`_ repository and included
diff --git a/debug-info.py b/debug-info.py
index f93fbdbe..f5a97726 100644
--- a/debug-info.py
+++ b/debug-info.py
@@ -1,5 +1,3 @@
-from __future__ import print_function, unicode_literals
-
 import platform
 import sys
 
@@ -12,7 +10,7 @@
     "maxsize": sys.maxsize
 }
 
-search_modules = ["chardet", "datrie", "genshi", "html5lib", "lxml", "six"]
+search_modules = ["chardet", "datrie", "genshi", "html5lib", "lxml"]
 found_modules = []
 
 for m in search_modules:
diff --git a/doc/conf.py b/doc/conf.py
index e02218b8..3c0bc93d 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 #
 # html5lib documentation build configuration file, created by
 # sphinx-quickstart on Wed May  8 00:04:49 2013.
@@ -250,7 +249,7 @@
 # If true, do not generate a @detailmenu in the "Top" node's menu.
 #texinfo_no_detailmenu = False
 
-class CExtMock(object):
+class CExtMock:
     """Required for autodoc on readthedocs.org where you cannot build C extensions."""
     def __init__(self, *args, **kwargs):
         pass
diff --git a/html5lib/__init__.py b/html5lib/__init__.py
index 373fb925..88a97ac4 100644
--- a/html5lib/__init__.py
+++ b/html5lib/__init__.py
@@ -20,7 +20,6 @@
 * :func:`~.serializer.serialize`
 """
 
-from __future__ import absolute_import, division, unicode_literals
 
 from .html5parser import HTMLParser, parse, parseFragment
 from .treebuilders import getTreeBuilder
diff --git a/html5lib/_ihatexml.py b/html5lib/_ihatexml.py
index a7aa72e8..ac15679a 100644
--- a/html5lib/_ihatexml.py
+++ b/html5lib/_ihatexml.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import re
 import warnings
 
@@ -183,7 +181,7 @@ def escapeRegexp(string):
 nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]")
 
 
-class InfosetFilter(object):
+class InfosetFilter:
     replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
 
     def __init__(self,
diff --git a/html5lib/_inputstream.py b/html5lib/_inputstream.py
index b8021291..058d14dc 100644
--- a/html5lib/_inputstream.py
+++ b/html5lib/_inputstream.py
@@ -1,11 +1,8 @@
-from __future__ import absolute_import, division, unicode_literals
-
-from six import text_type
-from six.moves import http_client, urllib
-
+import urllib.response
 import codecs
 import re
 from io import BytesIO, StringIO
+import http.client
 
 import webencodings
 
@@ -48,7 +45,7 @@
 charsUntilRegEx = {}
 
 
-class BufferedStream(object):
+class BufferedStream:
     """Buffering for streams that do not have buffering of their own
 
     The buffer is implemented as a list of chunks on the assumption that
@@ -125,15 +122,15 @@ def _readFromBuffer(self, bytes):
 def HTMLInputStream(source, **kwargs):
     # Work around Python bug #20007: read(0) closes the connection.
     # http://bugs.python.org/issue20007
-    if (isinstance(source, http_client.HTTPResponse) or
+    if (isinstance(source, http.client.HTTPResponse) or
         # Also check for addinfourl wrapping HTTPResponse
         (isinstance(source, urllib.response.addbase) and
-         isinstance(source.fp, http_client.HTTPResponse))):
+         isinstance(source.fp, http.client.HTTPResponse))):
         isUnicode = False
     elif hasattr(source, "read"):
-        isUnicode = isinstance(source.read(0), text_type)
+        isUnicode = isinstance(source.read(0), str)
     else:
-        isUnicode = isinstance(source, text_type)
+        isUnicode = isinstance(source, str)
 
     if isUnicode:
         encodings = [x for x in kwargs if x.endswith("_encoding")]
@@ -145,7 +142,7 @@ def HTMLInputStream(source, **kwargs):
         return HTMLBinaryInputStream(source, **kwargs)
 
 
-class HTMLUnicodeInputStream(object):
+class HTMLUnicodeInputStream:
     """Provides a unicode stream of characters to the HTMLTokenizer.
 
     This class takes care of character encoding and removing or replacing
@@ -598,10 +595,6 @@ def __next__(self):
             raise TypeError
         return self[p:p + 1]
 
-    def next(self):
-        # Py2 compat
-        return self.__next__()
-
     def previous(self):
         p = self._position
         if p >= len(self):
@@ -679,7 +672,7 @@ def jumpTo(self, bytes):
             raise StopIteration
 
 
-class EncodingParser(object):
+class EncodingParser:
     """Mini parser for detecting character encoding from meta elements"""
 
     def __init__(self, data):
@@ -860,7 +853,7 @@ def getAttribute(self):
                 attrValue.append(c)
 
 
-class ContentAttrParser(object):
+class ContentAttrParser:
     def __init__(self, data):
         assert isinstance(data, bytes)
         self.data = data
diff --git a/html5lib/_tokenizer.py b/html5lib/_tokenizer.py
index 6078f66a..2337d89b 100644
--- a/html5lib/_tokenizer.py
+++ b/html5lib/_tokenizer.py
@@ -1,7 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
-from six import unichr as chr
-
 from collections import deque
 
 from .constants import spaceCharacters
@@ -18,7 +14,7 @@
 entitiesTrie = Trie(entities)
 
 
-class HTMLTokenizer(object):
+class HTMLTokenizer:
     """ This class takes care of tokenizing HTML.
 
     * self.currentToken
@@ -44,7 +40,7 @@ def __init__(self, stream, parser=None, **kwargs):
 
         # The current token being created
         self.currentToken = None
-        super(HTMLTokenizer, self).__init__()
+        super().__init__()
 
     def __iter__(self):
         """ This is where the magic happens.
diff --git a/html5lib/_trie/__init__.py b/html5lib/_trie/__init__.py
index a5ba4bf1..2b5d0f66 100644
--- a/html5lib/_trie/__init__.py
+++ b/html5lib/_trie/__init__.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from .py import Trie as PyTrie
 
 Trie = PyTrie
diff --git a/html5lib/_trie/_base.py b/html5lib/_trie/_base.py
index 6b71975f..6b2977b2 100644
--- a/html5lib/_trie/_base.py
+++ b/html5lib/_trie/_base.py
@@ -1,9 +1,4 @@
-from __future__ import absolute_import, division, unicode_literals
-
-try:
-    from collections.abc import Mapping
-except ImportError:  # Python 2.7
-    from collections import Mapping
+from collections.abc import Mapping
 
 
 class Trie(Mapping):
@@ -11,7 +6,7 @@ class Trie(Mapping):
 
     def keys(self, prefix=None):
         # pylint:disable=arguments-differ
-        keys = super(Trie, self).keys()
+        keys = super().keys()
 
         if prefix is None:
             return set(keys)
diff --git a/html5lib/_trie/datrie.py b/html5lib/_trie/datrie.py
index 51f3d046..15e7cc7b 100644
--- a/html5lib/_trie/datrie.py
+++ b/html5lib/_trie/datrie.py
@@ -1,7 +1,4 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from datrie import Trie as DATrie
-from six import text_type
 
 from ._base import Trie as ABCTrie
 
@@ -10,7 +7,7 @@ class Trie(ABCTrie):
     def __init__(self, data):
         chars = set()
         for key in data.keys():
-            if not isinstance(key, text_type):
+            if not isinstance(key, str):
                 raise TypeError("All keys must be strings")
             for char in key:
                 chars.add(char)
diff --git a/html5lib/_trie/py.py b/html5lib/_trie/py.py
index c2ba3da7..bc6363c4 100644
--- a/html5lib/_trie/py.py
+++ b/html5lib/_trie/py.py
@@ -1,6 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-from six import text_type
-
 from bisect import bisect_left
 
 from ._base import Trie as ABCTrie
@@ -8,7 +5,7 @@
 
 class Trie(ABCTrie):
     def __init__(self, data):
-        if not all(isinstance(x, text_type) for x in data.keys()):
+        if not all(isinstance(x, str) for x in data.keys()):
             raise TypeError("All keys must be strings")
 
         self._data = data
diff --git a/html5lib/_utils.py b/html5lib/_utils.py
index 91252f2c..e0604dbc 100644
--- a/html5lib/_utils.py
+++ b/html5lib/_utils.py
@@ -1,9 +1,5 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from types import ModuleType
 
-from six import text_type
-
 try:
     import xml.etree.cElementTree as default_etree
 except ImportError:
@@ -23,10 +19,10 @@
 # escapes.
 try:
     _x = eval('"\\uD800"')  # pylint:disable=eval-used
-    if not isinstance(_x, text_type):
+    if not isinstance(_x, str):
         # We need this with u"" because of http://bugs.jython.org/issue2039
         _x = eval('u"\\uD800"')  # pylint:disable=eval-used
-        assert isinstance(_x, text_type)
+        assert isinstance(_x, str)
 except:  # pylint:disable=bare-except
     supports_lone_surrogates = False
 else:
diff --git a/html5lib/constants.py b/html5lib/constants.py
index fe3e237c..3596ea21 100644
--- a/html5lib/constants.py
+++ b/html5lib/constants.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import string
 
 EOF = None
diff --git a/html5lib/filters/alphabeticalattributes.py b/html5lib/filters/alphabeticalattributes.py
index 5ba926e3..d96ad62a 100644
--- a/html5lib/filters/alphabeticalattributes.py
+++ b/html5lib/filters/alphabeticalattributes.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from . import base
 
 from collections import OrderedDict
diff --git a/html5lib/filters/base.py b/html5lib/filters/base.py
index c7dbaed0..6937911d 100644
--- a/html5lib/filters/base.py
+++ b/html5lib/filters/base.py
@@ -1,7 +1,4 @@
-from __future__ import absolute_import, division, unicode_literals
-
-
-class Filter(object):
+class Filter:
     def __init__(self, source):
         self.source = source
 
diff --git a/html5lib/filters/inject_meta_charset.py b/html5lib/filters/inject_meta_charset.py
index aefb5c84..cfa469c3 100644
--- a/html5lib/filters/inject_meta_charset.py
+++ b/html5lib/filters/inject_meta_charset.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from . import base
 
 
diff --git a/html5lib/filters/lint.py b/html5lib/filters/lint.py
index acd4d7a2..7f317db4 100644
--- a/html5lib/filters/lint.py
+++ b/html5lib/filters/lint.py
@@ -1,7 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
-from six import text_type
-
 from . import base
 from ..constants import namespaces, voidElements
 
@@ -23,7 +19,7 @@ def __init__(self, source, require_matching_tags=True):
         :arg require_matching_tags: whether or not to require matching tags
 
         """
-        super(Filter, self).__init__(source)
+        super().__init__(source)
         self.require_matching_tags = require_matching_tags
 
     def __iter__(self):
@@ -33,9 +29,9 @@ def __iter__(self):
             if type in ("StartTag", "EmptyTag"):
                 namespace = token["namespace"]
                 name = token["name"]
-                assert namespace is None or isinstance(namespace, text_type)
+                assert namespace is None or isinstance(namespace, str)
                 assert namespace != ""
-                assert isinstance(name, text_type)
+                assert isinstance(name, str)
                 assert name != ""
                 assert isinstance(token["data"], dict)
                 if (not namespace or namespace == namespaces["html"]) and name in voidElements:
@@ -45,18 +41,18 @@ def __iter__(self):
                 if type == "StartTag" and self.require_matching_tags:
                     open_elements.append((namespace, name))
                 for (namespace, name), value in token["data"].items():
-                    assert namespace is None or isinstance(namespace, text_type)
+                    assert namespace is None or isinstance(namespace, str)
                     assert namespace != ""
-                    assert isinstance(name, text_type)
+                    assert isinstance(name, str)
                     assert name != ""
-                    assert isinstance(value, text_type)
+                    assert isinstance(value, str)
 
             elif type == "EndTag":
                 namespace = token["namespace"]
                 name = token["name"]
-                assert namespace is None or isinstance(namespace, text_type)
+                assert namespace is None or isinstance(namespace, str)
                 assert namespace != ""
-                assert isinstance(name, text_type)
+                assert isinstance(name, str)
                 assert name != ""
                 if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                     assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
@@ -66,26 +62,26 @@ def __iter__(self):
 
             elif type == "Comment":
                 data = token["data"]
-                assert isinstance(data, text_type)
+                assert isinstance(data, str)
 
             elif type in ("Characters", "SpaceCharacters"):
                 data = token["data"]
-                assert isinstance(data, text_type)
+                assert isinstance(data, str)
                 assert data != ""
                 if type == "SpaceCharacters":
                     assert data.strip(spaceCharacters) == ""
 
             elif type == "Doctype":
                 name = token["name"]
-                assert name is None or isinstance(name, text_type)
-                assert token["publicId"] is None or isinstance(name, text_type)
-                assert token["systemId"] is None or isinstance(name, text_type)
+                assert name is None or isinstance(name, str)
+                assert token["publicId"] is None or isinstance(name, str)
+                assert token["systemId"] is None or isinstance(name, str)
 
             elif type == "Entity":
-                assert isinstance(token["name"], text_type)
+                assert isinstance(token["name"], str)
 
             elif type == "SerializerError":
-                assert isinstance(token["data"], text_type)
+                assert isinstance(token["data"], str)
 
             else:
                 assert False, "Unknown token type: %(type)s" % {"type": type}
diff --git a/html5lib/filters/optionaltags.py b/html5lib/filters/optionaltags.py
index 4a865012..f1c21118 100644
--- a/html5lib/filters/optionaltags.py
+++ b/html5lib/filters/optionaltags.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from . import base
 
 
diff --git a/html5lib/filters/sanitizer.py b/html5lib/filters/sanitizer.py
index e852f53b..03cb46c2 100644
--- a/html5lib/filters/sanitizer.py
+++ b/html5lib/filters/sanitizer.py
@@ -1,10 +1,7 @@
-from __future__ import absolute_import, division, unicode_literals
-
+import urllib.parse
 import re
 from xml.sax.saxutils import escape, unescape
 
-from six.moves import urllib_parse as urlparse
-
 from . import base
 from ..constants import namespaces, prefixes
 
@@ -749,7 +746,7 @@ def __init__(self,
             hrefs--these are removed
 
         """
-        super(Filter, self).__init__(source)
+        super().__init__(source)
         self.allowed_elements = allowed_elements
         self.allowed_attributes = allowed_attributes
         self.allowed_css_properties = allowed_css_properties
@@ -818,7 +815,7 @@ def allowed_token(self, token):
                 # remove replacement characters from unescaped characters
                 val_unescaped = val_unescaped.replace("\ufffd", "")
                 try:
-                    uri = urlparse.urlparse(val_unescaped)
+                    uri = urllib.parse.urlparse(val_unescaped)
                 except ValueError:
                     uri = None
                     del attrs[attr]
diff --git a/html5lib/filters/whitespace.py b/html5lib/filters/whitespace.py
index 0d12584b..2f35f4a0 100644
--- a/html5lib/filters/whitespace.py
+++ b/html5lib/filters/whitespace.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import re
 
 from . import base
diff --git a/html5lib/html5parser.py b/html5lib/html5parser.py
index 4d12d9de..5c972760 100644
--- a/html5lib/html5parser.py
+++ b/html5lib/html5parser.py
@@ -1,6 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-from six import with_metaclass, viewkeys
-
 import types
 from collections import OrderedDict
 
@@ -84,7 +81,7 @@ def __new__(meta, classname, bases, classDict):
     return Decorated
 
 
-class HTMLParser(object):
+class HTMLParser:
     """HTML parser
 
     Generates a tree structure from a stream of (possibly malformed) HTML.
@@ -442,7 +439,7 @@ def getMetaclass(use_metaclass, metaclass_func):
             return type
 
     # pylint:disable=unused-argument
-    class Phase(with_metaclass(getMetaclass(debug, log))):
+    class Phase(metaclass=getMetaclass(debug, log)):
         """Base class for helper object that implements each phase of processing
         """
 
@@ -751,7 +748,7 @@ def startTagMeta(self, token):
                     # the abstract Unicode string, and just use the
                     # ContentAttrParser on that, but using UTF-8 allows all chars
                     # to be encoded and as a ASCII-superset works.
-                    data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
+                    data = _inputstream.EncodingBytes(attributes["content"].encode())
                     parser = _inputstream.ContentAttrParser(data)
                     codec = parser.parse()
                     self.parser.tokenizer.stream.changeEncoding(codec)
@@ -2771,7 +2768,7 @@ def processEndTag(self, token):
 
 
 def adjust_attributes(token, replacements):
-    needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
+    needs_adjustment = token['data'].keys() & replacements.keys()
     if needs_adjustment:
         token['data'] = OrderedDict((replacements.get(k, k), v)
                                     for k, v in token['data'].items())
diff --git a/html5lib/serializer.py b/html5lib/serializer.py
index c66df683..c6e29c6a 100644
--- a/html5lib/serializer.py
+++ b/html5lib/serializer.py
@@ -1,6 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-from six import text_type
-
 import re
 
 from codecs import register_error, xmlcharrefreplace_errors
@@ -101,7 +98,7 @@ def serialize(input, tree="etree", encoding=None, **serializer_opts):
     return s.render(walker(input), encoding)
 
 
-class HTMLSerializer(object):
+class HTMLSerializer:
 
     # attribute quoting options
     quote_attr_values = "legacy"  # be secure by default
@@ -222,14 +219,14 @@ def __init__(self, **kwargs):
         self.strict = False
 
     def encode(self, string):
-        assert(isinstance(string, text_type))
+        assert(isinstance(string, str))
         if self.encoding:
             return string.encode(self.encoding, "htmlentityreplace")
         else:
             return string
 
     def encodeStrict(self, string):
-        assert(isinstance(string, text_type))
+        assert(isinstance(string, str))
         if self.encoding:
             return string.encode(self.encoding, "strict")
         else:
diff --git a/html5lib/tests/__init__.py b/html5lib/tests/__init__.py
index b8ce2de3..e69de29b 100644
--- a/html5lib/tests/__init__.py
+++ b/html5lib/tests/__init__.py
@@ -1 +0,0 @@
-from __future__ import absolute_import, division, unicode_literals
diff --git a/html5lib/tests/conftest.py b/html5lib/tests/conftest.py
index dad167c5..4e690917 100644
--- a/html5lib/tests/conftest.py
+++ b/html5lib/tests/conftest.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 import os.path
 import sys
 
diff --git a/html5lib/tests/sanitizer.py b/html5lib/tests/sanitizer.py
index 93882ec4..5aa02c39 100644
--- a/html5lib/tests/sanitizer.py
+++ b/html5lib/tests/sanitizer.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import codecs
 import json
 
@@ -18,7 +16,7 @@ def collect(self):
 
 class SanitizerTest(pytest.Item):
     def __init__(self, name, parent, test):
-        super(SanitizerTest, self).__init__(name, parent)
+        super().__init__(name, parent)
         self.obj = lambda: 1  # this is to hack around skipif needing a function!
         self.test = test
 
diff --git a/html5lib/tests/support.py b/html5lib/tests/support.py
index dab65c1c..1c6ea202 100644
--- a/html5lib/tests/support.py
+++ b/html5lib/tests/support.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 # pylint:disable=wrong-import-position
 
 import os
@@ -86,7 +84,7 @@ def __getitem__(self, key):
         return dict.get(self, key, self.default)
 
 
-class TestData(object):
+class TestData:
     def __init__(self, filename, newTestHeading="data", encoding="utf8"):
         if encoding is None:
             self.f = open(filename, mode="rb")
@@ -147,11 +145,8 @@ def convertData(data):
 
 
 def errorMessage(input, expected, actual):
-    msg = ("Input:\n%s\nExpected:\n%s\nRecieved\n%s\n" %
-           (repr(input), repr(expected), repr(actual)))
-    if sys.version_info[0] == 2:
-        msg = msg.encode("ascii", "backslashreplace")
-    return msg
+    return ("Input:\n%s\nExpected:\n%s\nRecieved\n%s\n" %
+            (repr(input), repr(expected), repr(actual)))
 
 
 class TracingSaxHandler(xml.sax.handler.ContentHandler):
diff --git a/html5lib/tests/test_alphabeticalattributes.py b/html5lib/tests/test_alphabeticalattributes.py
index 7d5b8e0f..3b82c2b0 100644
--- a/html5lib/tests/test_alphabeticalattributes.py
+++ b/html5lib/tests/test_alphabeticalattributes.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from collections import OrderedDict
 
 import pytest
diff --git a/html5lib/tests/test_encoding.py b/html5lib/tests/test_encoding.py
index 9a411c77..6972aa77 100644
--- a/html5lib/tests/test_encoding.py
+++ b/html5lib/tests/test_encoding.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import os
 
 import pytest
@@ -9,7 +7,7 @@
 
 
 def test_basic_prescan_length():
-    data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
+    data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode()
     pad = 1024 - len(data) + 1
     data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
     assert len(data) == 1024  # Sanity
@@ -18,7 +16,7 @@ def test_basic_prescan_length():
 
 
 def test_parser_reparse():
-    data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
+    data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode()
     pad = 10240 - len(data) + 1
     data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
     assert len(data) == 10240  # Sanity
diff --git a/html5lib/tests/test_meta.py b/html5lib/tests/test_meta.py
index e42eafdb..9a8de008 100644
--- a/html5lib/tests/test_meta.py
+++ b/html5lib/tests/test_meta.py
@@ -1,7 +1,4 @@
-from __future__ import absolute_import, division, unicode_literals
-
-import six
-from mock import Mock
+from unittest.mock import Mock
 
 from . import support
 
@@ -27,11 +24,7 @@ def test_errorMessage():
     r = support.errorMessage(input, expected, actual)
 
     # Assertions!
-    if six.PY2:
-        assert b"Input:\n1\nExpected:\n2\nRecieved\n3\n" == r
-    else:
-        assert six.PY3
-        assert "Input:\n1\nExpected:\n2\nRecieved\n3\n" == r
+    assert "Input:\n1\nExpected:\n2\nRecieved\n3\n" == r
 
     assert input.__repr__.call_count == 1
     assert expected.__repr__.call_count == 1
diff --git a/html5lib/tests/test_optionaltags_filter.py b/html5lib/tests/test_optionaltags_filter.py
index cd282149..1b054f40 100644
--- a/html5lib/tests/test_optionaltags_filter.py
+++ b/html5lib/tests/test_optionaltags_filter.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from html5lib.filters.optionaltags import Filter
 
 
diff --git a/html5lib/tests/test_parser2.py b/html5lib/tests/test_parser2.py
index bcc0bf48..a9ffba34 100644
--- a/html5lib/tests/test_parser2.py
+++ b/html5lib/tests/test_parser2.py
@@ -1,7 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
-from six import PY2, text_type, unichr
-
 import io
 
 from . import support  # noqa
@@ -57,7 +53,7 @@ def test_maintain_attribute_order():
     # This is here because we impl it in parser and not tokenizer
     p = HTMLParser()
     # generate loads to maximize the chance a hash-based mutation will occur
-    attrs = [(unichr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
+    attrs = [(chr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
     token = {'name': 'html',
              'selfClosing': False,
              'selfClosingAcknowledged': False,
@@ -78,7 +74,7 @@ def test_duplicate_attribute():
 def test_maintain_duplicate_attribute_order():
     # This is here because we impl it in parser and not tokenizer
     p = HTMLParser()
-    attrs = [(unichr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
+    attrs = [(chr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
     token = {'name': 'html',
              'selfClosing': False,
              'selfClosingAcknowledged': False,
@@ -111,11 +107,6 @@ def test_debug_log():
                 ('dataState', 'InBodyPhase', 'InBodyPhase', 'processEndTag', {'name': 'p', 'type': 'EndTag'}),
                 ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'})]
 
-    if PY2:
-        for i, log in enumerate(expected):
-            log = [x.encode("ascii") if isinstance(x, text_type) else x for x in log]
-            expected[i] = tuple(log)
-
     assert parser.log == expected
 
 
diff --git a/html5lib/tests/test_sanitizer.py b/html5lib/tests/test_sanitizer.py
index 45046d57..ec516bac 100644
--- a/html5lib/tests/test_sanitizer.py
+++ b/html5lib/tests/test_sanitizer.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from html5lib import constants, parseFragment, serialize
 from html5lib.filters import sanitizer
 
diff --git a/html5lib/tests/test_serializer.py b/html5lib/tests/test_serializer.py
index c23592af..4265105d 100644
--- a/html5lib/tests/test_serializer.py
+++ b/html5lib/tests/test_serializer.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import os
 import json
 
@@ -49,13 +47,11 @@ def __iter__(self):
                 else:
                     namespace = default_namespace
                     name, attrib = token[1:]
-                for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
-                    yield token
+                yield from self.emptyTag(namespace, name, self._convertAttrib(attrib))
             elif type == "Comment":
                 yield self.comment(token[1])
             elif type in ("Characters", "SpaceCharacters"):
-                for token in self.text(token[1]):
-                    yield token
+                yield from self.text(token[1])
             elif type == "Doctype":
                 if len(token) == 4:
                     yield self.doctype(token[1], token[2], token[3])
diff --git a/html5lib/tests/test_stream.py b/html5lib/tests/test_stream.py
index 27c39538..0a1839ec 100644
--- a/html5lib/tests/test_stream.py
+++ b/html5lib/tests/test_stream.py
@@ -1,5 +1,5 @@
-from __future__ import absolute_import, division, unicode_literals
-
+import urllib.response
+import http.client
 from . import support  # noqa
 
 import codecs
@@ -8,9 +8,6 @@
 
 import pytest
 
-import six
-from six.moves import http_client, urllib
-
 from html5lib._inputstream import (BufferedStream, HTMLInputStream,
                                    HTMLUnicodeInputStream, HTMLBinaryInputStream)
 from html5lib._utils import supports_lone_surrogates
@@ -105,7 +102,7 @@ def test_char_ascii():
 
 
 def test_char_utf8():
-    stream = HTMLInputStream('\u2018'.encode('utf-8'), override_encoding='utf-8')
+    stream = HTMLInputStream('\u2018'.encode(), override_encoding='utf-8')
     assert stream.charEncoding[0].name == 'utf-8'
     assert stream.char() == '\u2018'
 
@@ -186,12 +183,12 @@ def test_python_issue_20007():
     Make sure we have a work-around for Python bug #20007
     http://bugs.python.org/issue20007
     """
-    class FakeSocket(object):
+    class FakeSocket:
         def makefile(self, _mode, _bufsize=None):
             # pylint:disable=unused-argument
             return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
 
-    source = http_client.HTTPResponse(FakeSocket())
+    source = http.client.HTTPResponse(FakeSocket())
     source.begin()
     stream = HTMLInputStream(source)
     assert stream.charsUntil(" ") == "Text"
@@ -202,15 +199,12 @@ def test_python_issue_20007_b():
     Make sure we have a work-around for Python bug #20007
     http://bugs.python.org/issue20007
     """
-    if six.PY2:
-        return
-
-    class FakeSocket(object):
+    class FakeSocket:
         def makefile(self, _mode, _bufsize=None):
             # pylint:disable=unused-argument
             return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
 
-    source = http_client.HTTPResponse(FakeSocket())
+    source = http.client.HTTPResponse(FakeSocket())
     source.begin()
     wrapped = urllib.response.addinfourl(source, source.msg, "http://example.com")
     stream = HTMLInputStream(wrapped)
diff --git a/html5lib/tests/test_treeadapters.py b/html5lib/tests/test_treeadapters.py
index 95e56c00..1e396ed9 100644
--- a/html5lib/tests/test_treeadapters.py
+++ b/html5lib/tests/test_treeadapters.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from . import support  # noqa
 
 import html5lib
diff --git a/html5lib/tests/test_treewalkers.py b/html5lib/tests/test_treewalkers.py
index 67fc89e5..e949eed8 100644
--- a/html5lib/tests/test_treewalkers.py
+++ b/html5lib/tests/test_treewalkers.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import itertools
 
 import pytest
@@ -78,30 +76,6 @@ def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
         raise AssertionError("TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s" % (treeName, expected, output))
 
 
-def test_treewalker_six_mix():
-    """Str/Unicode mix. If str attrs added to tree"""
-
-    # On Python 2.x string literals are of type str. Unless, like this
-    # file, the programmer imports unicode_literals from __future__.
-    # In that case, string literals become objects of type unicode.
-
-    # This test simulates a Py2 user, modifying attributes on a document
-    # fragment but not using the u'' syntax nor importing unicode_literals
-    sm_tests = [
-        ('<a href="http://example.com">Example</a>',
-         [(str('class'), str('test123'))],
-         '<a>\n  class="test123"\n  href="http://example.com"\n  "Example"'),
-
-        ('<link href="http://example.com/cow">',
-         [(str('rel'), str('alternate'))],
-         '<link>\n  href="http://example.com/cow"\n  rel="alternate"\n  "Example"')
-    ]
-
-    for tree in sorted(treeTypes.items()):
-        for intext, attrs, expected in sm_tests:
-            yield runTreewalkerEditTest, intext, expected, attrs, tree
-
-
 @pytest.mark.parametrize("tree,char", itertools.product(sorted(treeTypes.items()), ["x", "\u1234"]))
 def test_fragment_single_char(tree, char):
     expected = [
diff --git a/html5lib/tests/test_whitespace_filter.py b/html5lib/tests/test_whitespace_filter.py
index e9da6140..0daf1c52 100644
--- a/html5lib/tests/test_whitespace_filter.py
+++ b/html5lib/tests/test_whitespace_filter.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from html5lib.filters.whitespace import Filter
 from html5lib.constants import spaceCharacters
 spaceCharacters = "".join(spaceCharacters)
diff --git a/html5lib/tests/tokenizer.py b/html5lib/tests/tokenizer.py
index f93ae030..1ceb7942 100644
--- a/html5lib/tests/tokenizer.py
+++ b/html5lib/tests/tokenizer.py
@@ -1,18 +1,15 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import codecs
 import json
 import warnings
 import re
 
 import pytest
-from six import unichr
 
 from html5lib._tokenizer import HTMLTokenizer
 from html5lib import constants, _utils
 
 
-class TokenizerTestParser(object):
+class TokenizerTestParser:
     def __init__(self, initialState, lastStartTag=None):
         self.tokenizer = HTMLTokenizer
         self._state = initialState
@@ -146,11 +143,11 @@ def repl(m):
                 low = int(m.group(2), 16)
                 if 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF:
                     cp = ((high - 0xD800) << 10) + (low - 0xDC00) + 0x10000
-                    return unichr(cp)
+                    return chr(cp)
                 else:
-                    return unichr(high) + unichr(low)
+                    return chr(high) + chr(low)
             else:
-                return unichr(int(m.group(1), 16))
+                return chr(int(m.group(1), 16))
         try:
             return _surrogateRe.sub(repl, inp)
         except ValueError:
@@ -196,7 +193,7 @@ def collect(self):
 
 class TokenizerTestCollector(pytest.Collector):
     def __init__(self, name, parent=None, config=None, session=None, testdata=None):
-        super(TokenizerTestCollector, self).__init__(name, parent, config, session)
+        super().__init__(name, parent, config, session)
         if 'initialStates' not in testdata:
             testdata["initialStates"] = ["Data state"]
         if 'doubleEscaped' in testdata:
@@ -217,7 +214,7 @@ def collect(self):
 
 class TokenizerTest(pytest.Item):
     def __init__(self, name, parent, test, initialState):
-        super(TokenizerTest, self).__init__(name, parent)
+        super().__init__(name, parent)
         self.obj = lambda: 1  # this is to hack around skipif needing a function!
         self.test = test
         self.initialState = initialState
diff --git a/html5lib/tests/tokenizertotree.py b/html5lib/tests/tokenizertotree.py
index b841c76c..2c2bf62a 100644
--- a/html5lib/tests/tokenizertotree.py
+++ b/html5lib/tests/tokenizertotree.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import sys
 import os
 import json
@@ -53,14 +51,14 @@ def make_test(test_data):
 
     rv = []
     rv.append("#data")
-    rv.append(test_data["input"].encode("utf8"))
+    rv.append(test_data["input"].encode())
     rv.append("#errors")
     tree = p.parse(test_data["input"])
     output = p.tree.testSerializer(tree)
     output = "\n".join(("| " + line[3:]) if line.startswith("|  ") else line
                        for line in output.split("\n"))
     output = unnamespaceExpected(r"\1<\2>", output)
-    rv.append(output.encode("utf8"))
+    rv.append(output.encode())
     rv.append("")
     return "\n".join(rv)
 
diff --git a/html5lib/tests/tree_construction.py b/html5lib/tests/tree_construction.py
index c6e7ca09..c7642e2c 100644
--- a/html5lib/tests/tree_construction.py
+++ b/html5lib/tests/tree_construction.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 import itertools
 import re
 import warnings
@@ -31,14 +29,13 @@ def collect(self):
 
 class TreeConstructionTest(pytest.Collector):
     def __init__(self, name, parent=None, config=None, session=None, testdata=None):
-        super(TreeConstructionTest, self).__init__(name, parent, config, session)
+        super().__init__(name, parent, config, session)
         self.testdata = testdata
 
     def collect(self):
         for treeName, treeAPIs in sorted(treeTypes.items()):
-            for x in itertools.chain(self._getParserTests(treeName, treeAPIs),
-                                     self._getTreeWalkerTests(treeName, treeAPIs)):
-                yield x
+            yield from itertools.chain(self._getParserTests(treeName, treeAPIs),
+                                       self._getTreeWalkerTests(treeName, treeAPIs))
 
     def _getParserTests(self, treeName, treeAPIs):
         if treeAPIs is not None and "adapter" in treeAPIs:
@@ -82,7 +79,7 @@ def convertTreeDump(data):
 
 class ParserTest(pytest.Item):
     def __init__(self, name, parent, test, treeClass, namespaceHTMLElements):
-        super(ParserTest, self).__init__(name, parent)
+        super().__init__(name, parent)
         self.obj = lambda: 1  # this is to hack around skipif needing a function!
         self.test = test
         self.treeClass = treeClass
@@ -145,7 +142,7 @@ def repr_failure(self, excinfo):
 
 class TreeWalkerTest(pytest.Item):
     def __init__(self, name, parent, test, treeAPIs):
-        super(TreeWalkerTest, self).__init__(name, parent)
+        super().__init__(name, parent)
         self.obj = lambda: 1  # this is to hack around skipif needing a function!
         self.test = test
         self.treeAPIs = treeAPIs
diff --git a/html5lib/treeadapters/__init__.py b/html5lib/treeadapters/__init__.py
index dfeb0ba5..1444fc9a 100644
--- a/html5lib/treeadapters/__init__.py
+++ b/html5lib/treeadapters/__init__.py
@@ -16,7 +16,6 @@
    genshi_tree = genshi.to_genshi(TreeWalker(tree))
 
 """
-from __future__ import absolute_import, division, unicode_literals
 
 from . import sax
 
diff --git a/html5lib/treeadapters/genshi.py b/html5lib/treeadapters/genshi.py
index 61d5fb6a..370acb71 100644
--- a/html5lib/treeadapters/genshi.py
+++ b/html5lib/treeadapters/genshi.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from genshi.core import QName, Attrs
 from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
 
diff --git a/html5lib/treeadapters/sax.py b/html5lib/treeadapters/sax.py
index f4ccea5a..04ec1ef0 100644
--- a/html5lib/treeadapters/sax.py
+++ b/html5lib/treeadapters/sax.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from xml.sax.xmlreader import AttributesNSImpl
 
 from ..constants import adjustForeignAttributes, unadjustForeignAttributes
diff --git a/html5lib/treebuilders/__init__.py b/html5lib/treebuilders/__init__.py
index d44447ea..90aad5fb 100644
--- a/html5lib/treebuilders/__init__.py
+++ b/html5lib/treebuilders/__init__.py
@@ -29,7 +29,6 @@
 
 """
 
-from __future__ import absolute_import, division, unicode_literals
 
 from .._utils import default_etree
 
diff --git a/html5lib/treebuilders/base.py b/html5lib/treebuilders/base.py
index e4a3d710..69b2eae9 100644
--- a/html5lib/treebuilders/base.py
+++ b/html5lib/treebuilders/base.py
@@ -1,6 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-from six import text_type
-
 from ..constants import scopingElements, tableInsertModeElements, namespaces
 
 # The scope markers are inserted when entering object elements,
@@ -20,7 +17,7 @@
 }
 
 
-class Node(object):
+class Node:
     """Represents an item in the tree"""
     def __init__(self, name):
         """Creates a Node
@@ -143,7 +140,7 @@ def nodesEqual(self, node1, node2):
         return True
 
 
-class TreeBuilder(object):
+class TreeBuilder:
     """Base treebuilder implementation
 
     * documentClass - the class to use for the bottommost node of a document
@@ -199,7 +196,7 @@ def elementInScope(self, target, variant=None):
         # match any node with that name
         exactNode = hasattr(target, "nameTuple")
         if not exactNode:
-            if isinstance(target, text_type):
+            if isinstance(target, str):
                 target = (namespaces["html"], target)
             assert isinstance(target, tuple)
 
@@ -322,7 +319,7 @@ def _setInsertFromTable(self, value):
 
     def insertElementNormal(self, token):
         name = token["name"]
-        assert isinstance(name, text_type), "Element %s not unicode" % name
+        assert isinstance(name, str), "Element %s not unicode" % name
         namespace = token.get("namespace", self.defaultNamespace)
         element = self.elementClass(name, namespace)
         element.attributes = token["data"]
diff --git a/html5lib/treebuilders/dom.py b/html5lib/treebuilders/dom.py
index d8b53004..6a77e7df 100644
--- a/html5lib/treebuilders/dom.py
+++ b/html5lib/treebuilders/dom.py
@@ -1,10 +1,4 @@
-from __future__ import absolute_import, division, unicode_literals
-
-
-try:
-    from collections.abc import MutableMapping
-except ImportError:  # Python 2.7
-    from collections import MutableMapping
+from collections.abc import MutableMapping
 from xml.dom import minidom, Node
 import weakref
 
diff --git a/html5lib/treebuilders/etree.py b/html5lib/treebuilders/etree.py
index cb1d4aef..4bf820e7 100644
--- a/html5lib/treebuilders/etree.py
+++ b/html5lib/treebuilders/etree.py
@@ -1,8 +1,5 @@
-from __future__ import absolute_import, division, unicode_literals
 # pylint:disable=protected-access
 
-from six import text_type
-
 import re
 
 from . import base
@@ -219,7 +216,7 @@ def serializeElement(element, indent=0):
             elif element.tag == ElementTreeCommentType:
                 rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
             else:
-                assert isinstance(element.tag, text_type), \
+                assert isinstance(element.tag, str), \
                     "Expected unicode, got %s, %s" % (type(element.tag), element.tag)
                 nsmatch = tag_regexp.match(element.tag)
 
diff --git a/html5lib/treebuilders/etree_lxml.py b/html5lib/treebuilders/etree_lxml.py
index ca12a99c..446003a3 100644
--- a/html5lib/treebuilders/etree_lxml.py
+++ b/html5lib/treebuilders/etree_lxml.py
@@ -9,12 +9,10 @@
 When any of these things occur, we emit a DataLossWarning
 """
 
-from __future__ import absolute_import, division, unicode_literals
 # pylint:disable=protected-access
 
 import warnings
 import re
-import sys
 
 from . import base
 from ..constants import DataLossWarning
@@ -31,14 +29,14 @@
 comment_type = etree.Comment("asd").tag
 
 
-class DocumentType(object):
+class DocumentType:
     def __init__(self, name, publicId, systemId):
         self.name = name
         self.publicId = publicId
         self.systemId = systemId
 
 
-class Document(object):
+class Document:
     def __init__(self):
         self._elementTree = None
         self._childNodes = []
@@ -77,9 +75,7 @@ def serializeElement(element, indent=0):
                 while next_element is not None:
                     serializeElement(next_element, indent + 2)
                     next_element = next_element.getnext()
-            elif isinstance(element, str) or isinstance(element, bytes):
-                # Text in a fragment
-                assert isinstance(element, str) or sys.version_info[0] == 2
+            elif isinstance(element, str):
                 rv.append("|%s\"%s\"" % (' ' * indent, element))
             else:
                 # Fragment case
@@ -306,7 +302,7 @@ def insertCommentMain(self, data, parent=None):
         if (parent == self.document and
                 self.document._elementTree.getroot()[-1].tag == comment_type):
             warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
-        super(TreeBuilder, self).insertComment(data, parent)
+        super().insertComment(data, parent)
 
     def insertRoot(self, token):
         # Because of the way libxml2 works, it doesn't seem to be possible to
diff --git a/html5lib/treewalkers/__init__.py b/html5lib/treewalkers/__init__.py
index b2d3aac3..b78d6f46 100644
--- a/html5lib/treewalkers/__init__.py
+++ b/html5lib/treewalkers/__init__.py
@@ -8,7 +8,6 @@
 returns an iterator which generates tokens.
 """
 
-from __future__ import absolute_import, division, unicode_literals
 
 from .. import constants
 from .._utils import default_etree
diff --git a/html5lib/treewalkers/base.py b/html5lib/treewalkers/base.py
index 80c474c4..57958a5c 100644
--- a/html5lib/treewalkers/base.py
+++ b/html5lib/treewalkers/base.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from xml.dom import Node
 from ..constants import namespaces, voidElements, spaceCharacters
 
@@ -17,7 +15,7 @@
 spaceCharacters = "".join(spaceCharacters)
 
 
-class TreeWalker(object):
+class TreeWalker:
     """Walks a tree yielding tokens
 
     Tokens are dicts that all have a ``type`` field specifying the type of the
@@ -201,15 +199,12 @@ def __iter__(self):
                 yield self.doctype(*details)
 
             elif type == TEXT:
-                for token in self.text(*details):
-                    yield token
+                yield from self.text(*details)
 
             elif type == ELEMENT:
                 namespace, name, attributes, hasChildren = details
                 if (not namespace or namespace == namespaces["html"]) and name in voidElements:
-                    for token in self.emptyTag(namespace, name, attributes,
-                                               hasChildren):
-                        yield token
+                    yield from self.emptyTag(namespace, name, attributes, hasChildren)
                     hasChildren = False
                 else:
                     yield self.startTag(namespace, name, attributes)
diff --git a/html5lib/treewalkers/dom.py b/html5lib/treewalkers/dom.py
index b0c89b00..ac88cd9d 100644
--- a/html5lib/treewalkers/dom.py
+++ b/html5lib/treewalkers/dom.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from xml.dom import Node
 
 from . import base
diff --git a/html5lib/treewalkers/etree.py b/html5lib/treewalkers/etree.py
index d15a7eeb..9ea2b8ac 100644
--- a/html5lib/treewalkers/etree.py
+++ b/html5lib/treewalkers/etree.py
@@ -1,10 +1,6 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from collections import OrderedDict
 import re
 
-from six import string_types
-
 from . import base
 from .._utils import moduleFactoryFactory
 
@@ -51,7 +47,7 @@ def getNodeDetails(self, node):
                 return base.COMMENT, node.text
 
             else:
-                assert isinstance(node.tag, string_types), type(node.tag)
+                assert isinstance(node.tag, str), type(node.tag)
                 # This is assumed to be an ordinary element
                 match = tag_regexp.match(node.tag)
                 if match:
diff --git a/html5lib/treewalkers/etree_lxml.py b/html5lib/treewalkers/etree_lxml.py
index fb236311..5acd4906 100644
--- a/html5lib/treewalkers/etree_lxml.py
+++ b/html5lib/treewalkers/etree_lxml.py
@@ -1,6 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-from six import text_type
-
 from lxml import etree
 from ..treebuilders.etree import tag_regexp
 
@@ -12,13 +9,13 @@
 def ensure_str(s):
     if s is None:
         return None
-    elif isinstance(s, text_type):
+    elif isinstance(s, str):
         return s
     else:
         return s.decode("ascii", "strict")
 
 
-class Root(object):
+class Root:
     def __init__(self, et):
         self.elementtree = et
         self.children = []
@@ -56,7 +53,7 @@ def __len__(self):
         return 1
 
 
-class Doctype(object):
+class Doctype:
     def __init__(self, root_node, name, public_id, system_id):
         self.root_node = root_node
         self.name = name
@@ -79,7 +76,7 @@ def getnext(self):
         return None
 
 
-class FragmentWrapper(object):
+class FragmentWrapper:
     def __init__(self, fragment_root, obj):
         self.root_node = fragment_root
         self.obj = obj
diff --git a/html5lib/treewalkers/genshi.py b/html5lib/treewalkers/genshi.py
index 7483be27..d4757af2 100644
--- a/html5lib/treewalkers/genshi.py
+++ b/html5lib/treewalkers/genshi.py
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, unicode_literals
-
 from genshi.core import QName
 from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
 from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
@@ -15,14 +13,12 @@ def __iter__(self):
         previous = None
         for event in self.tree:
             if previous is not None:
-                for token in self.tokens(previous, event):
-                    yield token
+                yield from self.tokens(previous, event)
             previous = event
 
         # Don't forget the final event!
         if previous is not None:
-            for token in self.tokens(previous, None):
-                yield token
+            yield from self.tokens(previous, None)
 
     def tokens(self, event, next):
         kind, data, _ = event
@@ -38,10 +34,9 @@ def tokens(self, event, next):
                     converted_attribs[(None, k)] = v
 
             if namespace == namespaces["html"] and name in voidElements:
-                for token in self.emptyTag(namespace, name, converted_attribs,
-                                           not next or next[0] != END or
-                                           next[1] != tag):
-                    yield token
+                yield from self.emptyTag(namespace, name, converted_attribs,
+                                         not next or next[0] != END or
+                                         next[1] != tag)
             else:
                 yield self.startTag(namespace, name, converted_attribs)
 
@@ -55,8 +50,7 @@ def tokens(self, event, next):
             yield self.comment(data)
 
         elif kind == TEXT:
-            for token in self.text(data):
-                yield token
+            yield from self.text(data)
 
         elif kind == DOCTYPE:
             yield self.doctype(*data)
diff --git a/parse.py b/parse.py
index 3e65c330..08eeeecd 100755
--- a/parse.py
+++ b/parse.py
@@ -37,13 +37,12 @@ def parse():
                 pass
         elif f == '-':
             f = sys.stdin
-            if sys.version_info[0] >= 3:
-                encoding = None
+            encoding = None
         else:
             try:
                 # Try opening from file system
                 f = open(f, "rb")
-            except IOError as e:
+            except OSError as e:
                 sys.stderr.write("Unable to open file: %s\n" % e)
                 sys.exit(1)
     except IndexError:
@@ -136,11 +135,7 @@ def printOutput(parser, document, opts):
                 kwargs["sanitize"] = True
 
             tokens = treewalkers.getTreeWalker(opts.treebuilder)(document)
-            if sys.version_info[0] >= 3:
-                encoding = None
-            else:
-                encoding = "utf-8"
-            for text in serializer.HTMLSerializer(**kwargs).serialize(tokens, encoding=encoding):
+            for text in serializer.HTMLSerializer(**kwargs).serialize(tokens):
                 sys.stdout.write(text)
             if not text.endswith('\n'):
                 sys.stdout.write('\n')
diff --git a/requirements-install.sh b/requirements-install.sh
index b7a8d96d..41d9bc42 100755
--- a/requirements-install.sh
+++ b/requirements-install.sh
@@ -1,9 +1,5 @@
 #!/bin/bash -ex
 
-if [[ $SIX_VERSION ]]; then
-  pip install six==$SIX_VERSION
-fi
-
 pip install -r requirements-test.txt
 
 if [[ $USE_OPTIONAL == "true" ]]; then
diff --git a/requirements-test.txt b/requirements-test.txt
index 4e223a3f..b4e048e8 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -7,4 +7,3 @@ flake8<3.0
 pytest==3.2.5
 coverage
 pytest-expect>=1.1,<2.0
-mock
diff --git a/requirements.txt b/requirements.txt
index ae7ec3d0..be8fcb77 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1 @@
-six>=1.9
 webencodings
diff --git a/setup.cfg b/setup.cfg
index d309fdaa..fb819ffe 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,3 @@
-[bdist_wheel]
-universal = 1
-
 [pep8]
 ignore = N
 max-line-length = 139
diff --git a/setup.py b/setup.py
index 27c97727..2745141d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,5 @@
-from __future__ import print_function
-
 import ast
 import codecs
-import sys
 
 from os.path import join, dirname
 from setuptools import setup, find_packages, __version__ as setuptools_version
@@ -32,7 +29,7 @@ def pop(self, i=-1):
         return self[i]
 
 
-if _markerlib and sys.version_info[0] == 3:
+if _markerlib:
     env = _markerlib.markers._VARS
     for key in list(env.keys()):
         new_key = key.replace('.', '_')
@@ -63,9 +60,8 @@ def default_environment():
     'License :: OSI Approved :: MIT License',
     'Operating System :: OS Independent',
     'Programming Language :: Python',
-    'Programming Language :: Python :: 2',
-    'Programming Language :: Python :: 2.7',
     'Programming Language :: Python :: 3',
+    'Programming Language :: Python :: 3 :: Only',
     'Programming Language :: Python :: 3.5',
     'Programming Language :: Python :: 3.6',
     'Topic :: Software Development :: Libraries :: Python Modules',
@@ -100,10 +96,9 @@ def default_environment():
       maintainer_email='james@hoppipolla.co.uk',
       packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
       install_requires=[
-          'six>=1.9',
           'webencodings',
       ],
-      python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
+      python_requires=">=3.5",
       extras_require={
           # A conditional extra will only install these items when the extra is
           # requested and the condition matches.
diff --git a/tox.ini b/tox.ini
index edb752f6..f4093511 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = {py27,py35,py36,pypy}-{base,six19,optional}
+envlist = {py35,py36,pypy3}-{base,optional}
 
 [testenv]
 deps =
@@ -11,7 +11,6 @@ passenv =
   PYTEST_COMMAND
   COVERAGE_RUN_OPTIONS
 commands =
-  six19: pip install six==1.9
   {env:PYTEST_COMMAND:{envbindir}/py.test} {posargs}
   flake8 {toxinidir}