Re-land "Check in a simple pure-python based Markdown previewer."

This re-lands #352450 with a fix to make checklicenses.py happy.

R=thestig@chromium.org
TBR=jam@chromium.org

Review URL: https://codereview.chromium.org/1392733002

Cr-Original-Commit-Position: refs/heads/master@{#352731}
Cr-Mirrored-From: https://chromium.googlesource.com/chromium/src
Cr-Mirrored-Commit: 27c171cd168807f85b95ae8aaa797bda02eff319
diff --git a/markdown/__init__.py b/markdown/__init__.py
new file mode 100644
index 0000000..1b86553
--- /dev/null
+++ b/markdown/__init__.py
@@ -0,0 +1,529 @@
+"""
+Python Markdown
+===============
+
+Python Markdown converts Markdown to HTML and can be used as a library or
+called from the command line.
+
+## Basic usage as a module:
+
+    import markdown
+    html = markdown.markdown(your_text_string)
+
+See <https://pythonhosted.org/Markdown/> for more
+information and instructions on how to extend the functionality of
+Python Markdown.  Read that before you try modifying this file.
+
+## Authors and License
+
+Started by [Manfred Stienstra](http://www.dwerg.net/).  Continued and
+maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
+Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
+
+Contact: markdown@freewisdom.org
+
+Copyright 2007-2013 The Python Markdown Project (v. 1.7 and later)
+Copyright 200? Django Software Foundation (OrderedDict implementation)
+Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+Copyright 2004 Manfred Stienstra (the original version)
+
+License: BSD (see LICENSE for details).
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from .__version__ import version, version_info  # noqa
+import codecs
+import sys
+import logging
+import warnings
+import importlib
+from . import util
+from .preprocessors import build_preprocessors
+from .blockprocessors import build_block_parser
+from .treeprocessors import build_treeprocessors
+from .inlinepatterns import build_inlinepatterns
+from .postprocessors import build_postprocessors
+from .extensions import Extension
+from .serializers import to_html_string, to_xhtml_string
+
+__all__ = ['Markdown', 'markdown', 'markdownFromFile']
+
+
+logger = logging.getLogger('MARKDOWN')
+
+
+class Markdown(object):
+    """Convert Markdown to HTML."""
+
+    doc_tag = "div"     # Element used to wrap document - later removed
+
+    option_defaults = {
+        'html_replacement_text': '[HTML_REMOVED]',
+        'tab_length':            4,
+        'enable_attributes':     True,
+        'smart_emphasis':        True,
+        'lazy_ol':               True,
+    }
+
+    output_formats = {
+        'html':   to_html_string,
+        'html4':  to_html_string,
+        'html5':  to_html_string,
+        'xhtml':  to_xhtml_string,
+        'xhtml1': to_xhtml_string,
+        'xhtml5': to_xhtml_string,
+    }
+
+    ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
+                     '(', ')', '>', '#', '+', '-', '.', '!']
+
+    def __init__(self, *args, **kwargs):
+        """
+        Creates a new Markdown instance.
+
+        Keyword arguments:
+
+        * extensions: A list of extensions.
+           If they are of type string, the module mdx_name.py will be loaded.
+           If they are a subclass of markdown.Extension, they will be used
+           as-is.
+        * extension_configs: Configuration settings for extensions.
+        * output_format: Format of output. Supported formats are:
+            * "xhtml1": Outputs XHTML 1.x. Default.
+            * "xhtml5": Outputs XHTML style tags of HTML 5
+            * "xhtml": Outputs latest supported version of XHTML
+              (currently XHTML 1.1).
+            * "html4": Outputs HTML 4
+            * "html5": Outputs HTML style tags of HTML 5
+            * "html": Outputs latest supported version of HTML
+              (currently HTML 4).
+            Note that it is suggested that the more specific formats ("xhtml1"
+            and "html4") be used as "xhtml" or "html" may change in the future
+            if it makes sense at that time.
+        * safe_mode: Deprecated! Disallow raw html. One of "remove", "replace"
+          or "escape".
+        * html_replacement_text: Deprecated! Text used when safe_mode is set
+          to "replace".
+        * tab_length: Length of tabs in the source. Default: 4
+        * enable_attributes: Enable the conversion of attributes. Default: True
+        * smart_emphasis: Treat `_connected_words_` intelligently. Default: True
+        * lazy_ol: Ignore number of first item of ordered lists. Default: True
+
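+        Example (illustrative; the extension shown is one of the extensions
+        bundled with Python-Markdown and is assumed to be available):
+
+            md = markdown.Markdown(output_format='html5',
+                                   extensions=['markdown.extensions.extra'])
+            html = md.convert(some_markdown_text)
+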
+        """
+
+        # For backward compatibility, loop through old positional args
+        pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
+        for c, arg in enumerate(args):
+            if pos[c] not in kwargs:
+                kwargs[pos[c]] = arg
+            if c+1 == len(pos):  # pragma: no cover
+                # ignore any additional args
+                break
+        if len(args):
+            warnings.warn('Positional arguments are deprecated in Markdown. '
+                          'Use keyword arguments only.',
+                          DeprecationWarning)
+
+        # Loop through kwargs and assign defaults
+        for option, default in self.option_defaults.items():
+            setattr(self, option, kwargs.get(option, default))
+
+        self.safeMode = kwargs.get('safe_mode', False)
+        if self.safeMode and 'enable_attributes' not in kwargs:
+            # Disable attributes in safeMode when not explicitly set
+            self.enable_attributes = False
+
+        if 'safe_mode' in kwargs:
+            warnings.warn('"safe_mode" is deprecated in Python-Markdown. '
+                          'Use an HTML sanitizer (like '
+                          'Bleach http://bleach.readthedocs.org/) '
+                          'if you are parsing untrusted markdown text. '
+                          'See the 2.6 release notes for more info',
+                          DeprecationWarning)
+
+        if 'html_replacement_text' in kwargs:
+            warnings.warn('The "html_replacement_text" keyword is '
+                          'deprecated along with "safe_mode".',
+                          DeprecationWarning)
+
+        self.registeredExtensions = []
+        self.docType = ""
+        self.stripTopLevelTags = True
+
+        self.build_parser()
+
+        self.references = {}
+        self.htmlStash = util.HtmlStash()
+        self.registerExtensions(extensions=kwargs.get('extensions', []),
+                                configs=kwargs.get('extension_configs', {}))
+        self.set_output_format(kwargs.get('output_format', 'xhtml1'))
+        self.reset()
+
+    def build_parser(self):
+        """ Build the parser from the various parts. """
+        self.preprocessors = build_preprocessors(self)
+        self.parser = build_block_parser(self)
+        self.inlinePatterns = build_inlinepatterns(self)
+        self.treeprocessors = build_treeprocessors(self)
+        self.postprocessors = build_postprocessors(self)
+        return self
+
+    def registerExtensions(self, extensions, configs):
+        """
+        Register extensions with this instance of Markdown.
+
+        Keyword arguments:
+
+        * extensions: A list of extensions, which can either
+           be strings or objects.  See the docstring on Markdown.
+        * configs: A dictionary mapping module names to config options.
+
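+        Example (illustrative; assumes the bundled toc extension is
+        available):
+
+            md.registerExtensions(
+                extensions=['markdown.extensions.toc'],
+                configs={'markdown.extensions.toc': {'title': 'Contents'}})
+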
+        """
+        for ext in extensions:
+            if isinstance(ext, util.string_type):
+                ext = self.build_extension(ext, configs.get(ext, {}))
+            if isinstance(ext, Extension):
+                ext.extendMarkdown(self, globals())
+                logger.debug(
+                    'Successfully loaded extension "%s.%s".'
+                    % (ext.__class__.__module__, ext.__class__.__name__)
+                )
+            elif ext is not None:
+                raise TypeError(
+                    'Extension "%s.%s" must be of type: "markdown.Extension"'
+                    % (ext.__class__.__module__, ext.__class__.__name__))
+
+        return self
+
+    def build_extension(self, ext_name, configs):
+        """Build extension by name, then return the module.
+
+        The extension name may contain arguments as part of the string in the
+        following format: "extname(key1=value1,key2=value2)"
+
+        """
+
+        configs = dict(configs)
+
+        # Parse extensions config params (ignore the order)
+        pos = ext_name.find("(")  # find the first "("
+        if pos > 0:
+            ext_args = ext_name[pos+1:-1]
+            ext_name = ext_name[:pos]
+            pairs = [x.split("=") for x in ext_args.split(",")]
+            configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
+            warnings.warn('Setting configs in the Named Extension string is '
+                          'deprecated. It is recommended that you '
+                          'pass an instance of the extension class to '
+                          'Markdown or use the "extension_configs" keyword. '
+                          'The current behavior will raise an error in version 2.7. '
+                          'See the Release Notes for Python-Markdown version '
+                          '2.6 for more info.', DeprecationWarning)
+
+        # Get class name (if provided): `path.to.module:ClassName`
+        ext_name, class_name = ext_name.split(':', 1) \
+            if ':' in ext_name else (ext_name, '')
+
+        # Try loading the extension first from one place, then another
+        try:
+            # Assume string uses dot syntax (`path.to.some.module`)
+            module = importlib.import_module(ext_name)
+            logger.debug(
+                'Successfully imported extension module "%s".' % ext_name
+            )
+            # For backward compat (until deprecation)
+            # check that this is an extension.
+            if ('.' not in ext_name and not (hasattr(module, 'makeExtension') or
+               (class_name and hasattr(module, class_name)))):
+                # We have a name conflict
+                # eg: extensions=['tables'] and PyTables is installed
+                raise ImportError
+        except ImportError:
+            # Prepend `markdown.extensions.` to name
+            module_name = '.'.join(['markdown.extensions', ext_name])
+            try:
+                module = importlib.import_module(module_name)
+                logger.debug(
+                    'Successfully imported extension module "%s".' %
+                    module_name
+                )
+                warnings.warn('Using short names for Markdown\'s builtin '
+                              'extensions is deprecated. Use the '
+                              'full path to the extension with Python\'s dot '
+                              'notation (eg: "%s" instead of "%s"). The '
+                              'current behavior will raise an error in version '
+                              '2.7. See the Release Notes for '
+                              'Python-Markdown version 2.6 for more info.' %
+                              (module_name, ext_name),
+                              DeprecationWarning)
+            except ImportError:
+                # Prepend `mdx_` to name
+                module_name_old_style = '_'.join(['mdx', ext_name])
+                try:
+                    module = importlib.import_module(module_name_old_style)
+                    logger.debug(
+                        'Successfully imported extension module "%s".' %
+                        module_name_old_style)
+                    warnings.warn('Markdown\'s behavior of prepending "mdx_" '
+                                  'to an extension name is deprecated. '
+                                  'Use the full path to the '
+                                  'extension with Python\'s dot notation '
+                                  '(eg: "%s" instead of "%s"). The current '
+                                  'behavior will raise an error in version 2.7. '
+                                  'See the Release Notes for Python-Markdown '
+                                  'version 2.6 for more info.' %
+                                  (module_name_old_style, ext_name),
+                                  DeprecationWarning)
+                except ImportError as e:
+                    message = "Failed loading extension '%s' from '%s', '%s' " \
+                        "or '%s'" % (ext_name, ext_name, module_name,
+                                     module_name_old_style)
+                    e.args = (message,) + e.args[1:]
+                    raise
+
+        if class_name:
+            # Load given class name from module.
+            return getattr(module, class_name)(**configs)
+        else:
+            # Expect the module's makeExtension() function to return the extension.
+            try:
+                return module.makeExtension(**configs)
+            except AttributeError as e:
+                message = e.args[0]
+                message = "Failed to initiate extension " \
+                          "'%s': %s" % (ext_name, message)
+                e.args = (message,) + e.args[1:]
+                raise
+
+    def registerExtension(self, extension):
+        """ This gets called by the extension """
+        self.registeredExtensions.append(extension)
+        return self
+
+    def reset(self):
+        """
+        Resets all state variables so that we can start with a new text.
+        """
+        self.htmlStash.reset()
+        self.references.clear()
+
+        for extension in self.registeredExtensions:
+            if hasattr(extension, 'reset'):
+                extension.reset()
+
+        return self
+
+    def set_output_format(self, format):
+        """ Set the output format for the class instance. """
+        self.output_format = format.lower()
+        try:
+            self.serializer = self.output_formats[self.output_format]
+        except KeyError as e:
+            valid_formats = list(self.output_formats.keys())
+            valid_formats.sort()
+            message = 'Invalid Output Format: "%s". Use one of %s.' \
+                % (self.output_format,
+                   '"' + '", "'.join(valid_formats) + '"')
+            e.args = (message,) + e.args[1:]
+            raise
+        return self
+
+    def convert(self, source):
+        """
+        Convert markdown to serialized XHTML or HTML.
+
+        Keyword arguments:
+
+        * source: Source text as a Unicode string.
+
+        Markdown processing takes place in five steps:
+
+        1. A bunch of "preprocessors" munge the input text.
+        2. BlockParser() parses the high-level structural elements of the
+           pre-processed text into an ElementTree.
+        3. A bunch of "treeprocessors" are run against the ElementTree. One
+           such treeprocessor runs InlinePatterns against the ElementTree,
+           detecting inline markup.
+        4. Some post-processors are run against the text after the ElementTree
+           has been serialized into text.
+        5. The output is written to a string.
+
+        """
+
+        # Fixup the source text
+        if not source.strip():
+            return ''  # a blank unicode string
+
+        try:
+            source = util.text_type(source)
+        except UnicodeDecodeError as e:
+            # Customise error message while maintaining original traceback
+            e.reason += '. -- Note: Markdown only accepts unicode input!'
+            raise
+
+        # Split into lines and run the line preprocessors.
+        self.lines = source.split("\n")
+        for prep in self.preprocessors.values():
+            self.lines = prep.run(self.lines)
+
+        # Parse the high-level elements.
+        root = self.parser.parseDocument(self.lines).getroot()
+
+        # Run the tree-processors
+        for treeprocessor in self.treeprocessors.values():
+            newRoot = treeprocessor.run(root)
+            if newRoot is not None:
+                root = newRoot
+
+        # Serialize _properly_.  Strip top-level tags.
+        output = self.serializer(root)
+        if self.stripTopLevelTags:
+            try:
+                start = output.index(
+                    '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
+                end = output.rindex('</%s>' % self.doc_tag)
+                output = output[start:end].strip()
+            except ValueError:  # pragma: no cover
+                if output.strip().endswith('<%s />' % self.doc_tag):
+                    # We have an empty document
+                    output = ''
+                else:
+                    # We have a serious problem
+                    raise ValueError('Markdown failed to strip top-level '
+                                     'tags. Document=%r' % output.strip())
+
+        # Run the text post-processors
+        for pp in self.postprocessors.values():
+            output = pp.run(output)
+
+        return output.strip()
+
+    def convertFile(self, input=None, output=None, encoding=None):
+        """Converts a Markdown file and returns the HTML as a Unicode string.
+
+        Decodes the file using the provided encoding (defaults to utf-8),
+        passes the file content to markdown, and outputs the html to either
+        the provided stream or the file with provided name, using the same
+        encoding as the source file. The 'xmlcharrefreplace' error handler is
+        used when encoding the output.
+
+        **Note:** This is the only place that decoding and encoding of Unicode
+        takes place in Python-Markdown.  (All other code is Unicode-in /
+        Unicode-out.)
+
+        Keyword arguments:
+
+        * input: File object or path. Reads from stdin if `None`.
+        * output: File object or path. Writes to stdout if `None`.
+        * encoding: Encoding of input and output files. Defaults to utf-8.
+
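+        Example (illustrative file names):
+
+            md = markdown.Markdown()
+            md.convertFile(input='in.md', output='out.html', encoding='utf-8')
+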
+        """
+
+        encoding = encoding or "utf-8"
+
+        # Read the source
+        if input:
+            if isinstance(input, util.string_type):
+                input_file = codecs.open(input, mode="r", encoding=encoding)
+            else:
+                input_file = codecs.getreader(encoding)(input)
+            text = input_file.read()
+            input_file.close()
+        else:
+            text = sys.stdin.read()
+            if not isinstance(text, util.text_type):
+                text = text.decode(encoding)
+
+        text = text.lstrip('\ufeff')  # remove the byte-order mark
+
+        # Convert
+        html = self.convert(text)
+
+        # Write to file or stdout
+        if output:
+            if isinstance(output, util.string_type):
+                output_file = codecs.open(output, "w",
+                                          encoding=encoding,
+                                          errors="xmlcharrefreplace")
+                output_file.write(html)
+                output_file.close()
+            else:
+                writer = codecs.getwriter(encoding)
+                output_file = writer(output, errors="xmlcharrefreplace")
+                output_file.write(html)
+                # Don't close here. User may want to write more.
+        else:
+            # Encode manually and write bytes to stdout.
+            html = html.encode(encoding, "xmlcharrefreplace")
+            try:
+                # Write bytes directly to buffer (Python 3).
+                sys.stdout.buffer.write(html)
+            except AttributeError:
+                # Probably Python 2, which works with bytes by default.
+                sys.stdout.write(html)
+
+        return self
+
+
+"""
+EXPORTED FUNCTIONS
+=============================================================================
+
+Those are the two functions we really mean to export: markdown() and
+markdownFromFile().
+"""
+
+
+def markdown(text, *args, **kwargs):
+    """Convert a Markdown string to HTML and return HTML as a Unicode string.
+
+    This is a shortcut function for `Markdown` class to cover the most
+    basic use case.  It initializes an instance of Markdown, loads the
+    necessary extensions and runs the parser on the given text.
+
+    Keyword arguments:
+
+    * text: Markdown formatted text as Unicode or ASCII string.
+    * Any arguments accepted by the Markdown class.
+
+    Returns: An HTML document as a string.
+
+    """
+    md = Markdown(*args, **kwargs)
+    return md.convert(text)
+
+
+def markdownFromFile(*args, **kwargs):
+    """Read markdown code from a file and write it to a file or a stream.
+
+    This is a shortcut function which initializes an instance of Markdown,
+    and calls the convertFile method rather than convert.
+
+    Keyword arguments:
+
+    * input: a file name or readable object.
+    * output: a file name or writable object.
+    * encoding: Encoding of input and output.
+    * Any arguments accepted by the Markdown class.
+
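+    Example (illustrative file names):
+
+        markdown.markdownFromFile(input='in.md', output='out.html')
+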
+    """
+    # For backward compatibility loop through positional args
+    pos = ['input', 'output', 'extensions', 'encoding']
+    c = 0
+    for arg in args:
+        if pos[c] not in kwargs:
+            kwargs[pos[c]] = arg
+        c += 1
+        if c == len(pos):
+            break
+    if len(args):
+        warnings.warn('Positional arguments are deprecated in '
+                      'Markdown and will raise an error in version 2.7. '
+                      'Use keyword arguments only.',
+                      DeprecationWarning)
+
+    md = Markdown(**kwargs)
+    md.convertFile(kwargs.get('input', None),
+                   kwargs.get('output', None),
+                   kwargs.get('encoding', None))
diff --git a/markdown/__main__.py b/markdown/__main__.py
new file mode 100644
index 0000000..17bfa9f
--- /dev/null
+++ b/markdown/__main__.py
@@ -0,0 +1,136 @@
+"""
+COMMAND-LINE SPECIFIC STUFF
+=============================================================================
+
+"""
+
+import sys
+import optparse
+import codecs
+import warnings
+import markdown
+try:
+    import yaml
+except ImportError:  # pragma: no cover
+    import json as yaml
+
+import logging
+from logging import DEBUG, WARNING, CRITICAL
+
+logger = logging.getLogger('MARKDOWN')
+
+
+def parse_options(args=None, values=None):
+    """
+    Define and parse `optparse` options for command-line usage.
+    """
+    usage = """%prog [options] [INPUTFILE]
+       (STDIN is assumed if no INPUTFILE is given)"""
+    desc = "A Python implementation of John Gruber's Markdown. " \
+           "https://pythonhosted.org/Markdown/"
+    ver = "%%prog %s" % markdown.version
+
+    parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
+    parser.add_option("-f", "--file", dest="filename", default=None,
+                      help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
+                      metavar="OUTPUT_FILE")
+    parser.add_option("-e", "--encoding", dest="encoding",
+                      help="Encoding for input and output files.",)
+    parser.add_option("-s", "--safe", dest="safe", default=False,
+                      metavar="SAFE_MODE",
+                      help="Deprecated! 'replace', 'remove' or 'escape' HTML "
+                      "tags in input")
+    parser.add_option("-o", "--output_format", dest="output_format",
+                      default='xhtml1', metavar="OUTPUT_FORMAT",
+                      help="'xhtml1' (default), 'html4' or 'html5'.")
+    parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
+                      action='store_false', default=True,
+                      help="Observe number of first item of ordered lists.")
+    parser.add_option("-x", "--extension", action="append", dest="extensions",
+                      help="Load extension EXTENSION.", metavar="EXTENSION")
+    parser.add_option("-c", "--extension_configs",
+                      dest="configfile", default=None,
+                      help="Read extension configurations from CONFIG_FILE. "
+                      "CONFIG_FILE must be of JSON or YAML format. YAML"
+                      "format requires that a python YAML library be "
+                      "installed. The parsed JSON or YAML must result in a "
+                      "python dictionary which would be accepted by the "
+                      "'extension_configs' keyword on the markdown.Markdown "
+                      "class. The extensions must also be loaded with the "
+                      "`--extension` option.",
+                      metavar="CONFIG_FILE")
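+    # An illustrative CONFIG_FILE (YAML form, assuming the bundled toc
+    # extension is loaded with "-x markdown.extensions.toc") might contain:
+    #
+    #     markdown.extensions.toc:
+    #         title: Table of Contents
+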
+    parser.add_option("-q", "--quiet", default=CRITICAL,
+                      action="store_const", const=CRITICAL+10, dest="verbose",
+                      help="Suppress all warnings.")
+    parser.add_option("-v", "--verbose",
+                      action="store_const", const=WARNING, dest="verbose",
+                      help="Print all warnings.")
+    parser.add_option("--noisy",
+                      action="store_const", const=DEBUG, dest="verbose",
+                      help="Print debug messages.")
+
+    (options, args) = parser.parse_args(args, values)
+
+    if len(args) == 0:
+        input_file = None
+    else:
+        input_file = args[0]
+
+    if not options.extensions:
+        options.extensions = []
+
+    extension_configs = {}
+    if options.configfile:
+        with codecs.open(
+            options.configfile, mode="r", encoding=options.encoding
+        ) as fp:
+            try:
+                extension_configs = yaml.load(fp)
+            except Exception as e:
+                message = "Failed parsing extension config file: %s" % \
+                          options.configfile
+                e.args = (message,) + e.args[1:]
+                raise
+
+    opts = {
+        'input': input_file,
+        'output': options.filename,
+        'extensions': options.extensions,
+        'extension_configs': extension_configs,
+        'encoding': options.encoding,
+        'output_format': options.output_format,
+        'lazy_ol': options.lazy_ol
+    }
+
+    if options.safe:
+        # Avoid deprecation warning if user didn't set option
+        opts['safe_mode'] = options.safe
+
+    return opts, options.verbose
+
+
+def run():  # pragma: no cover
+    """Run Markdown from the command line."""
+
+    # Parse options and adjust logging level if necessary
+    options, logging_level = parse_options()
+    if not options:
+        sys.exit(2)
+    logger.setLevel(logging_level)
+    console_handler = logging.StreamHandler()
+    logger.addHandler(console_handler)
+    if logging_level <= WARNING:
+        # Ensure deprecation warnings get displayed
+        warnings.filterwarnings('default')
+        logging.captureWarnings(True)
+        warn_logger = logging.getLogger('py.warnings')
+        warn_logger.addHandler(console_handler)
+
+    # Run
+    markdown.markdownFromFile(**options)
+
+
+if __name__ == '__main__':  # pragma: no cover
+    # Support running module as a commandline command.
+    # Python 2.7 & 3.x do: `python -m markdown [options] [args]`.
+    run()
diff --git a/markdown/__version__.py b/markdown/__version__.py
new file mode 100644
index 0000000..3442504
--- /dev/null
+++ b/markdown/__version__.py
@@ -0,0 +1,29 @@
+#
+# markdown/__version__.py
+#
+# version_info should conform to PEP 386
+# (major, minor, micro, alpha/beta/rc/final, #)
+# (1, 1, 2, 'alpha', 0) => "1.1.2.dev"
+# (1, 2, 0, 'beta', 2) => "1.2b2"
+version_info = (2, 6, 2, 'final', 0)
+
+
+def _get_version():
+    " Returns a PEP 386-compliant version number from version_info. "
+    assert len(version_info) == 5
+    assert version_info[3] in ('alpha', 'beta', 'rc', 'final')
+
+    parts = 2 if version_info[2] == 0 else 3
+    main = '.'.join(map(str, version_info[:parts]))
+
+    sub = ''
+    if version_info[3] == 'alpha' and version_info[4] == 0:
+        # TODO: maybe append some sort of git info here??
+        sub = '.dev'
+    elif version_info[3] != 'final':
+        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
+        sub = mapping[version_info[3]] + str(version_info[4])
+
+    return str(main + sub)
+
+version = _get_version()
diff --git a/markdown/blockparser.py b/markdown/blockparser.py
new file mode 100644
index 0000000..32d3254
--- /dev/null
+++ b/markdown/blockparser.py
@@ -0,0 +1,100 @@
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from . import util
+from . import odict
+
+
+class State(list):
+    """ Track the current and nested state of the parser.
+
+    This utility class is used to track the state of the BlockParser and
+    support multiple levels of nesting. It's just a simple API wrapped around
+    a list. Each time a state is set, that state is appended to the end of the
+    list. Each time a state is reset, that state is removed from the end of
+    the list.
+
+    Therefore, each time a state is set for a nested block, that state must be
+    reset when we back out of that level of nesting or the state could be
+    corrupted.
+
+    While all the methods of a list object are available, only the three
+    defined below need be used.
+
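+    Illustrative use (state names are arbitrary strings):
+
+        state = State()
+        state.set('list')       # entering a nested block
+        state.isstate('list')   # True while that state is current
+        state.reset()           # leaving the nested block
+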
+    """
+
+    def set(self, state):
+        """ Set a new state. """
+        self.append(state)
+
+    def reset(self):
+        """ Step back one step in nested state. """
+        self.pop()
+
+    def isstate(self, state):
+        """ Test that top (current) level is of given state. """
+        if len(self):
+            return self[-1] == state
+        else:
+            return False
+
+
+class BlockParser:
+    """ Parse Markdown blocks into an ElementTree object.
+
+    A wrapper class that stitches the various BlockProcessors together,
+    looping through them and creating an ElementTree object.
+    """
+
+    def __init__(self, markdown):
+        self.blockprocessors = odict.OrderedDict()
+        self.state = State()
+        self.markdown = markdown
+
+    def parseDocument(self, lines):
+        """ Parse a markdown document into an ElementTree.
+
+        Given a list of lines, an ElementTree object (not just a parent
+        Element) is created and the root element is passed to the parser
+        as the parent. The ElementTree object is returned.
+
+        This should only be called on an entire document, not pieces.
+
+        """
+        # Create an ElementTree from the lines
+        self.root = util.etree.Element(self.markdown.doc_tag)
+        self.parseChunk(self.root, '\n'.join(lines))
+        return util.etree.ElementTree(self.root)
+
+    def parseChunk(self, parent, text):
+        """ Parse a chunk of markdown text and attach to given etree node.
+
+        While the ``text`` argument is generally assumed to contain multiple
+        blocks which will be split on blank lines, it could contain only one
+        block. Generally, this method would be called by extensions when
+        block parsing is required.
+
+        The ``parent`` etree Element passed in is altered in place.
+        Nothing is returned.
+
+        """
+        self.parseBlocks(parent, text.split('\n\n'))
+
+    def parseBlocks(self, parent, blocks):
+        """ Process blocks of markdown text and attach to given etree node.
+
+        Given a list of ``blocks``, each blockprocessor is stepped through
+        until there are no blocks left. While an extension could potentially
+        call this method directly, it's generally expected to be used
+        internally.
+
+        This is a public method as an extension may need to add/alter
+        additional BlockProcessors which call this method to recursively
+        parse a nested block.
+
+        """
+        while blocks:
+            for processor in self.blockprocessors.values():
+                if processor.test(parent, blocks[0]):
+                    if processor.run(parent, blocks) is not False:
+                        # run returns True or None
+                        break
diff --git a/markdown/blockprocessors.py b/markdown/blockprocessors.py
new file mode 100644
index 0000000..29db022
--- /dev/null
+++ b/markdown/blockprocessors.py
@@ -0,0 +1,563 @@
+"""
+CORE MARKDOWN BLOCKPARSER
+===========================================================================
+
+This parser handles basic parsing of Markdown blocks.  It doesn't concern
+itself with inline elements such as **bold** or *italics*, but rather just
+catches blocks, lists, quotes, etc.
+
+The BlockParser is made up of a bunch of BlockProcessors, each handling a
+different type of block. Extensions may add/replace/remove BlockProcessors
+as they need to alter how markdown blocks are parsed.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import unicode_literals
+import logging
+import re
+from . import util
+from .blockparser import BlockParser
+
+logger = logging.getLogger('MARKDOWN')
+
+
+def build_block_parser(md_instance, **kwargs):
+    """ Build the default block parser used by Markdown. """
+    parser = BlockParser(md_instance)
+    parser.blockprocessors['empty'] = EmptyBlockProcessor(parser)
+    parser.blockprocessors['indent'] = ListIndentProcessor(parser)
+    parser.blockprocessors['code'] = CodeBlockProcessor(parser)
+    parser.blockprocessors['hashheader'] = HashHeaderProcessor(parser)
+    parser.blockprocessors['setextheader'] = SetextHeaderProcessor(parser)
+    parser.blockprocessors['hr'] = HRProcessor(parser)
+    parser.blockprocessors['olist'] = OListProcessor(parser)
+    parser.blockprocessors['ulist'] = UListProcessor(parser)
+    parser.blockprocessors['quote'] = BlockQuoteProcessor(parser)
+    parser.blockprocessors['paragraph'] = ParagraphProcessor(parser)
+    return parser
+
+
+class BlockProcessor:
+    """ Base class for block processors.
+
+    Each subclass will provide the methods below to work with the source and
+    tree. Each processor will need to define its own ``test`` and ``run``
+    methods. The ``test`` method should return True or False, to indicate
+    whether the current block should be processed by this processor. If the
+    test passes, the parser will call the processor's ``run`` method.
+
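+    A minimal illustrative subclass (hypothetical, not part of the default
+    parser) that turns blocks starting with '!upper' into upper-cased
+    paragraphs:
+
+        class UpperBlockProcessor(BlockProcessor):
+            def test(self, parent, block):
+                return block.startswith('!upper')
+
+            def run(self, parent, blocks):
+                block = blocks.pop(0)
+                p = util.etree.SubElement(parent, 'p')
+                p.text = block[len('!upper'):].strip().upper()
+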
+    """
+
+    def __init__(self, parser):
+        self.parser = parser
+        self.tab_length = parser.markdown.tab_length
+
+    def lastChild(self, parent):
+        """ Return the last child of an etree element. """
+        if len(parent):
+            return parent[-1]
+        else:
+            return None
+
+    def detab(self, text):
+        """ Remove a tab from the front of each line of the given text. """
+        newtext = []
+        lines = text.split('\n')
+        for line in lines:
+            if line.startswith(' '*self.tab_length):
+                newtext.append(line[self.tab_length:])
+            elif not line.strip():
+                newtext.append('')
+            else:
+                break
+        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
+
+    def looseDetab(self, text, level=1):
+        """ Remove a tab from front of lines but allowing dedented lines. """
+        lines = text.split('\n')
+        for i in range(len(lines)):
+            if lines[i].startswith(' '*self.tab_length*level):
+                lines[i] = lines[i][self.tab_length*level:]
+        return '\n'.join(lines)
+
+    def test(self, parent, block):
+        """ Test for block type. Must be overridden by subclasses.
+
+        As the parser loops through processors, it will call the ``test``
+        method on each to determine if the given block of text is of that
+        type. This method must return a boolean ``True`` or ``False``. The
+        actual method of testing is left to the needs of that particular
+        block type. It could be as simple as ``block.startswith(some_string)``
+        or a complex regular expression. As the block type may be different
+        depending on the parent of the block (i.e. inside a list), the parent
+        etree element is also provided and may be used as part of the test.
+
+        Keywords:
+
+        * ``parent``: An etree element which will be the parent of the block.
+        * ``block``: A block of text from the source which has been split at
+            blank lines.
+        """
+        pass  # pragma: no cover
+
+    def run(self, parent, blocks):
+        """ Run processor. Must be overridden by subclasses.
+
+        When the parser determines the appropriate type of a block, the parser
+        will call the corresponding processor's ``run`` method. This method
+        should parse the individual lines of the block and append them to
+        the etree.
+
+        Note that both the ``parent`` and ``blocks`` keywords are pointers
+        to instances of the objects which should be edited in place. Each
+        processor must make changes to the existing objects as there is no
+        mechanism to return new/different objects to replace them.
+
+        This means that this method should be adding SubElements or adding text
+        to the parent, and should remove (``pop``) or add (``insert``) items to
+        the list of blocks.
+
+        Keywords:
+
+        * ``parent``: An etree element which is the parent of the current block.
+        * ``blocks``: A list of all remaining blocks of the document.
+        """
+        pass  # pragma: no cover
+
+
+class ListIndentProcessor(BlockProcessor):
+    """ Process children of list items.
+
+    Example:
+        * a list item
+            process this part
+
+            or this part
+
+    """
+
+    ITEM_TYPES = ['li']
+    LIST_TYPES = ['ul', 'ol']
+
+    def __init__(self, *args):
+        BlockProcessor.__init__(self, *args)
+        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)
+
+    def test(self, parent, block):
+        return block.startswith(' '*self.tab_length) and \
+            not self.parser.state.isstate('detabbed') and \
+            (parent.tag in self.ITEM_TYPES or
+                (len(parent) and parent[-1] is not None and
+                    (parent[-1].tag in self.LIST_TYPES)))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        level, sibling = self.get_level(parent, block)
+        block = self.looseDetab(block, level)
+
+        self.parser.state.set('detabbed')
+        if parent.tag in self.ITEM_TYPES:
+            # It's possible that this parent has a 'ul' or 'ol' child list
+            # with a member.  If that is the case, then that should be the
+            # parent.  This is intended to catch the edge case of an indented
+            # list whose first member was parsed prior to this point;
+            # see OListProcessor.
+            if len(parent) and parent[-1].tag in self.LIST_TYPES:
+                self.parser.parseBlocks(parent[-1], [block])
+            else:
+                # The parent is already a li. Just parse the child block.
+                self.parser.parseBlocks(parent, [block])
+        elif sibling.tag in self.ITEM_TYPES:
+            # The sibling is a li. Use it as parent.
+            self.parser.parseBlocks(sibling, [block])
+        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
+            # The parent is a list (``ol`` or ``ul``) which has children.
+            # Assume the last child li is the parent of this block.
+            if sibling[-1].text:
+                # If the parent li has text, that text needs to be moved to a p
+                # The p must be 'inserted' at the beginning of the list in the
+                # event that other children already exist, e.g. a nested sublist.
+                p = util.etree.Element('p')
+                p.text = sibling[-1].text
+                sibling[-1].text = ''
+                sibling[-1].insert(0, p)
+            self.parser.parseChunk(sibling[-1], block)
+        else:
+            self.create_item(sibling, block)
+        self.parser.state.reset()
+
+    def create_item(self, parent, block):
+        """ Create a new li and parse the block with it as the parent. """
+        li = util.etree.SubElement(parent, 'li')
+        self.parser.parseBlocks(li, [block])
+
+    def get_level(self, parent, block):
+        """ Get level of indent based on list level. """
+        # Get indent level
+        m = self.INDENT_RE.match(block)
+        if m:
+            indent_level = len(m.group(1))/self.tab_length
+        else:
+            indent_level = 0
+        if self.parser.state.isstate('list'):
+            # We're in a tight list, so we are already at the correct parent.
+            level = 1
+        else:
+            # We're in a loose list, so we need to find the parent.
+            level = 0
+        # Step through children of tree to find matching indent level.
+        while indent_level > level:
+            child = self.lastChild(parent)
+            if (child is not None and
+               (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
+                if child.tag in self.LIST_TYPES:
+                    level += 1
+                parent = child
+            else:
+                # No more child levels. If we're short of indent_level,
+                # we have a code block. So we stop here.
+                break
+        return level, parent
+
+
+class CodeBlockProcessor(BlockProcessor):
+    """ Process code blocks. """
+
+    def test(self, parent, block):
+        return block.startswith(' '*self.tab_length)
+
+    def run(self, parent, blocks):
+        sibling = self.lastChild(parent)
+        block = blocks.pop(0)
+        theRest = ''
+        if (sibling is not None and sibling.tag == "pre" and
+           len(sibling) and sibling[0].tag == "code"):
+            # The previous block was a code block. As blank lines do not start
+            # new code blocks, append this block to the previous, adding back
+            # linebreaks removed from the split into a list.
+            code = sibling[0]
+            block, theRest = self.detab(block)
+            code.text = util.AtomicString(
+                '%s\n%s\n' % (code.text, block.rstrip())
+            )
+        else:
+            # This is a new codeblock. Create the elements and insert text.
+            pre = util.etree.SubElement(parent, 'pre')
+            code = util.etree.SubElement(pre, 'code')
+            block, theRest = self.detab(block)
+            code.text = util.AtomicString('%s\n' % block.rstrip())
+        if theRest:
+            # This block contained unindented line(s) after the first indented
+            # line. Insert these lines as the first block of the master blocks
+            # list for future processing.
+            blocks.insert(0, theRest)
+
+
+class BlockQuoteProcessor(BlockProcessor):
+
+    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+        if m:
+            before = block[:m.start()]  # Lines before blockquote
+            # Recursively parse lines before the blockquote first.
+            self.parser.parseBlocks(parent, [before])
+            # Remove ``> `` from the beginning of each line.
+            block = '\n'.join(
+                [self.clean(line) for line in block[m.start():].split('\n')]
+            )
+        sibling = self.lastChild(parent)
+        if sibling is not None and sibling.tag == "blockquote":
+            # Previous block was a blockquote, so set that as this block's parent
+            quote = sibling
+        else:
+            # This is a new blockquote. Create a new parent element.
+            quote = util.etree.SubElement(parent, 'blockquote')
+        # Recursively parse block with blockquote as parent.
+        # change parser state so blockquotes embedded in lists use p tags
+        self.parser.state.set('blockquote')
+        self.parser.parseChunk(quote, block)
+        self.parser.state.reset()
+
+    def clean(self, line):
+        """ Remove ``>`` from beginning of a line. """
+        m = self.RE.match(line)
+        if line.strip() == ">":
+            return ""
+        elif m:
+            return m.group(2)
+        else:
+            return line
+
+
+class OListProcessor(BlockProcessor):
+    """ Process ordered list blocks. """
+
+    TAG = 'ol'
+    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
+    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
+    # Detect items on secondary lines. They can be of either list type.
+    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
+    # Detect indented (nested) items of either type
+    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')
+    # The integer (as a python string) with which the list starts (default=1)
+    # Eg: If the list is initialized as:
+    #   3. Item
+    # The ol tag will get the start="3" attribute
+    STARTSWITH = '1'
+    # List of allowed sibling tags.
+    SIBLING_TAGS = ['ol', 'ul']
+
+    def test(self, parent, block):
+        return bool(self.RE.match(block))
+
+    def run(self, parent, blocks):
+        # Check for multiple items in one block.
+        items = self.get_items(blocks.pop(0))
+        sibling = self.lastChild(parent)
+
+        if sibling is not None and sibling.tag in self.SIBLING_TAGS:
+            # Previous block was a list item, so set that as parent
+            lst = sibling
+            # make sure the previous item is in a p; if the item has text,
+            # then it isn't in a p
+            if lst[-1].text:
+                # since it's possible there are other children for this
+                # sibling, we can't just SubElement the p, we need to
+                # insert it as the first item.
+                p = util.etree.Element('p')
+                p.text = lst[-1].text
+                lst[-1].text = ''
+                lst[-1].insert(0, p)
+            # if the last item has a tail, then the tail needs to be put in a p
+            # likely only when a header is not followed by a blank line
+            lch = self.lastChild(lst[-1])
+            if lch is not None and lch.tail:
+                p = util.etree.SubElement(lst[-1], 'p')
+                p.text = lch.tail.lstrip()
+                lch.tail = ''
+
+            # parse first block differently as it gets wrapped in a p.
+            li = util.etree.SubElement(lst, 'li')
+            self.parser.state.set('looselist')
+            firstitem = items.pop(0)
+            self.parser.parseBlocks(li, [firstitem])
+            self.parser.state.reset()
+        elif parent.tag in ['ol', 'ul']:
+            # this catches the edge case of a multi-item indented list whose
+            # first item is in a blank parent-list item:
+            # * * subitem1
+            #     * subitem2
+            # see also ListIndentProcessor
+            lst = parent
+        else:
+            # This is a new list so create parent with appropriate tag.
+            lst = util.etree.SubElement(parent, self.TAG)
+            # Check if a custom start integer is set
+            if not self.parser.markdown.lazy_ol and self.STARTSWITH != '1':
+                lst.attrib['start'] = self.STARTSWITH
+
+        self.parser.state.set('list')
+        # Loop through items in block, recursively parsing each with the
+        # appropriate parent.
+        for item in items:
+            if item.startswith(' '*self.tab_length):
+                # Item is indented. Parse with last item as parent
+                self.parser.parseBlocks(lst[-1], [item])
+            else:
+                # New item. Create li and parse with it as parent
+                li = util.etree.SubElement(lst, 'li')
+                self.parser.parseBlocks(li, [item])
+        self.parser.state.reset()
+
+    def get_items(self, block):
+        """ Break a block into list items. """
+        items = []
+        for line in block.split('\n'):
+            m = self.CHILD_RE.match(line)
+            if m:
+                # This is a new list item
+                # Check first item for the start index
+                if not items and self.TAG == 'ol':
+                    # Detect the integer value of first list item
+                    INTEGER_RE = re.compile(r'(\d+)')
+                    self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
+                # Append to the list
+                items.append(m.group(3))
+            elif self.INDENT_RE.match(line):
+                # This is an indented (possibly nested) item.
+                if items[-1].startswith(' '*self.tab_length):
+                    # Previous item was indented. Append to that item.
+                    items[-1] = '%s\n%s' % (items[-1], line)
+                else:
+                    items.append(line)
+            else:
+                # This is another line of previous item. Append to that item.
+                items[-1] = '%s\n%s' % (items[-1], line)
+        return items
+
+
+class UListProcessor(OListProcessor):
+    """ Process unordered list blocks. """
+
+    TAG = 'ul'
+    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
+
+
+class HashHeaderProcessor(BlockProcessor):
+    """ Process Hash Headers. """
+
+    # Detect a header at start of any line in block
+    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+        if m:
+            before = block[:m.start()]  # All lines before header
+            after = block[m.end():]     # All lines after header
+            if before:
+                # As the header was not the first line of the block and the
+                # lines before the header must be parsed first,
+                # recursively parse these lines as a block.
+                self.parser.parseBlocks(parent, [before])
+            # Create header using named groups from RE
+            h = util.etree.SubElement(parent, 'h%d' % len(m.group('level')))
+            h.text = m.group('header').strip()
+            if after:
+                # Insert remaining lines as first block for future parsing.
+                blocks.insert(0, after)
+        else:  # pragma: no cover
+            # This should never happen, but just in case...
+            logger.warn("We've got a problem header: %r" % block)
+
+
+class SetextHeaderProcessor(BlockProcessor):
+    """ Process Setext-style Headers. """
+
+    # Detect Setext-style header. Must be first 2 lines of block.
+    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)
+
+    def test(self, parent, block):
+        return bool(self.RE.match(block))
+
+    def run(self, parent, blocks):
+        lines = blocks.pop(0).split('\n')
+        # Determine level. ``=`` is 1 and ``-`` is 2.
+        if lines[1].startswith('='):
+            level = 1
+        else:
+            level = 2
+        h = util.etree.SubElement(parent, 'h%d' % level)
+        h.text = lines[0].strip()
+        if len(lines) > 2:
+            # Block contains additional lines. Add to master blocks for later.
+            blocks.insert(0, '\n'.join(lines[2:]))
+
+
+class HRProcessor(BlockProcessor):
+    """ Process Horizontal Rules. """
+
+    RE = r'^[ ]{0,3}((-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,})[ ]*'
+    # Detect hr on any line of a block.
+    SEARCH_RE = re.compile(RE, re.MULTILINE)
+
+    def test(self, parent, block):
+        m = self.SEARCH_RE.search(block)
+        # No atomic grouping in python so we simulate it here for performance.
+        # The regex only matches what would be in the atomic group - the HR.
+        # Then check if we are at end of block or if next char is a newline.
+        if m and (m.end() == len(block) or block[m.end()] == '\n'):
+            # Save match object on class instance so we can use it later.
+            self.match = m
+            return True
+        return False
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        # Check for lines in block before hr.
+        prelines = block[:self.match.start()].rstrip('\n')
+        if prelines:
+            # Recursively parse lines before hr so they get parsed first.
+            self.parser.parseBlocks(parent, [prelines])
+        # create hr
+        util.etree.SubElement(parent, 'hr')
+        # check for lines in block after hr.
+        postlines = block[self.match.end():].lstrip('\n')
+        if postlines:
+            # Add lines after hr to master blocks for later parsing.
+            blocks.insert(0, postlines)
+
+
+class EmptyBlockProcessor(BlockProcessor):
+    """ Process blocks that are empty or start with an empty line. """
+
+    def test(self, parent, block):
+        return not block or block.startswith('\n')
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        filler = '\n\n'
+        if block:
+            # Starts with empty line
+            # Only replace a single line.
+            filler = '\n'
+            # Save the rest for later.
+            theRest = block[1:]
+            if theRest:
+                # Add remaining lines to master blocks for later.
+                blocks.insert(0, theRest)
+        sibling = self.lastChild(parent)
+        if (sibling is not None and sibling.tag == 'pre' and
+           len(sibling) and sibling[0].tag == 'code'):
+            # Last block is a codeblock. Append to preserve whitespace.
+            sibling[0].text = util.AtomicString(
+                '%s%s' % (sibling[0].text, filler)
+            )
+
+
+class ParagraphProcessor(BlockProcessor):
+    """ Process Paragraph blocks. """
+
+    def test(self, parent, block):
+        return True
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        if block.strip():
+            # Not a blank block. Add to parent, otherwise throw it away.
+            if self.parser.state.isstate('list'):
+                # The parent is a tight-list.
+                #
+                # Check for any children. This will likely only happen in a
+                # tight-list when a header isn't followed by a blank line.
+                # For example:
+                #
+                #     * # Header
+                #     Line 2 of list item - not part of header.
+                sibling = self.lastChild(parent)
+                if sibling is not None:
+                    # Insert after sibling.
+                    if sibling.tail:
+                        sibling.tail = '%s\n%s' % (sibling.tail, block)
+                    else:
+                        sibling.tail = '\n%s' % block
+                else:
+                    # Append to parent.text
+                    if parent.text:
+                        parent.text = '%s\n%s' % (parent.text, block)
+                    else:
+                        parent.text = block.lstrip()
+            else:
+                # Create a regular paragraph
+                p = util.etree.SubElement(parent, 'p')
+                p.text = block.lstrip()
diff --git a/markdown/extensions/__init__.py b/markdown/extensions/__init__.py
new file mode 100644
index 0000000..6e7a08a
--- /dev/null
+++ b/markdown/extensions/__init__.py
@@ -0,0 +1,100 @@
+"""
+Extensions
+-----------------------------------------------------------------------------
+"""
+
+from __future__ import unicode_literals
+from ..util import parseBoolValue
+import warnings
+
+
+class Extension(object):
+    """ Base class for extensions to subclass. """
+
+    # Default config -- to be overridden by a subclass
+    # Must be of the following format:
+    #     {
+    #       'key': ['value', 'description']
+    #     }
+    # Note that Extension.setConfig will raise a KeyError
+    # if a default is not set here.
+    config = {}
+
+    def __init__(self, *args, **kwargs):
+        """ Initiate Extension and set up configs. """
+
+        # check for configs arg for backward compat.
+        # (there only ever used to be one, so we use args[0])
+        if len(args):
+            if args[0] is not None:
+                self.setConfigs(args[0])
+            warnings.warn('Extension classes accepting positional args is '
+                          'pending Deprecation. Each setting should be '
+                          'passed into the Class as a keyword. Positional '
+                          'args are deprecated and will raise '
+                          'an error in version 2.7. See the Release Notes for '
+                          'Python-Markdown version 2.6 for more info.',
+                          DeprecationWarning)
+        # check for configs kwarg for backward compat.
+        if 'configs' in kwargs.keys():
+            if kwargs['configs'] is not None:
+                self.setConfigs(kwargs.pop('configs', {}))
+            warnings.warn('Extension classes accepting a dict on the single '
+                          'keyword "configs" is pending deprecation. Each '
+                          'setting should be passed into the Class as a '
+                          'keyword directly. The "configs" keyword is '
+                          'deprecated and will raise an error in '
+                          'version 2.7. See the Release Notes for '
+                          'Python-Markdown version 2.6 for more info.',
+                          DeprecationWarning)
+        # finally, use kwargs
+        self.setConfigs(kwargs)
+
+    def getConfig(self, key, default=''):
+        """ Return the setting for the given key, or the given default. """
+        if key in self.config:
+            return self.config[key][0]
+        else:
+            return default
+
+    def getConfigs(self):
+        """ Return all config settings as a dict. """
+        return dict([(key, self.getConfig(key)) for key in self.config.keys()])
+
+    def getConfigInfo(self):
+        """ Return all config descriptions as a list of tuples. """
+        return [(key, self.config[key][1]) for key in self.config.keys()]
+
+    def setConfig(self, key, value):
+        """ Set a config setting for `key` with the given `value`. """
+        if isinstance(self.config[key][0], bool):
+            value = parseBoolValue(value)
+        if self.config[key][0] is None:
+            value = parseBoolValue(value, preserve_none=True)
+        self.config[key][0] = value
+
+    def setConfigs(self, items):
+        """ Set multiple config settings given a dict or list of tuples. """
+        if hasattr(items, 'items'):
+            # it's a dict
+            items = items.items()
+        for key, value in items:
+            self.setConfig(key, value)
+
+    def extendMarkdown(self, md, md_globals):
+        """
+        Add the various processors and patterns to the Markdown instance.
+
+        This method must be overridden by every extension.
+
+        Keyword arguments:
+
+        * md: The Markdown instance.
+
+        * md_globals: Global variables in the markdown module namespace.
+
+        """
+        raise NotImplementedError(
+            'Extension "%s.%s" must define an "extendMarkdown" '
+            'method.' % (self.__class__.__module__, self.__class__.__name__)
+        )
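
The intended subclassing pattern looks roughly like the sketch below; the `prefix` option and the preprocessor it configures are invented for illustration and are not defined anywhere in this package:

    import markdown
    from markdown.extensions import Extension
    from markdown.preprocessors import Preprocessor


    class StripCommentsPreprocessor(Preprocessor):
        """Illustrative preprocessor: drop lines starting with a prefix."""
        def __init__(self, md, prefix):
            super(StripCommentsPreprocessor, self).__init__(md)
            self.prefix = prefix

        def run(self, lines):
            return [line for line in lines if not line.startswith(self.prefix)]


    class StripCommentsExtension(Extension):
        # Defaults must be declared here; setConfig() raises KeyError otherwise.
        config = {'prefix': ['//', 'Lines starting with this string are removed']}

        def extendMarkdown(self, md, md_globals):
            md.preprocessors.add(
                'strip_comments',
                StripCommentsPreprocessor(md, self.getConfig('prefix')),
                '>normalize_whitespace')


    html = markdown.markdown('// hidden\nvisible text',
                             extensions=[StripCommentsExtension(prefix='//')])
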
diff --git a/markdown/extensions/abbr.py b/markdown/extensions/abbr.py
new file mode 100644
index 0000000..353d126
--- /dev/null
+++ b/markdown/extensions/abbr.py
@@ -0,0 +1,91 @@
+'''
+Abbreviation Extension for Python-Markdown
+==========================================
+
+This extension adds abbreviation handling to Python-Markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/abbreviations.html>
+for documentation.
+
+Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
+ [Seemant Kulleen](http://www.kulleen.org/)
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+'''
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+from ..inlinepatterns import Pattern
+from ..util import etree, AtomicString
+import re
+
+# Global Vars
+ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
+
+
+class AbbrExtension(Extension):
+    """ Abbreviation Extension for Python-Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Insert AbbrPreprocessor before ReferencePreprocessor. """
+        md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
+
+
+class AbbrPreprocessor(Preprocessor):
+    """ Abbreviation Preprocessor - parse text for abbr references. """
+
+    def run(self, lines):
+        '''
+        Find and remove all Abbreviation references from the text.
+        Each reference is set as a new AbbrPattern in the markdown instance.
+
+        '''
+        new_text = []
+        for line in lines:
+            m = ABBR_REF_RE.match(line)
+            if m:
+                abbr = m.group('abbr').strip()
+                title = m.group('title').strip()
+                self.markdown.inlinePatterns['abbr-%s' % abbr] = \
+                    AbbrPattern(self._generate_pattern(abbr), title)
+            else:
+                new_text.append(line)
+        return new_text
+
+    def _generate_pattern(self, text):
+        '''
+        Given a string, returns a regex pattern to match that string.
+
+        'HTML' -> r'(?P<abbr>[H][T][M][L])'
+
+        Note: we force each char as a literal match (in brackets) as we don't
+        know what they will be beforehand.
+
+        '''
+        chars = list(text)
+        for i in range(len(chars)):
+            chars[i] = r'[%s]' % chars[i]
+        return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
+
+
+class AbbrPattern(Pattern):
+    """ Abbreviation inline pattern. """
+
+    def __init__(self, pattern, title):
+        super(AbbrPattern, self).__init__(pattern)
+        self.title = title
+
+    def handleMatch(self, m):
+        abbr = etree.Element('abbr')
+        abbr.text = AtomicString(m.group('abbr'))
+        abbr.set('title', self.title)
+        return abbr
+
+
+def makeExtension(*args, **kwargs):
+    return AbbrExtension(*args, **kwargs)
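
A usage sketch: with the extension enabled, a `*[ABBR]: definition` line is consumed and later occurrences of the abbreviation should be wrapped in `<abbr>` elements. The sample text is illustrative only:

    import markdown

    text = """\
    The HTML specification is maintained by the W3C.

    *[HTML]: Hyper Text Markup Language
    *[W3C]:  World Wide Web Consortium
    """
    # Both acronyms should come out wrapped in <abbr title="..."> elements.
    print(markdown.markdown(text, extensions=['markdown.extensions.abbr']))
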
diff --git a/markdown/extensions/admonition.py b/markdown/extensions/admonition.py
new file mode 100644
index 0000000..76e0fb5
--- /dev/null
+++ b/markdown/extensions/admonition.py
@@ -0,0 +1,96 @@
+"""
+Admonition extension for Python-Markdown
+========================================
+
+Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
+
+[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions  # noqa
+
+See <https://pythonhosted.org/Markdown/extensions/admonition.html>
+for documentation.
+
+Original code Copyright [Tiago Serafim](http://www.tiagoserafim.com/).
+
+All changes Copyright The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..blockprocessors import BlockProcessor
+from ..util import etree
+import re
+
+
+class AdmonitionExtension(Extension):
+    """ Admonition extension for Python-Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add Admonition to Markdown instance. """
+        md.registerExtension(self)
+
+        md.parser.blockprocessors.add('admonition',
+                                      AdmonitionProcessor(md.parser),
+                                      '_begin')
+
+
+class AdmonitionProcessor(BlockProcessor):
+
+    CLASSNAME = 'admonition'
+    CLASSNAME_TITLE = 'admonition-title'
+    RE = re.compile(r'(?:^|\n)!!!\ ?([\w\-]+)(?:\ "(.*?)")?')
+
+    def test(self, parent, block):
+        sibling = self.lastChild(parent)
+        return self.RE.search(block) or \
+            (block.startswith(' ' * self.tab_length) and sibling is not None and
+             sibling.get('class', '').find(self.CLASSNAME) != -1)
+
+    def run(self, parent, blocks):
+        sibling = self.lastChild(parent)
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+
+        if m:
+            block = block[m.end() + 1:]  # removes the first line
+
+        block, theRest = self.detab(block)
+
+        if m:
+            klass, title = self.get_class_and_title(m)
+            div = etree.SubElement(parent, 'div')
+            div.set('class', '%s %s' % (self.CLASSNAME, klass))
+            if title:
+                p = etree.SubElement(div, 'p')
+                p.text = title
+                p.set('class', self.CLASSNAME_TITLE)
+        else:
+            div = sibling
+
+        self.parser.parseChunk(div, block)
+
+        if theRest:
+            # This block contained unindented line(s) after the first indented
+            # line. Insert these lines as the first block of the master blocks
+            # list for future processing.
+            blocks.insert(0, theRest)
+
+    def get_class_and_title(self, match):
+        klass, title = match.group(1).lower(), match.group(2)
+        if title is None:
+            # no title was provided, use the capitalized classname as title
+            # e.g.: `!!! note` will render
+            # `<p class="admonition-title">Note</p>`
+            title = klass.capitalize()
+        elif title == '':
+            # an explicit blank title should not be rendered
+            # e.g.: `!!! warning ""` will *not* render `p` with a title
+            title = None
+        return klass, title
+
+
+def makeExtension(*args, **kwargs):
+    return AdmonitionExtension(*args, **kwargs)
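
A usage sketch of the `!!!` syntax; the sample text and the output described in the comment follow the documented behaviour:

    import markdown

    text = """\
    !!! note "Heads up"
        Indented content becomes the body of the admonition.

        A second indented paragraph stays inside the same div.
    """
    # Expected: <div class="admonition note"> containing a
    # <p class="admonition-title">Heads up</p> plus the body paragraphs.
    print(markdown.markdown(text, extensions=['markdown.extensions.admonition']))
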
diff --git a/markdown/extensions/attr_list.py b/markdown/extensions/attr_list.py
new file mode 100644
index 0000000..683bdf8
--- /dev/null
+++ b/markdown/extensions/attr_list.py
@@ -0,0 +1,177 @@
+"""
+Attribute List Extension for Python-Markdown
+============================================
+
+Adds attribute list syntax. Inspired by
+[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
+feature of the same name.
+
+See <https://pythonhosted.org/Markdown/extensions/attr_list.html>
+for documentation.
+
+Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
+
+All changes Copyright 2011-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import isBlockLevel
+import re
+
+try:
+    Scanner = re.Scanner
+except AttributeError:  # pragma: no cover
+    # must be on Python 2.4
+    from sre import Scanner
+
+
+def _handle_double_quote(s, t):
+    k, v = t.split('=')
+    return k, v.strip('"')
+
+
+def _handle_single_quote(s, t):
+    k, v = t.split('=')
+    return k, v.strip("'")
+
+
+def _handle_key_value(s, t):
+    return t.split('=')
+
+
+def _handle_word(s, t):
+    if t.startswith('.'):
+        return '.', t[1:]
+    if t.startswith('#'):
+        return 'id', t[1:]
+    return t, t
+
+_scanner = Scanner([
+    (r'[^ ]+=".*?"', _handle_double_quote),
+    (r"[^ ]+='.*?'", _handle_single_quote),
+    (r'[^ ]+=[^ =]+', _handle_key_value),
+    (r'[^ =]+', _handle_word),
+    (r' ', None)
+])
+
+
+def get_attrs(str):
+    """ Parse attribute list and return a list of attribute tuples. """
+    return _scanner.scan(str)[0]
+
+
+def isheader(elem):
+    return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
+
+
+class AttrListTreeprocessor(Treeprocessor):
+
+    BASE_RE = r'\{\:?([^\}]*)\}'
+    HEADER_RE = re.compile(r'[ ]+%s[ ]*$' % BASE_RE)
+    BLOCK_RE = re.compile(r'\n[ ]*%s[ ]*$' % BASE_RE)
+    INLINE_RE = re.compile(r'^%s' % BASE_RE)
+    NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
+                         r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
+                         r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
+                         r'\uf900-\ufdcf\ufdf0-\ufffd'
+                         r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
+
+    def run(self, doc):
+        for elem in doc.getiterator():
+            if isBlockLevel(elem.tag):
+                # Block level: check for attrs on last line of text
+                RE = self.BLOCK_RE
+                if isheader(elem) or elem.tag == 'dt':
+                    # header or def-term: check for attrs at end of line
+                    RE = self.HEADER_RE
+                if len(elem) and elem.tag == 'li':
+                    # special case list items. children may include a ul or ol.
+                    pos = None
+                    # find the ul or ol position
+                    for i, child in enumerate(elem):
+                        if child.tag in ['ul', 'ol']:
+                            pos = i
+                            break
+                    if pos is None and elem[-1].tail:
+                        # use tail of last child. no ul or ol.
+                        m = RE.search(elem[-1].tail)
+                        if m:
+                            self.assign_attrs(elem, m.group(1))
+                            elem[-1].tail = elem[-1].tail[:m.start()]
+                    elif pos is not None and pos > 0 and elem[pos-1].tail:
+                        # use tail of last child before ul or ol
+                        m = RE.search(elem[pos-1].tail)
+                        if m:
+                            self.assign_attrs(elem, m.group(1))
+                            elem[pos-1].tail = elem[pos-1].tail[:m.start()]
+                    elif elem.text:
+                        # use text. ul is first child.
+                        m = RE.search(elem.text)
+                        if m:
+                            self.assign_attrs(elem, m.group(1))
+                            elem.text = elem.text[:m.start()]
+                elif len(elem) and elem[-1].tail:
+                    # has children. Get from tail of last child
+                    m = RE.search(elem[-1].tail)
+                    if m:
+                        self.assign_attrs(elem, m.group(1))
+                        elem[-1].tail = elem[-1].tail[:m.start()]
+                        if isheader(elem):
+                            # clean up trailing #s
+                            elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
+                elif elem.text:
+                    # no children. Get from text.
+                    m = RE.search(elem.text)
+                    if not m and elem.tag == 'td':
+                        m = re.search(self.BASE_RE, elem.text)
+                    if m:
+                        self.assign_attrs(elem, m.group(1))
+                        elem.text = elem.text[:m.start()]
+                        if isheader(elem):
+                            # clean up trailing #s
+                            elem.text = elem.text.rstrip('#').rstrip()
+            else:
+                # inline: check for attrs at start of tail
+                if elem.tail:
+                    m = self.INLINE_RE.match(elem.tail)
+                    if m:
+                        self.assign_attrs(elem, m.group(1))
+                        elem.tail = elem.tail[m.end():]
+
+    def assign_attrs(self, elem, attrs):
+        """ Assign attrs to element. """
+        for k, v in get_attrs(attrs):
+            if k == '.':
+                # add to class
+                cls = elem.get('class')
+                if cls:
+                    elem.set('class', '%s %s' % (cls, v))
+                else:
+                    elem.set('class', v)
+            else:
+                # assign attr k with v
+                elem.set(self.sanitize_name(k), v)
+
+    def sanitize_name(self, name):
+        """
+        Sanitize name as 'an XML Name, minus the ":"'.
+        See http://www.w3.org/TR/REC-xml-names/#NT-NCName
+        """
+        return self.NAME_RE.sub('_', name)
+
+
+class AttrListExtension(Extension):
+    def extendMarkdown(self, md, md_globals):
+        md.treeprocessors.add(
+            'attr_list', AttrListTreeprocessor(md), '>prettify'
+        )
+
+
+def makeExtension(*args, **kwargs):
+    return AttrListExtension(*args, **kwargs)
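
A usage sketch of the `{: ... }` syntax on a header and on a paragraph; the sample text is illustrative:

    import markdown

    text = """\
    # A header with attributes {: #intro .title }

    A paragraph followed by an attribute list on its last line.
    {: .lead }
    """
    # The id and classes from the {: ... } lists land on the <h1> and <p>.
    print(markdown.markdown(text, extensions=['markdown.extensions.attr_list']))
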
diff --git a/markdown/extensions/codehilite.py b/markdown/extensions/codehilite.py
new file mode 100644
index 0000000..0657c37
--- /dev/null
+++ b/markdown/extensions/codehilite.py
@@ -0,0 +1,265 @@
+"""
+CodeHilite Extension for Python-Markdown
+========================================
+
+Adds code/syntax highlighting to standard Python-Markdown code blocks.
+
+See <https://pythonhosted.org/Markdown/extensions/code_hilite.html>
+for documentation.
+
+Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..treeprocessors import Treeprocessor
+
+try:
+    from pygments import highlight
+    from pygments.lexers import get_lexer_by_name, guess_lexer
+    from pygments.formatters import get_formatter_by_name
+    pygments = True
+except ImportError:
+    pygments = False
+
+
+def parse_hl_lines(expr):
+    """Support our syntax for emphasizing certain lines of code.
+
+    expr should be like '1 2' to emphasize lines 1 and 2 of a code block.
+    Returns a list of ints, the line numbers to emphasize.
+    """
+    if not expr:
+        return []
+
+    try:
+        return list(map(int, expr.split()))
+    except ValueError:
+        return []
+
+
+# ------------------ The Main CodeHilite Class ----------------------
+class CodeHilite(object):
+    """
+    Determine language of source code, and pass it into the Pygments highlighter.
+
+    Basic Usage:
+        >>> code = CodeHilite(src = 'some text')
+        >>> html = code.hilite()
+
+    * src: Source string or any object with a .readline attribute.
+
+    * linenums: (Boolean) Set line numbering to 'on' (True),
+      'off' (False) or 'auto' (None). Set to 'auto' by default.
+
+    * guess_lang: (Boolean) Turn language auto-detection
+      'on' or 'off' (on by default).
+
+    * css_class: Set class name of wrapper div ('codehilite' by default).
+
+    * hl_lines: (List of integers) Lines to emphasize, 1-indexed.
+
+    Low Level Usage:
+        >>> code = CodeHilite()
+        >>> code.src = 'some text' # String or anything with a .readline attr.
+        >>> code.linenums = True  # Turns line numbering on or off.
+        >>> html = code.hilite()
+
+    """
+
+    def __init__(self, src=None, linenums=None, guess_lang=True,
+                 css_class="codehilite", lang=None, style='default',
+                 noclasses=False, tab_length=4, hl_lines=None, use_pygments=True):
+        self.src = src
+        self.lang = lang
+        self.linenums = linenums
+        self.guess_lang = guess_lang
+        self.css_class = css_class
+        self.style = style
+        self.noclasses = noclasses
+        self.tab_length = tab_length
+        self.hl_lines = hl_lines or []
+        self.use_pygments = use_pygments
+
+    def hilite(self):
+        """
+        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
+        optional line numbers. The output should then be styled with css to
+        your liking. No styles are applied by default - only styling hooks
+        (i.e.: <span class="k">).
+
+        returns : A string of html.
+
+        """
+
+        self.src = self.src.strip('\n')
+
+        if self.lang is None:
+            self._parseHeader()
+
+        if pygments and self.use_pygments:
+            try:
+                lexer = get_lexer_by_name(self.lang)
+            except ValueError:
+                try:
+                    if self.guess_lang:
+                        lexer = guess_lexer(self.src)
+                    else:
+                        lexer = get_lexer_by_name('text')
+                except ValueError:
+                    lexer = get_lexer_by_name('text')
+            formatter = get_formatter_by_name('html',
+                                              linenos=self.linenums,
+                                              cssclass=self.css_class,
+                                              style=self.style,
+                                              noclasses=self.noclasses,
+                                              hl_lines=self.hl_lines)
+            return highlight(self.src, lexer, formatter)
+        else:
+            # just escape and build markup usable by JS highlighting libs
+            txt = self.src.replace('&', '&amp;')
+            txt = txt.replace('<', '&lt;')
+            txt = txt.replace('>', '&gt;')
+            txt = txt.replace('"', '&quot;')
+            classes = []
+            if self.lang:
+                classes.append('language-%s' % self.lang)
+            if self.linenums:
+                classes.append('linenums')
+            class_str = ''
+            if classes:
+                class_str = ' class="%s"' % ' '.join(classes)
+            return '<pre class="%s"><code%s>%s</code></pre>\n' % \
+                   (self.css_class, class_str, txt)
+
+    def _parseHeader(self):
+        """
+        Determines the language of a code block from its shebang line and whether
+        that line should be removed or left in place. If the shebang line contains
+        a path (even a single /) then it is assumed to be a real shebang line and
+        left alone. However, if no path is given (e.g. #!python or :::python)
+        then it is assumed to be a mock shebang for language identification of
+        a code fragment and removed from the code block prior to processing for
+        code highlighting. When a mock shebang (e.g. #!python) is found, line
+        numbering is turned on. When colons are found in place of a shebang
+        (e.g. :::python), line numbering is left in the current state - off
+        by default.
+
+        Also parses optional list of highlight lines, like:
+
+            :::python hl_lines="1 3"
+        """
+
+        import re
+
+        # split text into lines
+        lines = self.src.split("\n")
+        # pull first line to examine
+        fl = lines.pop(0)
+
+        c = re.compile(r'''
+            (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
+            (?P<path>(?:/\w+)*[/ ])?        # Zero or 1 path
+            (?P<lang>[\w+-]*)               # The language
+            \s*                             # Arbitrary whitespace
+            # Optional highlight lines, single- or double-quote-delimited
+            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
+            ''',  re.VERBOSE)
+        # search first line for shebang
+        m = c.search(fl)
+        if m:
+            # we have a match
+            try:
+                self.lang = m.group('lang').lower()
+            except IndexError:
+                self.lang = None
+            if m.group('path'):
+                # path exists - restore first line
+                lines.insert(0, fl)
+            if self.linenums is None and m.group('shebang'):
+                # Overridable and Shebang exists - use line numbers
+                self.linenums = True
+
+            self.hl_lines = parse_hl_lines(m.group('hl_lines'))
+        else:
+            # No match
+            lines.insert(0, fl)
+
+        self.src = "\n".join(lines).strip("\n")
+
+
+# ------------------ The Markdown Extension -------------------------------
+
+
+class HiliteTreeprocessor(Treeprocessor):
+    """ Hilight source code in code blocks. """
+
+    def run(self, root):
+        """ Find code blocks and store in htmlStash. """
+        blocks = root.iter('pre')
+        for block in blocks:
+            if len(block) == 1 and block[0].tag == 'code':
+                code = CodeHilite(
+                    block[0].text,
+                    linenums=self.config['linenums'],
+                    guess_lang=self.config['guess_lang'],
+                    css_class=self.config['css_class'],
+                    style=self.config['pygments_style'],
+                    noclasses=self.config['noclasses'],
+                    tab_length=self.markdown.tab_length,
+                    use_pygments=self.config['use_pygments']
+                )
+                placeholder = self.markdown.htmlStash.store(code.hilite(),
+                                                            safe=True)
+                # Clear codeblock in etree instance
+                block.clear()
+                # Change to p element which will later
+                # be removed when inserting raw html
+                block.tag = 'p'
+                block.text = placeholder
+
+
+class CodeHiliteExtension(Extension):
+    """ Add source code hilighting to markdown codeblocks. """
+
+    def __init__(self, *args, **kwargs):
+        # define default configs
+        self.config = {
+            'linenums': [None,
+                         "Use line numbers. True=yes, False=no, None=auto"],
+            'guess_lang': [True,
+                           "Automatic language detection - Default: True"],
+            'css_class': ["codehilite",
+                          "Set class name for wrapper <div> - "
+                          "Default: codehilite"],
+            'pygments_style': ['default',
+                               'Pygments HTML Formatter Style '
+                               '(Colorscheme) - Default: default'],
+            'noclasses': [False,
+                          'Use inline styles instead of CSS classes - '
+                          'Default false'],
+            'use_pygments': [True,
+                             'Use Pygments to Highlight code blocks. '
+                             'Disable if using a JavaScript library. '
+                             'Default: True']
+            }
+
+        super(CodeHiliteExtension, self).__init__(*args, **kwargs)
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add HilitePostprocessor to Markdown instance. """
+        hiliter = HiliteTreeprocessor(md)
+        hiliter.config = self.getConfigs()
+        md.treeprocessors.add("hilite", hiliter, "<inline")
+
+        md.registerExtension(self)
+
+
+def makeExtension(*args, **kwargs):
+    return CodeHiliteExtension(*args, **kwargs)
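
A usage sketch passing a configured `CodeHiliteExtension` instance rather than a dotted name; the option values and sample text are arbitrary:

    import markdown
    from markdown.extensions.codehilite import CodeHiliteExtension

    text = """\
    An indented code block with a mock shebang:

        #!python
        def hello():
            return "world"
    """
    # With Pygments installed this yields a <div class="codehilite"> of
    # highlighted markup; without it, the block is merely escaped and tagged
    # for a JavaScript highlighter.
    print(markdown.markdown(
        text, extensions=[CodeHiliteExtension(linenums=False, guess_lang=False)]))
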
diff --git a/markdown/extensions/def_list.py b/markdown/extensions/def_list.py
new file mode 100644
index 0000000..77cca6e
--- /dev/null
+++ b/markdown/extensions/def_list.py
@@ -0,0 +1,115 @@
+"""
+Definition List Extension for Python-Markdown
+=============================================
+
+Adds parsing of Definition Lists to Python-Markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/definition_lists.html>
+for documentation.
+
+Original code Copyright 2008 [Waylan Limberg](http://achinghead.com)
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..blockprocessors import BlockProcessor, ListIndentProcessor
+from ..util import etree
+import re
+
+
+class DefListProcessor(BlockProcessor):
+    """ Process Definition Lists. """
+
+    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
+    NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+
+        raw_block = blocks.pop(0)
+        m = self.RE.search(raw_block)
+        terms = [l.strip() for l in
+                 raw_block[:m.start()].split('\n') if l.strip()]
+        block = raw_block[m.end():]
+        no_indent = self.NO_INDENT_RE.match(block)
+        if no_indent:
+            d, theRest = (block, None)
+        else:
+            d, theRest = self.detab(block)
+        if d:
+            d = '%s\n%s' % (m.group(2), d)
+        else:
+            d = m.group(2)
+        sibling = self.lastChild(parent)
+        if not terms and sibling is None:
+            # This is not a definition item. Most likely a paragraph that
+            # starts with a colon at the beginning of a document or list.
+            blocks.insert(0, raw_block)
+            return False
+        if not terms and sibling.tag == 'p':
+            # The previous paragraph contains the terms
+            state = 'looselist'
+            terms = sibling.text.split('\n')
+            parent.remove(sibling)
+            # Acquire new sibling
+            sibling = self.lastChild(parent)
+        else:
+            state = 'list'
+
+        if sibling is not None and sibling.tag == 'dl':
+            # This is another item on an existing list
+            dl = sibling
+            if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
+                state = 'looselist'
+        else:
+            # This is a new list
+            dl = etree.SubElement(parent, 'dl')
+        # Add terms
+        for term in terms:
+            dt = etree.SubElement(dl, 'dt')
+            dt.text = term
+        # Add definition
+        self.parser.state.set(state)
+        dd = etree.SubElement(dl, 'dd')
+        self.parser.parseBlocks(dd, [d])
+        self.parser.state.reset()
+
+        if theRest:
+            blocks.insert(0, theRest)
+
+
+class DefListIndentProcessor(ListIndentProcessor):
+    """ Process indented children of definition list items. """
+
+    ITEM_TYPES = ['dd']
+    LIST_TYPES = ['dl']
+
+    def create_item(self, parent, block):
+        """ Create a new dd and parse the block with it as the parent. """
+        dd = etree.SubElement(parent, 'dd')
+        self.parser.parseBlocks(dd, [block])
+
+
+class DefListExtension(Extension):
+    """ Add definition lists to Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add an instance of DefListProcessor to BlockParser. """
+        md.parser.blockprocessors.add('defindent',
+                                      DefListIndentProcessor(md.parser),
+                                      '>indent')
+        md.parser.blockprocessors.add('deflist',
+                                      DefListProcessor(md.parser),
+                                      '>ulist')
+
+
+def makeExtension(*args, **kwargs):
+    return DefListExtension(*args, **kwargs)
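
A usage sketch of the definition-list syntax; the sample text is illustrative:

    import markdown

    text = """\
    Apple
    :   A fruit, and also a company.

    Orange
    :   A citrus fruit.
    """
    # Terms become <dt> elements and each ':' line a <dd> inside one <dl>.
    print(markdown.markdown(text, extensions=['markdown.extensions.def_list']))
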
diff --git a/markdown/extensions/extra.py b/markdown/extensions/extra.py
new file mode 100644
index 0000000..de5db03
--- /dev/null
+++ b/markdown/extensions/extra.py
@@ -0,0 +1,132 @@
+"""
+Python-Markdown Extra Extension
+===============================
+
+A compilation of various Python-Markdown extensions that imitates
+[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
+
+Note that each of the individual extensions still needs to be available
+on your PYTHONPATH. This extension simply wraps them all up as a
+convenience so that only one extension needs to be listed when
+initiating Markdown. See the documentation for each individual
+extension for specifics about that extension.
+
+There may be additional extensions that are distributed with
+Python-Markdown that are not included here in Extra. Those extensions
+are not part of PHP Markdown Extra, and therefore, not part of
+Python-Markdown Extra. If you really would like Extra to include
+additional extensions, we suggest creating your own clone of Extra
+under a different name. You could also edit the `extensions` global
+variable defined below, but be aware that such changes may be lost
+when you upgrade to any future version of Python-Markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/extra.html>
+for documentation.
+
+Copyright The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..blockprocessors import BlockProcessor
+from .. import util
+import re
+
+extensions = [
+    'markdown.extensions.smart_strong',
+    'markdown.extensions.fenced_code',
+    'markdown.extensions.footnotes',
+    'markdown.extensions.attr_list',
+    'markdown.extensions.def_list',
+    'markdown.extensions.tables',
+    'markdown.extensions.abbr'
+]
+
+
+class ExtraExtension(Extension):
+    """ Add various extensions to Markdown class."""
+
+    def __init__(self, *args, **kwargs):
+        """ config is a dumb holder which gets passed to actual ext later. """
+        self.config = kwargs.pop('configs', {})
+        self.config.update(kwargs)
+
+    def extendMarkdown(self, md, md_globals):
+        """ Register extension instances. """
+        md.registerExtensions(extensions, self.config)
+        if not md.safeMode:
+            # Turn on processing of markdown text within raw html
+            md.preprocessors['html_block'].markdown_in_raw = True
+            md.parser.blockprocessors.add('markdown_block',
+                                          MarkdownInHtmlProcessor(md.parser),
+                                          '_begin')
+            md.parser.blockprocessors.tag_counter = -1
+            md.parser.blockprocessors.contain_span_tags = re.compile(
+                r'^(p|h[1-6]|li|dd|dt|td|th|legend|address)$', re.IGNORECASE)
+
+
+def makeExtension(*args, **kwargs):
+    return ExtraExtension(*args, **kwargs)
+
+
+class MarkdownInHtmlProcessor(BlockProcessor):
+    """Process Markdown Inside HTML Blocks."""
+    def test(self, parent, block):
+        return block == util.TAG_PLACEHOLDER % \
+            str(self.parser.blockprocessors.tag_counter + 1)
+
+    def _process_nests(self, element, block):
+        """Process the element's child elements in self.run."""
+        # Build list of indexes of each nest within the parent element.
+        nest_index = []  # a list of tuples: (left index, right index)
+        i = self.parser.blockprocessors.tag_counter + 1
+        while len(self._tag_data) > i and self._tag_data[i]['left_index']:
+            left_child_index = self._tag_data[i]['left_index']
+            right_child_index = self._tag_data[i]['right_index']
+            nest_index.append((left_child_index - 1, right_child_index))
+            i += 1
+
+        # Create each nest subelement.
+        for i, (left_index, right_index) in enumerate(nest_index[:-1]):
+            self.run(element, block[left_index:right_index],
+                     block[right_index:nest_index[i + 1][0]], True)
+        self.run(element, block[nest_index[-1][0]:nest_index[-1][1]],  # last
+                 block[nest_index[-1][1]:], True)                      # nest
+
+    def run(self, parent, blocks, tail=None, nest=False):
+        self._tag_data = self.parser.markdown.htmlStash.tag_data
+
+        self.parser.blockprocessors.tag_counter += 1
+        tag = self._tag_data[self.parser.blockprocessors.tag_counter]
+
+        # Create Element
+        markdown_value = tag['attrs'].pop('markdown')
+        element = util.etree.SubElement(parent, tag['tag'], tag['attrs'])
+
+        # Slice Off Block
+        if nest:
+            self.parser.parseBlocks(parent, tail)  # Process Tail
+            block = blocks[1:]
+        else:  # includes nests since a third level of nesting isn't supported
+            block = blocks[tag['left_index'] + 1: tag['right_index']]
+            del blocks[:tag['right_index']]
+
+        # Process Text
+        if (self.parser.blockprocessors.contain_span_tags.match(  # Span Mode
+                tag['tag']) and markdown_value != 'block') or \
+                markdown_value == 'span':
+            element.text = '\n'.join(block)
+        else:                                                     # Block Mode
+            i = self.parser.blockprocessors.tag_counter + 1
+            if len(self._tag_data) > i and self._tag_data[i]['left_index']:
+                first_subelement_index = self._tag_data[i]['left_index'] - 1
+                self.parser.parseBlocks(
+                    element, block[:first_subelement_index])
+                if not nest:
+                    block = self._process_nests(element, block)
+            else:
+                self.parser.parseBlocks(element, block)
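
A usage sketch exercising two of the wrapped extensions (tables and footnotes) through the single `extra` name; the sample document is illustrative:

    import markdown

    text = """\
    First Header  | Second Header
    ------------- | -------------
    Cell one      | Cell two

    A footnote reference[^1].

    [^1]: Its definition, handled by the footnotes extension pulled in by extra.
    """
    print(markdown.markdown(text, extensions=['markdown.extensions.extra']))
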
diff --git a/markdown/extensions/fenced_code.py b/markdown/extensions/fenced_code.py
new file mode 100644
index 0000000..4af8891
--- /dev/null
+++ b/markdown/extensions/fenced_code.py
@@ -0,0 +1,112 @@
+"""
+Fenced Code Extension for Python Markdown
+=========================================
+
+This extension adds Fenced Code Blocks to Python-Markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html>
+for documentation.
+
+Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
+
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
+import re
+
+
+class FencedCodeExtension(Extension):
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add FencedBlockPreprocessor to the Markdown instance. """
+        md.registerExtension(self)
+
+        md.preprocessors.add('fenced_code_block',
+                             FencedBlockPreprocessor(md),
+                             ">normalize_whitespace")
+
+
+class FencedBlockPreprocessor(Preprocessor):
+    FENCED_BLOCK_RE = re.compile(r'''
+(?P<fence>^(?:~{3,}|`{3,}))[ ]*         # Opening ``` or ~~~
+(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*))?[ ]*  # Optional {, and lang
+# Optional highlight lines, single- or double-quote-delimited
+(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?[ ]*
+}?[ ]*\n                                # Optional closing }
+(?P<code>.*?)(?<=\n)
+(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
+    CODE_WRAP = '<pre><code%s>%s</code></pre>'
+    LANG_TAG = ' class="%s"'
+
+    def __init__(self, md):
+        super(FencedBlockPreprocessor, self).__init__(md)
+
+        self.checked_for_codehilite = False
+        self.codehilite_conf = {}
+
+    def run(self, lines):
+        """ Match and store Fenced Code Blocks in the HtmlStash. """
+
+        # Check for code hilite extension
+        if not self.checked_for_codehilite:
+            for ext in self.markdown.registeredExtensions:
+                if isinstance(ext, CodeHiliteExtension):
+                    self.codehilite_conf = ext.config
+                    break
+
+            self.checked_for_codehilite = True
+
+        text = "\n".join(lines)
+        while 1:
+            m = self.FENCED_BLOCK_RE.search(text)
+            if m:
+                lang = ''
+                if m.group('lang'):
+                    lang = self.LANG_TAG % m.group('lang')
+
+                # If config is not empty, then the codehilite extension
+                # is enabled, so we call it to highlight the code
+                if self.codehilite_conf:
+                    highliter = CodeHilite(
+                        m.group('code'),
+                        linenums=self.codehilite_conf['linenums'][0],
+                        guess_lang=self.codehilite_conf['guess_lang'][0],
+                        css_class=self.codehilite_conf['css_class'][0],
+                        style=self.codehilite_conf['pygments_style'][0],
+                        lang=(m.group('lang') or None),
+                        noclasses=self.codehilite_conf['noclasses'][0],
+                        hl_lines=parse_hl_lines(m.group('hl_lines'))
+                    )
+
+                    code = highliter.hilite()
+                else:
+                    code = self.CODE_WRAP % (lang,
+                                             self._escape(m.group('code')))
+
+                placeholder = self.markdown.htmlStash.store(code, safe=True)
+                text = '%s\n%s\n%s' % (text[:m.start()],
+                                       placeholder,
+                                       text[m.end():])
+            else:
+                break
+        return text.split("\n")
+
+    def _escape(self, txt):
+        """ basic html escaping """
+        txt = txt.replace('&', '&amp;')
+        txt = txt.replace('<', '&lt;')
+        txt = txt.replace('>', '&gt;')
+        txt = txt.replace('"', '&quot;')
+        return txt
+
+
+def makeExtension(*args, **kwargs):
+    return FencedCodeExtension(*args, **kwargs)
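
A usage sketch; the fenced block is assembled with `join` only to keep the backtick fence out of this example's own formatting, and the expected output in the comment summarises the documented behaviour:

    import markdown

    text = '\n'.join([
        'Some prose, then a fenced block:',
        '',
        '```python',
        'def greet(name):',
        '    return "Hello, %s" % name',
        '```',
    ])
    # Expect <pre><code class="python">...</code></pre>; with codehilite also
    # enabled the block would be highlighted instead.
    print(markdown.markdown(text, extensions=['markdown.extensions.fenced_code']))
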
diff --git a/markdown/extensions/footnotes.py b/markdown/extensions/footnotes.py
new file mode 100644
index 0000000..d8caae2
--- /dev/null
+++ b/markdown/extensions/footnotes.py
@@ -0,0 +1,319 @@
+"""
+Footnotes Extension for Python-Markdown
+=======================================
+
+Adds footnote handling to Python-Markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/footnotes.html>
+for documentation.
+
+Copyright The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+from ..inlinepatterns import Pattern
+from ..treeprocessors import Treeprocessor
+from ..postprocessors import Postprocessor
+from ..util import etree, text_type
+from ..odict import OrderedDict
+import re
+
+FN_BACKLINK_TEXT = "zz1337820767766393qq"
+NBSP_PLACEHOLDER = "qq3936677670287331zz"
+DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
+TABBED_RE = re.compile(r'((\t)|(    ))(.*)')
+
+
+class FootnoteExtension(Extension):
+    """ Footnote Extension. """
+
+    def __init__(self, *args, **kwargs):
+        """ Setup configs. """
+
+        self.config = {
+            'PLACE_MARKER':
+                ["///Footnotes Go Here///",
+                 "The text string that marks where the footnotes go"],
+            'UNIQUE_IDS':
+                [False,
+                 "Avoid name collisions across "
+                 "multiple calls to reset()."],
+            "BACKLINK_TEXT":
+                ["&#8617;",
+                 "The text string that links from the footnote "
+                 "to the reader's place."]
+        }
+        super(FootnoteExtension, self).__init__(*args, **kwargs)
+
+        # In multiple invocations, emit links that don't get tangled.
+        self.unique_prefix = 0
+
+        self.reset()
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add pieces to Markdown. """
+        md.registerExtension(self)
+        self.parser = md.parser
+        self.md = md
+        # Insert a preprocessor before ReferencePreprocessor
+        md.preprocessors.add(
+            "footnote", FootnotePreprocessor(self), "<reference"
+        )
+        # Insert an inline pattern before ImageReferencePattern
+        FOOTNOTE_RE = r'\[\^([^\]]*)\]'  # blah blah [^1] blah
+        md.inlinePatterns.add(
+            "footnote", FootnotePattern(FOOTNOTE_RE, self), "<reference"
+        )
+        # Insert a tree-processor that would actually add the footnote div
+        # This must be before all other treeprocessors (i.e., inline and
+        # codehilite) so they can run on the contents of the div.
+        md.treeprocessors.add(
+            "footnote", FootnoteTreeprocessor(self), "_begin"
+        )
+        # Insert a postprocessor after the amp_substitute processor
+        md.postprocessors.add(
+            "footnote", FootnotePostprocessor(self), ">amp_substitute"
+        )
+
+    def reset(self):
+        """ Clear footnotes on reset, and prepare for a distinct document. """
+        self.footnotes = OrderedDict()
+        self.unique_prefix += 1
+
+    def findFootnotesPlaceholder(self, root):
+        """ Return ElementTree Element that contains Footnote placeholder. """
+        def finder(element):
+            for child in element:
+                if child.text:
+                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
+                        return child, element, True
+                if child.tail:
+                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
+                        return child, element, False
+                finder(child)
+            return None
+
+        res = finder(root)
+        return res
+
+    def setFootnote(self, id, text):
+        """ Store a footnote for later retrieval. """
+        self.footnotes[id] = text
+
+    def get_separator(self):
+        if self.md.output_format in ['html5', 'xhtml5']:
+            return '-'
+        return ':'
+
+    def makeFootnoteId(self, id):
+        """ Return footnote link id. """
+        if self.getConfig("UNIQUE_IDS"):
+            return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
+        else:
+            return 'fn%s%s' % (self.get_separator(), id)
+
+    def makeFootnoteRefId(self, id):
+        """ Return footnote back-link id. """
+        if self.getConfig("UNIQUE_IDS"):
+            return 'fnref%s%d-%s' % (self.get_separator(),
+                                     self.unique_prefix, id)
+        else:
+            return 'fnref%s%s' % (self.get_separator(), id)
+
+    def makeFootnotesDiv(self, root):
+        """ Return div of footnotes as et Element. """
+
+        if not list(self.footnotes.keys()):
+            return None
+
+        div = etree.Element("div")
+        div.set('class', 'footnote')
+        etree.SubElement(div, "hr")
+        ol = etree.SubElement(div, "ol")
+
+        for id in self.footnotes.keys():
+            li = etree.SubElement(ol, "li")
+            li.set("id", self.makeFootnoteId(id))
+            self.parser.parseChunk(li, self.footnotes[id])
+            backlink = etree.Element("a")
+            backlink.set("href", "#" + self.makeFootnoteRefId(id))
+            if self.md.output_format not in ['html5', 'xhtml5']:
+                backlink.set("rev", "footnote")  # Invalid in HTML5
+            backlink.set("class", "footnote-backref")
+            backlink.set(
+                "title",
+                "Jump back to footnote %d in the text" %
+                (self.footnotes.index(id)+1)
+            )
+            backlink.text = FN_BACKLINK_TEXT
+
+            if li.getchildren():
+                node = li[-1]
+                if node.tag == "p":
+                    node.text = node.text + NBSP_PLACEHOLDER
+                    node.append(backlink)
+                else:
+                    p = etree.SubElement(li, "p")
+                    p.append(backlink)
+        return div
+
+
+class FootnotePreprocessor(Preprocessor):
+    """ Find all footnote references and store for later use. """
+
+    def __init__(self, footnotes):
+        self.footnotes = footnotes
+
+    def run(self, lines):
+        """
+        Loop through lines and find, set, and remove footnote definitions.
+
+        Keywords:
+
+        * lines: A list of lines of text
+
+        Return: A list of lines of text with footnote definitions removed.
+
+        """
+        newlines = []
+        i = 0
+        while True:
+            m = DEF_RE.match(lines[i])
+            if m:
+                fn, _i = self.detectTabbed(lines[i+1:])
+                fn.insert(0, m.group(2))
+                i += _i-1  # skip past footnote
+                self.footnotes.setFootnote(m.group(1), "\n".join(fn))
+            else:
+                newlines.append(lines[i])
+            if len(lines) > i+1:
+                i += 1
+            else:
+                break
+        return newlines
+
+    def detectTabbed(self, lines):
+        """ Find indented text and remove indent before further processing.
+
+        Keyword arguments:
+
+        * lines: an array of strings
+
+        Returns: a list of post processed items and the index of last line.
+
+        """
+        items = []
+        blank_line = False  # have we encountered a blank line yet?
+        i = 0  # to keep track of where we are
+
+        def detab(line):
+            match = TABBED_RE.match(line)
+            if match:
+                return match.group(4)
+
+        for line in lines:
+            if line.strip():  # Non-blank line
+                detabbed_line = detab(line)
+                if detabbed_line:
+                    items.append(detabbed_line)
+                    i += 1
+                    continue
+                elif not blank_line and not DEF_RE.match(line):
+                    # not tabbed but still part of first par.
+                    items.append(line)
+                    i += 1
+                    continue
+                else:
+                    return items, i+1
+
+            else:  # Blank line: _maybe_ we are done.
+                blank_line = True
+                i += 1  # advance
+
+                # Find the next non-blank line
+                for j in range(i, len(lines)):
+                    if lines[j].strip():
+                        next_line = lines[j]
+                        break
+                else:
+                    break  # There is no more text; we are done.
+
+                # Check if the next non-blank line is tabbed
+                if detab(next_line):  # Yes, more work to do.
+                    items.append("")
+                    continue
+                else:
+                    break  # No, we are done.
+        else:
+            i += 1
+
+        return items, i
+
+
+class FootnotePattern(Pattern):
+    """ InlinePattern for footnote markers in a document's body text. """
+
+    def __init__(self, pattern, footnotes):
+        super(FootnotePattern, self).__init__(pattern)
+        self.footnotes = footnotes
+
+    def handleMatch(self, m):
+        id = m.group(2)
+        if id in self.footnotes.footnotes.keys():
+            sup = etree.Element("sup")
+            a = etree.SubElement(sup, "a")
+            sup.set('id', self.footnotes.makeFootnoteRefId(id))
+            a.set('href', '#' + self.footnotes.makeFootnoteId(id))
+            if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
+                a.set('rel', 'footnote')  # invalid in HTML5
+            a.set('class', 'footnote-ref')
+            a.text = text_type(self.footnotes.footnotes.index(id) + 1)
+            return sup
+        else:
+            return None
+
+
+class FootnoteTreeprocessor(Treeprocessor):
+    """ Build and append footnote div to end of document. """
+
+    def __init__(self, footnotes):
+        self.footnotes = footnotes
+
+    def run(self, root):
+        footnotesDiv = self.footnotes.makeFootnotesDiv(root)
+        if footnotesDiv is not None:
+            result = self.footnotes.findFootnotesPlaceholder(root)
+            if result:
+                child, parent, isText = result
+                ind = parent.getchildren().index(child)
+                if isText:
+                    parent.remove(child)
+                    parent.insert(ind, footnotesDiv)
+                else:
+                    parent.insert(ind + 1, footnotesDiv)
+                    child.tail = None
+            else:
+                root.append(footnotesDiv)
+
+
+class FootnotePostprocessor(Postprocessor):
+    """ Replace placeholders with html entities. """
+    def __init__(self, footnotes):
+        self.footnotes = footnotes
+
+    def run(self, text):
+        text = text.replace(
+            FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
+        )
+        return text.replace(NBSP_PLACEHOLDER, "&#160;")
+
+
+def makeExtension(*args, **kwargs):
+    """ Return an instance of the FootnoteExtension """
+    return FootnoteExtension(*args, **kwargs)
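
A usage sketch of the reference/definition syntax; the output shape in the comment summarises the documented behaviour:

    import markdown

    text = """\
    Footnotes are referenced inline[^note] and defined elsewhere.

    [^note]: The definition may be a paragraph of its own.
    """
    # References become <sup><a class="footnote-ref">...</a></sup>, and a
    # <div class="footnote"> with an <ol> of definitions is appended.
    print(markdown.markdown(text, extensions=['markdown.extensions.footnotes']))
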
diff --git a/markdown/extensions/headerid.py b/markdown/extensions/headerid.py
new file mode 100644
index 0000000..2cb20b9
--- /dev/null
+++ b/markdown/extensions/headerid.py
@@ -0,0 +1,97 @@
+"""
+HeaderID Extension for Python-Markdown
+======================================
+
+Auto-generate id attributes for HTML headers.
+
+See <https://pythonhosted.org/Markdown/extensions/header_id.html>
+for documentation.
+
+Original code Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
+
+All changes Copyright 2011-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import parseBoolValue
+from .toc import slugify, unique, stashedHTML2text
+import warnings
+
+
+class HeaderIdTreeprocessor(Treeprocessor):
+    """ Assign IDs to headers. """
+
+    IDs = set()
+
+    def run(self, doc):
+        start_level, force_id = self._get_meta()
+        slugify = self.config['slugify']
+        sep = self.config['separator']
+        for elem in doc:
+            if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
+                if force_id:
+                    if "id" in elem.attrib:
+                        id = elem.get('id')
+                    else:
+                        id = stashedHTML2text(''.join(elem.itertext()), self.md)
+                        id = slugify(id, sep)
+                    elem.set('id', unique(id, self.IDs))
+                if start_level:
+                    level = int(elem.tag[-1]) + start_level
+                    if level > 6:
+                        level = 6
+                    elem.tag = 'h%d' % level
+
+    def _get_meta(self):
+        """ Return meta data supported by this ext as a tuple """
+        level = int(self.config['level']) - 1
+        force = parseBoolValue(self.config['forceid'])
+        if hasattr(self.md, 'Meta'):
+            if 'header_level' in self.md.Meta:
+                level = int(self.md.Meta['header_level'][0]) - 1
+            if 'header_forceid' in self.md.Meta:
+                force = parseBoolValue(self.md.Meta['header_forceid'][0])
+        return level, force
+
+
+class HeaderIdExtension(Extension):
+    def __init__(self, *args, **kwargs):
+        # set defaults
+        self.config = {
+            'level': ['1', 'Base level for headers.'],
+            'forceid': ['True', 'Force all headers to have an id.'],
+            'separator': ['-', 'Word separator.'],
+            'slugify': [slugify, 'Callable to generate anchors']
+        }
+
+        super(HeaderIdExtension, self).__init__(*args, **kwargs)
+
+        warnings.warn(
+            'The HeaderId Extension is pending deprecation. Use the TOC Extension instead.',
+            PendingDeprecationWarning
+        )
+
+    def extendMarkdown(self, md, md_globals):
+        md.registerExtension(self)
+        self.processor = HeaderIdTreeprocessor()
+        self.processor.md = md
+        self.processor.config = self.getConfigs()
+        if 'attr_list' in md.treeprocessors.keys():
+            # insert after attr_list treeprocessor
+            md.treeprocessors.add('headerid', self.processor, '>attr_list')
+        else:
+            # insert after 'prettify' treeprocessor.
+            md.treeprocessors.add('headerid', self.processor, '>prettify')
+
+    def reset(self):
+        self.processor.IDs = set()
+
+
+def makeExtension(*args, **kwargs):
+    return HeaderIdExtension(*args, **kwargs)
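
A usage sketch; the expected `id` in the comment assumes the default `slugify` callable and `-` separator:

    import markdown

    text = "# My First Heading\n\nSome body text."
    # With forceid left at its default of True the heading should come out as
    # <h1 id="my-first-heading">; note the extension itself warns that it is
    # pending deprecation in favour of the toc extension.
    print(markdown.markdown(text, extensions=['markdown.extensions.headerid']))
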
diff --git a/markdown/extensions/meta.py b/markdown/extensions/meta.py
new file mode 100644
index 0000000..711235e
--- /dev/null
+++ b/markdown/extensions/meta.py
@@ -0,0 +1,78 @@
+"""
+Meta Data Extension for Python-Markdown
+=======================================
+
+This extension adds Meta Data handling to markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/meta_data.html>
+for documentation.
+
+Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+import re
+import logging
+
+log = logging.getLogger('MARKDOWN')
+
+# Global Vars
+META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
+META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
+BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
+END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
+
+
+class MetaExtension (Extension):
+    """ Meta-Data extension for Python-Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add MetaPreprocessor to Markdown instance. """
+        md.preprocessors.add("meta",
+                             MetaPreprocessor(md),
+                             ">normalize_whitespace")
+
+
+class MetaPreprocessor(Preprocessor):
+    """ Get Meta-Data. """
+
+    def run(self, lines):
+        """ Parse Meta-Data and store in Markdown.Meta. """
+        meta = {}
+        key = None
+        if lines and BEGIN_RE.match(lines[0]):
+            lines.pop(0)
+        while lines:
+            line = lines.pop(0)
+            m1 = META_RE.match(line)
+            if line.strip() == '' or END_RE.match(line):
+                break  # blank line or end of YAML header - done
+            if m1:
+                key = m1.group('key').lower().strip()
+                value = m1.group('value').strip()
+                try:
+                    meta[key].append(value)
+                except KeyError:
+                    meta[key] = [value]
+            else:
+                m2 = META_MORE_RE.match(line)
+                if m2 and key:
+                    # Add another line to existing key
+                    meta[key].append(m2.group('value').strip())
+                else:
+                    lines.insert(0, line)
+                    break  # no meta data - done
+        self.markdown.Meta = meta
+        return lines
+
+
+def makeExtension(*args, **kwargs):
+    return MetaExtension(*args, **kwargs)
diff --git a/markdown/extensions/nl2br.py b/markdown/extensions/nl2br.py
new file mode 100644
index 0000000..8acd60c
--- /dev/null
+++ b/markdown/extensions/nl2br.py
@@ -0,0 +1,35 @@
+"""
+NL2BR Extension
+===============
+
+A Python-Markdown extension to treat newlines as hard breaks; like
+GitHub-flavored Markdown does.
+
+See <https://pythonhosted.org/Markdown/extensions/nl2br.html>
+for documentation.
+
+Original code Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
+
+All changes Copyright 2011-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import SubstituteTagPattern
+
+BR_RE = r'\n'
+
+
+class Nl2BrExtension(Extension):
+
+    def extendMarkdown(self, md, md_globals):
+        br_tag = SubstituteTagPattern(BR_RE, 'br')
+        md.inlinePatterns.add('nl', br_tag, '_end')
+
+
+def makeExtension(*args, **kwargs):
+    return Nl2BrExtension(*args, **kwargs)
diff --git a/markdown/extensions/sane_lists.py b/markdown/extensions/sane_lists.py
new file mode 100644
index 0000000..213c8a6
--- /dev/null
+++ b/markdown/extensions/sane_lists.py
@@ -0,0 +1,47 @@
+"""
+Sane List Extension for Python-Markdown
+=======================================
+
+Modify the behavior of Lists in Python-Markdown to act in a sane manner.
+
+See <https://pythonhosted.org/Markdown/extensions/sane_lists.html>
+for documentation.
+
+Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
+
+All changes Copyright 2011-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..blockprocessors import OListProcessor, UListProcessor
+import re
+
+
+class SaneOListProcessor(OListProcessor):
+
+    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
+    SIBLING_TAGS = ['ol']
+
+
+class SaneUListProcessor(UListProcessor):
+
+    CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
+    SIBLING_TAGS = ['ul']
+
+
+class SaneListExtension(Extension):
+    """ Add sane lists to Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Override existing Processors. """
+        md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
+        md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
+
+
+def makeExtension(*args, **kwargs):
+    return SaneListExtension(*args, **kwargs)
diff --git a/markdown/extensions/smart_strong.py b/markdown/extensions/smart_strong.py
new file mode 100644
index 0000000..58570bb
--- /dev/null
+++ b/markdown/extensions/smart_strong.py
@@ -0,0 +1,41 @@
+'''
+Smart_Strong Extension for Python-Markdown
+==========================================
+
+This extension adds smarter handling of double underscores within words.
+
+See <https://pythonhosted.org/Markdown/extensions/smart_strong.html>
+for documentation.
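+
+For example (illustrative sketch), the double underscores inside
+`double__underscore__words` are left alone, while `__this__` is still
+rendered as strong text:
+
+    import markdown
+
+    html = markdown.markdown('double__underscore__words and __this__',
+                             extensions=['markdown.extensions.smart_strong'])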
+
+Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
+
+All changes Copyright 2011-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+'''
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import SimpleTagPattern
+
+SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
+STRONG_RE = r'(\*{2})(.+?)\2'
+
+
+class SmartEmphasisExtension(Extension):
+    """ Add smart_emphasis extension to Markdown class."""
+
+    def extendMarkdown(self, md, md_globals):
+        """ Modify inline patterns. """
+        md.inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong')
+        md.inlinePatterns.add(
+            'strong2',
+            SimpleTagPattern(SMART_STRONG_RE, 'strong'),
+            '>emphasis2'
+        )
+
+
+def makeExtension(*args, **kwargs):
+    return SmartEmphasisExtension(*args, **kwargs)
diff --git a/markdown/extensions/smarty.py b/markdown/extensions/smarty.py
new file mode 100644
index 0000000..46e54c1
--- /dev/null
+++ b/markdown/extensions/smarty.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+'''
+Smarty extension for Python-Markdown
+====================================
+
+Adds conversion of ASCII dashes, quotes and ellipses to their HTML
+entity equivalents.
+
+See <https://pythonhosted.org/Markdown/extensions/smarty.html>
+for documentation.
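+
+For example (illustrative sketch), `"Hello" -- world...` is rendered using
+the `&ldquo;`, `&rdquo;`, `&ndash;` and `&hellip;` entities:
+
+    import markdown
+
+    html = markdown.markdown('"Hello" -- world...',
+                             extensions=['markdown.extensions.smarty'])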
+
+Author: 2013, Dmitry Shachnev <mitya57@gmail.com>
+
+All changes Copyright 2013-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+SmartyPants license:
+
+   Copyright (c) 2003 John Gruber <http://daringfireball.net/>
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+   *  Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   *  Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+   *  Neither the name "SmartyPants" nor the names of its contributors
+      may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+   This software is provided by the copyright holders and contributors "as
+   is" and any express or implied warranties, including, but not limited
+   to, the implied warranties of merchantability and fitness for a
+   particular purpose are disclaimed. In no event shall the copyright
+   owner or contributors be liable for any direct, indirect, incidental,
+   special, exemplary, or consequential damages (including, but not
+   limited to, procurement of substitute goods or services; loss of use,
+   data, or profits; or business interruption) however caused and on any
+   theory of liability, whether in contract, strict liability, or tort
+   (including negligence or otherwise) arising in any way out of the use
+   of this software, even if advised of the possibility of such damage.
+
+
+smartypants.py license:
+
+   smartypants.py is a derivative work of SmartyPants.
+   Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/>
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+   *  Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   *  Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+
+   This software is provided by the copyright holders and contributors "as
+   is" and any express or implied warranties, including, but not limited
+   to, the implied warranties of merchantability and fitness for a
+   particular purpose are disclaimed. In no event shall the copyright
+   owner or contributors be liable for any direct, indirect, incidental,
+   special, exemplary, or consequential damages (including, but not
+   limited to, procurement of substitute goods or services; loss of use,
+   data, or profits; or business interruption) however caused and on any
+   theory of liability, whether in contract, strict liability, or tort
+   (including negligence or otherwise) arising in any way out of the use
+   of this software, even if advised of the possibility of such damage.
+
+'''
+
+
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import HtmlPattern
+from ..odict import OrderedDict
+from ..treeprocessors import InlineProcessor
+
+
+# Constants for quote education.
+punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
+endOfWordClass = r"[\s.,;:!?)]"
+closeClass = "[^\ \t\r\n\[\{\(\-\u0002\u0003]"
+
+openingQuotesBase = (
+    '(\s'               # a  whitespace char
+    '|&nbsp;'           # or a non-breaking space entity
+    '|--'               # or dashes
+    '|–|—'              # or unicode
+    '|&[mn]dash;'       # or named dash entities
+    '|&#8211;|&#8212;'  # or decimal entities
+    ')'
+)
+
+substitutions = {
+    'mdash': '&mdash;',
+    'ndash': '&ndash;',
+    'ellipsis': '&hellip;',
+    'left-angle-quote': '&laquo;',
+    'right-angle-quote': '&raquo;',
+    'left-single-quote': '&lsquo;',
+    'right-single-quote': '&rsquo;',
+    'left-double-quote': '&ldquo;',
+    'right-double-quote': '&rdquo;',
+}
+
+
+# Special case if the very first character is a quote
+# followed by punctuation at a non-word-break. Close the quotes by brute force:
+singleQuoteStartRe = r"^'(?=%s\B)" % punctClass
+doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass
+
+# Special case for double sets of quotes, e.g.:
+#   <p>He said, "'Quoted' words in a larger quote."</p>
+doubleQuoteSetsRe = r""""'(?=\w)"""
+singleQuoteSetsRe = r"""'"(?=\w)"""
+
+# Special case for decade abbreviations (the '80s):
+decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)"
+
+# Get most opening double quotes:
+openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase
+
+# Double closing quotes:
+closingDoubleQuotesRegex = r'"(?=\s)'
+closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass
+
+# Get most opening single quotes:
+openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase
+
+# Single closing quotes:
+closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
+closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass
+
+# All remaining quotes should be opening ones
+remainingSingleQuotesRegex = "'"
+remainingDoubleQuotesRegex = '"'
+
+
+class SubstituteTextPattern(HtmlPattern):
+    def __init__(self, pattern, replace, markdown_instance):
+        """ Replaces matches with some text. """
+        HtmlPattern.__init__(self, pattern)
+        self.replace = replace
+        self.markdown = markdown_instance
+
+    def handleMatch(self, m):
+        result = ''
+        for part in self.replace:
+            if isinstance(part, int):
+                result += m.group(part)
+            else:
+                result += self.markdown.htmlStash.store(part, safe=True)
+        return result
+
+
+class SmartyExtension(Extension):
+    def __init__(self, *args, **kwargs):
+        self.config = {
+            'smart_quotes': [True, 'Educate quotes'],
+            'smart_angled_quotes': [False, 'Educate angled quotes'],
+            'smart_dashes': [True, 'Educate dashes'],
+            'smart_ellipses': [True, 'Educate ellipses'],
+            'substitutions': [{}, 'Overwrite default substitutions'],
+        }
+        super(SmartyExtension, self).__init__(*args, **kwargs)
+        self.substitutions = dict(substitutions)
+        self.substitutions.update(self.getConfig('substitutions', default={}))
+
+    def _addPatterns(self, md, patterns, serie):
+        for ind, pattern in enumerate(patterns):
+            pattern += (md,)
+            pattern = SubstituteTextPattern(*pattern)
+            after = ('>smarty-%s-%d' % (serie, ind - 1) if ind else '_begin')
+            name = 'smarty-%s-%d' % (serie, ind)
+            self.inlinePatterns.add(name, pattern, after)
+
+    def educateDashes(self, md):
+        emDashesPattern = SubstituteTextPattern(
+            r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md
+        )
+        enDashesPattern = SubstituteTextPattern(
+            r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md
+        )
+        self.inlinePatterns.add('smarty-em-dashes', emDashesPattern, '_begin')
+        self.inlinePatterns.add(
+            'smarty-en-dashes', enDashesPattern, '>smarty-em-dashes'
+        )
+
+    def educateEllipses(self, md):
+        ellipsesPattern = SubstituteTextPattern(
+            r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md
+        )
+        self.inlinePatterns.add('smarty-ellipses', ellipsesPattern, '_begin')
+
+    def educateAngledQuotes(self, md):
+        leftAngledQuotePattern = SubstituteTextPattern(
+            r'\<\<', (self.substitutions['left-angle-quote'],), md
+        )
+        rightAngledQuotePattern = SubstituteTextPattern(
+            r'\>\>', (self.substitutions['right-angle-quote'],), md
+        )
+        self.inlinePatterns.add(
+            'smarty-left-angle-quotes', leftAngledQuotePattern, '_begin'
+        )
+        self.inlinePatterns.add(
+            'smarty-right-angle-quotes',
+            rightAngledQuotePattern,
+            '>smarty-left-angle-quotes'
+        )
+
+    def educateQuotes(self, md):
+        lsquo = self.substitutions['left-single-quote']
+        rsquo = self.substitutions['right-single-quote']
+        ldquo = self.substitutions['left-double-quote']
+        rdquo = self.substitutions['right-double-quote']
+        patterns = (
+            (singleQuoteStartRe, (rsquo,)),
+            (doubleQuoteStartRe, (rdquo,)),
+            (doubleQuoteSetsRe, (ldquo + lsquo,)),
+            (singleQuoteSetsRe, (lsquo + ldquo,)),
+            (decadeAbbrRe, (rsquo,)),
+            (openingSingleQuotesRegex, (2, lsquo)),
+            (closingSingleQuotesRegex, (rsquo,)),
+            (closingSingleQuotesRegex2, (rsquo, 2)),
+            (remainingSingleQuotesRegex, (lsquo,)),
+            (openingDoubleQuotesRegex, (2, ldquo)),
+            (closingDoubleQuotesRegex, (rdquo,)),
+            (closingDoubleQuotesRegex2, (rdquo,)),
+            (remainingDoubleQuotesRegex, (ldquo,))
+        )
+        self._addPatterns(md, patterns, 'quotes')
+
+    def extendMarkdown(self, md, md_globals):
+        configs = self.getConfigs()
+        self.inlinePatterns = OrderedDict()
+        if configs['smart_ellipses']:
+            self.educateEllipses(md)
+        if configs['smart_quotes']:
+            self.educateQuotes(md)
+        if configs['smart_angled_quotes']:
+            self.educateAngledQuotes(md)
+        if configs['smart_dashes']:
+            self.educateDashes(md)
+        inlineProcessor = InlineProcessor(md)
+        inlineProcessor.inlinePatterns = self.inlinePatterns
+        md.treeprocessors.add('smarty', inlineProcessor, '_end')
+        md.ESCAPED_CHARS.extend(['"', "'"])
+
+
+def makeExtension(*args, **kwargs):
+    return SmartyExtension(*args, **kwargs)
diff --git a/markdown/extensions/tables.py b/markdown/extensions/tables.py
new file mode 100644
index 0000000..368321d
--- /dev/null
+++ b/markdown/extensions/tables.py
@@ -0,0 +1,102 @@
+"""
+Tables Extension for Python-Markdown
+====================================
+
+Added parsing of tables to Python-Markdown.
+
+See <https://pythonhosted.org/Markdown/extensions/tables.html>
+for documentation.
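+
+An illustrative sketch of the supported syntax (a header row, a separator
+row that may use `:` to set column alignment, then the body rows):
+
+    First Header  | Second Header
+    ------------- | :-----------:
+    Content Cell  | Content Cell
+    Content Cell  | Content Cell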
+
+Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..blockprocessors import BlockProcessor
+from ..util import etree
+
+
+class TableProcessor(BlockProcessor):
+    """ Process Tables. """
+
+    def test(self, parent, block):
+        rows = block.split('\n')
+        return (len(rows) > 1 and '|' in rows[0] and
+                '|' in rows[1] and '-' in rows[1] and
+                rows[1].strip()[0] in ['|', ':', '-'])
+
+    def run(self, parent, blocks):
+        """ Parse a table block and build table. """
+        block = blocks.pop(0).split('\n')
+        header = block[0].strip()
+        seperator = block[1].strip()
+        rows = [] if len(block) < 3 else block[2:]
+        # Get format type (bordered by pipes or not)
+        border = False
+        if header.startswith('|'):
+            border = True
+        # Get alignment of columns
+        align = []
+        for c in self._split_row(seperator, border):
+            if c.startswith(':') and c.endswith(':'):
+                align.append('center')
+            elif c.startswith(':'):
+                align.append('left')
+            elif c.endswith(':'):
+                align.append('right')
+            else:
+                align.append(None)
+        # Build table
+        table = etree.SubElement(parent, 'table')
+        thead = etree.SubElement(table, 'thead')
+        self._build_row(header, thead, align, border)
+        tbody = etree.SubElement(table, 'tbody')
+        for row in rows:
+            self._build_row(row.strip(), tbody, align, border)
+
+    def _build_row(self, row, parent, align, border):
+        """ Given a row of text, build table cells. """
+        tr = etree.SubElement(parent, 'tr')
+        tag = 'td'
+        if parent.tag == 'thead':
+            tag = 'th'
+        cells = self._split_row(row, border)
+        # We use align here rather than cells to ensure every row
+        # contains the same number of columns.
+        for i, a in enumerate(align):
+            c = etree.SubElement(tr, tag)
+            try:
+                c.text = cells[i].strip()
+            except IndexError:  # pragma: no cover
+                c.text = ""
+            if a:
+                c.set('align', a)
+
+    def _split_row(self, row, border):
+        """ split a row of text into list of cells. """
+        if border:
+            if row.startswith('|'):
+                row = row[1:]
+            if row.endswith('|'):
+                row = row[:-1]
+        return row.split('|')
+
+
+class TableExtension(Extension):
+    """ Add tables to Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add an instance of TableProcessor to BlockParser. """
+        md.parser.blockprocessors.add('table',
+                                      TableProcessor(md.parser),
+                                      '<hashheader')
+
+
+def makeExtension(*args, **kwargs):
+    return TableExtension(*args, **kwargs)
diff --git a/markdown/extensions/toc.py b/markdown/extensions/toc.py
new file mode 100644
index 0000000..b3cf898
--- /dev/null
+++ b/markdown/extensions/toc.py
@@ -0,0 +1,309 @@
+"""
+Table of Contents Extension for Python-Markdown
+===============================================
+
+See <https://pythonhosted.org/Markdown/extensions/toc.html>
+for documentation.
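+
+Basic usage (illustrative sketch; `your_text_string` is a placeholder for
+the source document):
+
+    import markdown
+
+    md = markdown.Markdown(extensions=['markdown.extensions.toc'])
+    html = md.convert(your_text_string)  # any '[TOC]' marker is replaced in place
+    toc = md.toc                         # the rendered TOC is also stored here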
+
+Original code Copyright 2008 [Jack Miller](http://codezen.org)
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import etree, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, string_type
+import re
+import unicodedata
+
+
+def slugify(value, separator):
+    """ Slugify a string, to make it URL friendly. """
+    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
+    value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
+    return re.sub('[%s\s]+' % separator, separator, value)
+
+
+IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
+
+
+def unique(id, ids):
+    """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
+    while id in ids or not id:
+        m = IDCOUNT_RE.match(id)
+        if m:
+            id = '%s_%d' % (m.group(1), int(m.group(2))+1)
+        else:
+            id = '%s_%d' % (id, 1)
+    ids.add(id)
+    return id
+
+
+def stashedHTML2text(text, md):
+    """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
+    def _html_sub(m):
+        """ Substitute raw html with plain text. """
+        try:
+            raw, safe = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
+        except (IndexError, TypeError):  # pragma: no cover
+            return m.group(0)
+        if md.safeMode and not safe:  # pragma: no cover
+            return ''
+        # Strip out tags and entities - leaving text
+        return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
+
+    return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
+
+
+def nest_toc_tokens(toc_list):
+    """Given an unsorted list with errors and skips, return a nested one.
+    [{'level': 1}, {'level': 2}]
+    =>
+    [{'level': 1, 'children': [{'level': 2, 'children': []}]}]
+
+    A wrong list is also converted:
+    [{'level': 2}, {'level': 1}]
+    =>
+    [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
+    """
+
+    ordered_list = []
+    if len(toc_list):
+        # Initialize everything by processing the first entry
+        last = toc_list.pop(0)
+        last['children'] = []
+        levels = [last['level']]
+        ordered_list.append(last)
+        parents = []
+
+        # Walk the rest nesting the entries properly
+        while toc_list:
+            t = toc_list.pop(0)
+            current_level = t['level']
+            t['children'] = []
+
+            # Reduce depth if current level < last item's level
+            if current_level < levels[-1]:
+                # Pop last level since we know we are less than it
+                levels.pop()
+
+                # Pop parents and levels we are less than or equal to
+                to_pop = 0
+                for p in reversed(parents):
+                    if current_level <= p['level']:
+                        to_pop += 1
+                    else:  # pragma: no cover
+                        break
+                if to_pop:
+                    levels = levels[:-to_pop]
+                    parents = parents[:-to_pop]
+
+                # Note current level as last
+                levels.append(current_level)
+
+            # Level is the same, so append to
+            # the current parent (if available)
+            if current_level == levels[-1]:
+                (parents[-1]['children'] if parents
+                 else ordered_list).append(t)
+
+            # Current level is > last item's level,
+            # So make last item a parent and append current as child
+            else:
+                last['children'].append(t)
+                parents.append(last)
+                levels.append(current_level)
+            last = t
+
+    return ordered_list
+
+
+class TocTreeprocessor(Treeprocessor):
+    def __init__(self, md, config):
+        super(TocTreeprocessor, self).__init__(md)
+
+        self.marker = config["marker"]
+        self.title = config["title"]
+        self.base_level = int(config["baselevel"]) - 1
+        self.slugify = config["slugify"]
+        self.sep = config["separator"]
+        self.use_anchors = parseBoolValue(config["anchorlink"])
+        self.use_permalinks = parseBoolValue(config["permalink"], False)
+        if self.use_permalinks is None:
+            self.use_permalinks = config["permalink"]
+
+        self.header_rgx = re.compile("[Hh][123456]")
+
+    def iterparent(self, root):
+        ''' Iterator wrapper to get parent and child all at once. '''
+        for parent in root.iter():
+            for child in parent:
+                yield parent, child
+
+    def replace_marker(self, root, elem):
+        ''' Replace marker with elem. '''
+        for (p, c) in self.iterparent(root):
+            text = ''.join(c.itertext()).strip()
+            if not text:
+                continue
+
+            # To keep the output from screwing up the
+            # validation by putting a <div> inside of a <p>
+            # we actually replace the <p> in its entirety.
+            # We do not allow the marker inside a header as that
+            # would cause an endless loop of placing a new TOC
+            # inside the previously generated TOC.
+            if c.text and c.text.strip() == self.marker and \
+               not self.header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
+                for i in range(len(p)):
+                    if p[i] == c:
+                        p[i] = elem
+                        break
+
+    def set_level(self, elem):
+        ''' Adjust header level according to base level. '''
+        level = int(elem.tag[-1]) + self.base_level
+        if level > 6:
+            level = 6
+        elem.tag = 'h%d' % level
+
+    def add_anchor(self, c, elem_id):  # @ReservedAssignment
+        anchor = etree.Element("a")
+        anchor.text = c.text
+        anchor.attrib["href"] = "#" + elem_id
+        anchor.attrib["class"] = "toclink"
+        c.text = ""
+        for elem in c:
+            anchor.append(elem)
+            c.remove(elem)
+        c.append(anchor)
+
+    def add_permalink(self, c, elem_id):
+        permalink = etree.Element("a")
+        permalink.text = ("%spara;" % AMP_SUBSTITUTE
+                          if self.use_permalinks is True
+                          else self.use_permalinks)
+        permalink.attrib["href"] = "#" + elem_id
+        permalink.attrib["class"] = "headerlink"
+        permalink.attrib["title"] = "Permanent link"
+        c.append(permalink)
+
+    def build_toc_div(self, toc_list):
+        """ Return a string div given a toc list. """
+        div = etree.Element("div")
+        div.attrib["class"] = "toc"
+
+        # Add title to the div
+        if self.title:
+            header = etree.SubElement(div, "span")
+            header.attrib["class"] = "toctitle"
+            header.text = self.title
+
+        def build_etree_ul(toc_list, parent):
+            ul = etree.SubElement(parent, "ul")
+            for item in toc_list:
+                # List item link, to be inserted into the toc div
+                li = etree.SubElement(ul, "li")
+                link = etree.SubElement(li, "a")
+                link.text = item.get('name', '')
+                link.attrib["href"] = '#' + item.get('id', '')
+                if item['children']:
+                    build_etree_ul(item['children'], li)
+            return ul
+
+        build_etree_ul(toc_list, div)
+        prettify = self.markdown.treeprocessors.get('prettify')
+        if prettify:
+            prettify.run(div)
+        return div
+
+    def run(self, doc):
+        # Get a list of id attributes
+        used_ids = set()
+        for el in doc.iter():
+            if "id" in el.attrib:
+                used_ids.add(el.attrib["id"])
+
+        toc_tokens = []
+        for el in doc.iter():
+            if isinstance(el.tag, string_type) and self.header_rgx.match(el.tag):
+                self.set_level(el)
+                text = ''.join(el.itertext()).strip()
+
+                # Do not override pre-existing ids
+                if "id" not in el.attrib:
+                    innertext = stashedHTML2text(text, self.markdown)
+                    el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
+
+                toc_tokens.append({
+                    'level': int(el.tag[-1]),
+                    'id': el.attrib["id"],
+                    'name': text
+                })
+
+                if self.use_anchors:
+                    self.add_anchor(el, el.attrib["id"])
+                if self.use_permalinks:
+                    self.add_permalink(el, el.attrib["id"])
+
+        div = self.build_toc_div(nest_toc_tokens(toc_tokens))
+        if self.marker:
+            self.replace_marker(doc, div)
+
+        # serialize and attach to markdown instance.
+        toc = self.markdown.serializer(div)
+        for pp in self.markdown.postprocessors.values():
+            toc = pp.run(toc)
+        self.markdown.toc = toc
+
+
+class TocExtension(Extension):
+
+    TreeProcessorClass = TocTreeprocessor
+
+    def __init__(self, *args, **kwargs):
+        self.config = {
+            "marker": ['[TOC]',
+                       'Text to find and replace with Table of Contents - '
+                       'Set to an empty string to disable. Defaults to "[TOC]"'],
+            "title": ["",
+                      "Title to insert into TOC <div> - "
+                      "Defaults to an empty string"],
+            "anchorlink": [False,
+                           "True if header should be a self link - "
+                           "Defaults to False"],
+            "permalink": [0,
+                          "True or link text if a Sphinx-style permalink should "
+                          "be added - Defaults to False"],
+            "baselevel": ['1', 'Base level for headers.'],
+            "slugify": [slugify,
+                        "Function to generate anchors based on header text - "
+                        "Defaults to the headerid ext's slugify function."],
+            'separator': ['-', 'Word separator. Defaults to "-".']
+        }
+
+        super(TocExtension, self).__init__(*args, **kwargs)
+
+    def extendMarkdown(self, md, md_globals):
+        md.registerExtension(self)
+        self.md = md
+        self.reset()
+        tocext = self.TreeProcessorClass(md, self.getConfigs())
+        # Headerid ext is set to '>prettify'. With this set to '_end',
+        # it should always come after headerid ext (and honor ids assigned
+        # by the header id extension) if both are used. Same goes for
+        # attr_list extension. This must come last because we don't want
+        # to redefine ids after toc is created. But we do want toc prettified.
+        md.treeprocessors.add("toc", tocext, "_end")
+
+    def reset(self):
+        self.md.toc = ''
+
+
+def makeExtension(*args, **kwargs):
+    return TocExtension(*args, **kwargs)
diff --git a/markdown/extensions/wikilinks.py b/markdown/extensions/wikilinks.py
new file mode 100644
index 0000000..94e1b67
--- /dev/null
+++ b/markdown/extensions/wikilinks.py
@@ -0,0 +1,89 @@
+'''
+WikiLinks Extension for Python-Markdown
+======================================
+
+Converts [[WikiLinks]] to relative links.
+
+See <https://pythonhosted.org/Markdown/extensions/wikilinks.html>
+for documentation.
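+
+Basic usage (illustrative sketch; with the default config the link below is
+rendered roughly as `<a class="wikilink" href="/Bracketed/">Bracketed</a>`,
+though attribute order may vary):
+
+    import markdown
+
+    html = markdown.markdown('A [[Bracketed]] link.',
+                             extensions=['markdown.extensions.wikilinks'])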
+
+Original code Copyright [Waylan Limberg](http://achinghead.com/).
+
+All changes Copyright The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+'''
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import Pattern
+from ..util import etree
+import re
+
+
+def build_url(label, base, end):
+    """ Build a url from the label, a base, and an end. """
+    clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
+    return '%s%s%s' % (base, clean_label, end)
+
+
+class WikiLinkExtension(Extension):
+
+    def __init__(self, *args, **kwargs):
+        self.config = {
+            'base_url': ['/', 'String to append to beginning of URL.'],
+            'end_url': ['/', 'String to append to end of URL.'],
+            'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
+            'build_url': [build_url, 'Callable formats URL from label.'],
+        }
+
+        super(WikiLinkExtension, self).__init__(*args, **kwargs)
+
+    def extendMarkdown(self, md, md_globals):
+        self.md = md
+
+        # append to end of inline patterns
+        WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
+        wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
+        wikilinkPattern.md = md
+        md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
+
+
+class WikiLinks(Pattern):
+    def __init__(self, pattern, config):
+        super(WikiLinks, self).__init__(pattern)
+        self.config = config
+
+    def handleMatch(self, m):
+        if m.group(2).strip():
+            base_url, end_url, html_class = self._getMeta()
+            label = m.group(2).strip()
+            url = self.config['build_url'](label, base_url, end_url)
+            a = etree.Element('a')
+            a.text = label
+            a.set('href', url)
+            if html_class:
+                a.set('class', html_class)
+        else:
+            a = ''
+        return a
+
+    def _getMeta(self):
+        """ Return meta data or config data. """
+        base_url = self.config['base_url']
+        end_url = self.config['end_url']
+        html_class = self.config['html_class']
+        if hasattr(self.md, 'Meta'):
+            if 'wiki_base_url' in self.md.Meta:
+                base_url = self.md.Meta['wiki_base_url'][0]
+            if 'wiki_end_url' in self.md.Meta:
+                end_url = self.md.Meta['wiki_end_url'][0]
+            if 'wiki_html_class' in self.md.Meta:
+                html_class = self.md.Meta['wiki_html_class'][0]
+        return base_url, end_url, html_class
+
+
+def makeExtension(*args, **kwargs):
+    return WikiLinkExtension(*args, **kwargs)
diff --git a/markdown/inlinepatterns.py b/markdown/inlinepatterns.py
new file mode 100644
index 0000000..95d358d
--- /dev/null
+++ b/markdown/inlinepatterns.py
@@ -0,0 +1,529 @@
+"""
+INLINE PATTERNS
+=============================================================================
+
+Inline patterns such as *emphasis* are handled by means of auxiliary
+objects, one per pattern.  Pattern objects must be instances of classes
+that extend markdown.Pattern.  Each pattern object uses a single regular
+expression and needs to support the following methods:
+
+    pattern.getCompiledRegExp() # returns a regular expression
+
+    pattern.handleMatch(m) # takes a match object and returns
+                           # an ElementTree element or just plain text
+
+All of python markdown's built-in patterns subclass from Pattern,
+but you can add additional patterns that don't.
+
+Also note that all the regular expressions used by inline patterns must
+capture the whole block.  For this reason, they all start with
+'^(.*?)' and end with '(.*?)$'.  For the built-in expressions, the Pattern
+base class takes care of adding the "^(.*?)" and "(.*?)$".
+
+Finally, the order in which regular expressions are applied is very
+important - e.g. if we first replace http://.../ links with <a> tags
+and _then_ try to replace inline html, we would end up with a mess.
+So, we apply the expressions in the following order:
+
+* escape and backticks have to go before everything else, so
+  that we can preempt any markdown patterns by escaping them.
+
+* then we handle auto-links (must be done before inline html)
+
+* then we handle inline HTML.  At this point we will simply
+  replace all inline HTML strings with a placeholder and add
+  the actual HTML to a hash.
+
+* then inline images (must be done before links)
+
+* then bracketed links, first regular then reference-style
+
+* finally we apply strong and emphasis
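+
+A minimal custom pattern built from these pieces (illustrative sketch; the
+`~~strikethrough~~` syntax, `DEL_RE` and `DelExtension` are hypothetical and
+not part of this module):
+
+    import markdown
+    from markdown.extensions import Extension
+    from markdown.inlinepatterns import SimpleTagPattern
+
+    DEL_RE = r'(~{2})(.+?)~{2}'  # ~~strikethrough~~
+
+    class DelExtension(Extension):
+        def extendMarkdown(self, md, md_globals):
+            # Wrap group(3) of each match in a <del> element.
+            md.inlinePatterns.add('del', SimpleTagPattern(DEL_RE, 'del'),
+                                  '<not_strong')
+
+    html = markdown.markdown('This is ~~gone~~.', extensions=[DelExtension()])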
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+from . import odict
+import re
+try:  # pragma: no cover
+    from urllib.parse import urlparse, urlunparse
+except ImportError:  # pragma: no cover
+    from urlparse import urlparse, urlunparse
+try:  # pragma: no cover
+    from html import entities
+except ImportError:  # pragma: no cover
+    import htmlentitydefs as entities
+
+
+def build_inlinepatterns(md_instance, **kwargs):
+    """ Build the default set of inline patterns for Markdown. """
+    inlinePatterns = odict.OrderedDict()
+    inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
+    inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
+    inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
+    inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
+    inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
+    inlinePatterns["image_reference"] = ImageReferencePattern(
+        IMAGE_REFERENCE_RE, md_instance
+    )
+    inlinePatterns["short_reference"] = ReferencePattern(
+        SHORT_REF_RE, md_instance
+    )
+    inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
+    inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
+    inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
+    if md_instance.safeMode != 'escape':
+        inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
+    inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
+    inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
+    inlinePatterns["em_strong"] = DoubleTagPattern(EM_STRONG_RE, 'strong,em')
+    inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'em,strong')
+    inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
+    inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
+    if md_instance.smart_emphasis:
+        inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
+    else:
+        inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
+    return inlinePatterns
+
+"""
+The actual regular expressions for patterns
+-----------------------------------------------------------------------------
+"""
+
+NOBRACKET = r'[^\]\[]*'
+BRK = (
+    r'\[(' +
+    (NOBRACKET + r'(\[')*6 +
+    (NOBRACKET + r'\])*')*6 +
+    NOBRACKET + r')\]'
+)
+NOIMG = r'(?<!\!)'
+
+# `e=f()` or ``e=f("`")``
+BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)'
+
+# \<
+ESCAPE_RE = r'\\(.)'
+
+# *emphasis*
+EMPHASIS_RE = r'(\*)([^\*]+)\2'
+
+# **strong**
+STRONG_RE = r'(\*{2}|_{2})(.+?)\2'
+
+# ***strongem*** or ***em*strong**
+EM_STRONG_RE = r'(\*|_)\2{2}(.+?)\2(.*?)\2{2}'
+
+# ***strong**em*
+STRONG_EM_RE = r'(\*|_)\2{2}(.+?)\2{2}(.*?)\2'
+
+# _smart_emphasis_
+SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)'
+
+# _emphasis_
+EMPHASIS_2_RE = r'(_)(.+?)\2'
+
+# [text](url) or [text](<url>) or [text](url "title")
+LINK_RE = NOIMG + BRK + \
+    r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
+
+# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
+IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
+
+# [Google][3]
+REFERENCE_RE = NOIMG + BRK + r'\s?\[([^\]]*)\]'
+
+# [Google]
+SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]'
+
+# ![alt text][2]
+IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]'
+
+# stand-alone * or _
+NOT_STRONG_RE = r'((^| )(\*|_)( |$))'
+
+# <http://www.123.com>
+AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
+
+# <me@example.com>
+AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'
+
+# <...>
+HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'
+
+# &amp;
+ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'
+
+# two spaces at end of line
+LINE_BREAK_RE = r'  \n'
+
+
+def dequote(string):
+    """Remove quotes from around a string."""
+    if ((string.startswith('"') and string.endswith('"')) or
+       (string.startswith("'") and string.endswith("'"))):
+        return string[1:-1]
+    else:
+        return string
+
+
+ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}")  # {@id=123}
+
+
+def handleAttributes(text, parent):
+    """Set values of an element based on attribute definitions ({@id=123})."""
+    def attributeCallback(match):
+        parent.set(match.group(1), match.group(2).replace('\n', ' '))
+    return ATTR_RE.sub(attributeCallback, text)
+
+
+"""
+The pattern classes
+-----------------------------------------------------------------------------
+"""
+
+
+class Pattern(object):
+    """Base class that inline patterns subclass. """
+
+    def __init__(self, pattern, markdown_instance=None):
+        """
+        Create an instance of an inline pattern.
+
+        Keyword arguments:
+
+        * pattern: A regular expression that matches a pattern
+
+        """
+        self.pattern = pattern
+        self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
+                                      re.DOTALL | re.UNICODE)
+
+        # Api for Markdown to pass safe_mode into instance
+        self.safe_mode = False
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+    def getCompiledRegExp(self):
+        """ Return a compiled regular expression. """
+        return self.compiled_re
+
+    def handleMatch(self, m):
+        """Return a ElementTree element from the given match.
+
+        Subclasses should override this method.
+
+        Keyword arguments:
+
+        * m: A re match object containing a match of the pattern.
+
+        """
+        pass  # pragma: no cover
+
+    def type(self):
+        """ Return class name, to define pattern type """
+        return self.__class__.__name__
+
+    def unescape(self, text):
+        """ Return unescaped text given text with an inline placeholder. """
+        try:
+            stash = self.markdown.treeprocessors['inline'].stashed_nodes
+        except KeyError:  # pragma: no cover
+            return text
+
+        def itertext(el):  # pragma: no cover
+            ' Reimplement Element.itertext for older python versions '
+            tag = el.tag
+            if not isinstance(tag, util.string_type) and tag is not None:
+                return
+            if el.text:
+                yield el.text
+            for e in el:
+                for s in itertext(e):
+                    yield s
+                if e.tail:
+                    yield e.tail
+
+        def get_stash(m):
+            id = m.group(1)
+            if id in stash:
+                value = stash.get(id)
+                if isinstance(value, util.string_type):
+                    return value
+                else:
+                    # An etree Element - return text content only
+                    return ''.join(itertext(value))
+        return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
+
+
+class SimpleTextPattern(Pattern):
+    """ Return a simple text of group(2) of a Pattern. """
+    def handleMatch(self, m):
+        return m.group(2)
+
+
+class EscapePattern(Pattern):
+    """ Return an escaped character. """
+
+    def handleMatch(self, m):
+        char = m.group(2)
+        if char in self.markdown.ESCAPED_CHARS:
+            return '%s%s%s' % (util.STX, ord(char), util.ETX)
+        else:
+            return None
+
+
+class SimpleTagPattern(Pattern):
+    """
+    Return element of type `tag` with a text attribute of group(3)
+    of a Pattern.
+
+    """
+    def __init__(self, pattern, tag):
+        Pattern.__init__(self, pattern)
+        self.tag = tag
+
+    def handleMatch(self, m):
+        el = util.etree.Element(self.tag)
+        el.text = m.group(3)
+        return el
+
+
+class SubstituteTagPattern(SimpleTagPattern):
+    """ Return an element of type `tag` with no children. """
+    def handleMatch(self, m):
+        return util.etree.Element(self.tag)
+
+
+class BacktickPattern(Pattern):
+    """ Return a `<code>` element containing the matching text. """
+    def __init__(self, pattern):
+        Pattern.__init__(self, pattern)
+        self.tag = "code"
+
+    def handleMatch(self, m):
+        el = util.etree.Element(self.tag)
+        el.text = util.AtomicString(m.group(3).strip())
+        return el
+
+
+class DoubleTagPattern(SimpleTagPattern):
+    """Return a ElementTree element nested in tag2 nested in tag1.
+
+    Useful for strong emphasis etc.
+
+    """
+    def handleMatch(self, m):
+        tag1, tag2 = self.tag.split(",")
+        el1 = util.etree.Element(tag1)
+        el2 = util.etree.SubElement(el1, tag2)
+        el2.text = m.group(3)
+        if len(m.groups()) == 5:
+            el2.tail = m.group(4)
+        return el1
+
+
+class HtmlPattern(Pattern):
+    """ Store raw inline html and return a placeholder. """
+    def handleMatch(self, m):
+        rawhtml = self.unescape(m.group(2))
+        place_holder = self.markdown.htmlStash.store(rawhtml)
+        return place_holder
+
+    def unescape(self, text):
+        """ Return unescaped text given text with an inline placeholder. """
+        try:
+            stash = self.markdown.treeprocessors['inline'].stashed_nodes
+        except KeyError:  # pragma: no cover
+            return text
+
+        def get_stash(m):
+            id = m.group(1)
+            value = stash.get(id)
+            if value is not None:
+                try:
+                    return self.markdown.serializer(value)
+                except:
+                    return '\%s' % value
+
+        return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
+
+
+class LinkPattern(Pattern):
+    """ Return a link element from the given match. """
+    def handleMatch(self, m):
+        el = util.etree.Element("a")
+        el.text = m.group(2)
+        title = m.group(13)
+        href = m.group(9)
+
+        if href:
+            if href[0] == "<":
+                href = href[1:-1]
+            el.set("href", self.sanitize_url(self.unescape(href.strip())))
+        else:
+            el.set("href", "")
+
+        if title:
+            title = dequote(self.unescape(title))
+            el.set("title", title)
+        return el
+
+    def sanitize_url(self, url):
+        """
+        Sanitize a url against xss attacks in "safe_mode".
+
+        Rather than specifically blacklisting `javascript:alert("XSS")` and all
+        its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
+        safe url formats. Most urls contain a network location, however some
+        are known not to (i.e.: mailto links). Script urls do not contain a
+        location. Additionally, for `javascript:...`, the scheme would be
+        "javascript" but some aliases will appear to `urlparse()` to have no
+        scheme. On top of that relative links (i.e.: "foo/bar.html") have no
+        scheme. Therefore we must check "path", "parameters", "query" and
+        "fragment" for any literal colons. We don't check "scheme" for colons
+        because it *should* never have any and "netloc" must allow the form:
+        `username:password@host:port`.
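+
+        For example (illustrative): in safe mode `javascript:alert('XSS')`
+        is reduced to an empty string, while `http://example.com/page?x=1`
+        passes all of the checks and is returned unchanged.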
+
+        """
+        if not self.markdown.safeMode:
+            # Return immediately, bypassing parsing.
+            return url
+
+        try:
+            scheme, netloc, path, params, query, fragment = url = urlparse(url)
+        except ValueError:  # pragma: no cover
+            # Bad url - so bad it couldn't be parsed.
+            return ''
+
+        locless_schemes = ['', 'mailto', 'news']
+        allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
+        if scheme not in allowed_schemes:
+            # Not a known (allowed) scheme. Not safe.
+            return ''
+
+        if netloc == '' and scheme not in locless_schemes:  # pragma: no cover
+            # This should not happen. Treat as suspect.
+            return ''
+
+        for part in url[2:]:
+            if ":" in part:
+                # A colon in "path", "parameters", "query"
+                # or "fragment" is suspect.
+                return ''
+
+        # Url passes all tests. Return url as-is.
+        return urlunparse(url)
+
+
+class ImagePattern(LinkPattern):
+    """ Return a img element from the given match. """
+    def handleMatch(self, m):
+        el = util.etree.Element("img")
+        src_parts = m.group(9).split()
+        if src_parts:
+            src = src_parts[0]
+            if src[0] == "<" and src[-1] == ">":
+                src = src[1:-1]
+            el.set('src', self.sanitize_url(self.unescape(src)))
+        else:
+            el.set('src', "")
+        if len(src_parts) > 1:
+            el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
+
+        if self.markdown.enable_attributes:
+            truealt = handleAttributes(m.group(2), el)
+        else:
+            truealt = m.group(2)
+
+        el.set('alt', self.unescape(truealt))
+        return el
+
+
+class ReferencePattern(LinkPattern):
+    """ Match to a stored reference and return link element. """
+
+    NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
+
+    def handleMatch(self, m):
+        try:
+            id = m.group(9).lower()
+        except IndexError:
+            id = None
+        if not id:
+            # if we got something like "[Google][]" or "[Google]"
+            # we'll use "google" as the id
+            id = m.group(2).lower()
+
+        # Clean up linebreaks in id
+        id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
+        if id not in self.markdown.references:  # ignore undefined refs
+            return None
+        href, title = self.markdown.references[id]
+
+        text = m.group(2)
+        return self.makeTag(href, title, text)
+
+    def makeTag(self, href, title, text):
+        el = util.etree.Element('a')
+
+        el.set('href', self.sanitize_url(href))
+        if title:
+            el.set('title', title)
+
+        el.text = text
+        return el
+
+
+class ImageReferencePattern(ReferencePattern):
+    """ Match to a stored reference and return img element. """
+    def makeTag(self, href, title, text):
+        el = util.etree.Element("img")
+        el.set("src", self.sanitize_url(href))
+        if title:
+            el.set("title", title)
+
+        if self.markdown.enable_attributes:
+            text = handleAttributes(text, el)
+
+        el.set("alt", self.unescape(text))
+        return el
+
+
+class AutolinkPattern(Pattern):
+    """ Return a link Element given an autolink (`<http://example/com>`). """
+    def handleMatch(self, m):
+        el = util.etree.Element("a")
+        el.set('href', self.unescape(m.group(2)))
+        el.text = util.AtomicString(m.group(2))
+        return el
+
+
+class AutomailPattern(Pattern):
+    """
+    Return a mailto link Element given an automail link (`<foo@example.com>`).
+    """
+    def handleMatch(self, m):
+        el = util.etree.Element('a')
+        email = self.unescape(m.group(2))
+        if email.startswith("mailto:"):
+            email = email[len("mailto:"):]
+
+        def codepoint2name(code):
+            """Return entity definition by code, or the code if not defined."""
+            entity = entities.codepoint2name.get(code)
+            if entity:
+                return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
+            else:
+                return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
+
+        letters = [codepoint2name(ord(letter)) for letter in email]
+        el.text = util.AtomicString(''.join(letters))
+
+        mailto = "mailto:" + email
+        mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
+                          ord(letter) for letter in mailto])
+        el.set('href', mailto)
+        return el
diff --git a/markdown/odict.py b/markdown/odict.py
new file mode 100644
index 0000000..584ad7c
--- /dev/null
+++ b/markdown/odict.py
@@ -0,0 +1,191 @@
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from . import util
+from copy import deepcopy
+
+
+class OrderedDict(dict):
+    """
+    A dictionary that keeps its keys in the order in which they're inserted.
+
+    Copied from Django's SortedDict with some modifications.
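+
+    Values can also be inserted relative to existing keys with `add()`, using
+    location strings such as '_begin', '_end', '<key' (before `key`) and
+    '>key' (after `key`). Illustrative sketch:
+
+        od = OrderedDict()
+        od['a'] = 1
+        od['c'] = 3
+        od.add('b', 2, '>a')  # insert immediately after 'a'
+        assert list(od.keys()) == ['a', 'b', 'c']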
+
+    """
+    def __new__(cls, *args, **kwargs):
+        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
+        instance.keyOrder = []
+        return instance
+
+    def __init__(self, data=None):
+        if data is None or isinstance(data, dict):
+            data = data or []
+            super(OrderedDict, self).__init__(data)
+            self.keyOrder = list(data) if data else []
+        else:
+            super(OrderedDict, self).__init__()
+            super_set = super(OrderedDict, self).__setitem__
+            for key, value in data:
+                # Take the ordering from first key
+                if key not in self:
+                    self.keyOrder.append(key)
+                # But override with last value in data (dict() does this)
+                super_set(key, value)
+
+    def __deepcopy__(self, memo):
+        return self.__class__([(key, deepcopy(value, memo))
+                               for key, value in self.items()])
+
+    def __copy__(self):
+        # Python's default copy implementation will alter the state
+        # of self. The reason for this seems complex but is likely related to
+        # subclassing dict.
+        return self.copy()
+
+    def __setitem__(self, key, value):
+        if key not in self:
+            self.keyOrder.append(key)
+        super(OrderedDict, self).__setitem__(key, value)
+
+    def __delitem__(self, key):
+        super(OrderedDict, self).__delitem__(key)
+        self.keyOrder.remove(key)
+
+    def __iter__(self):
+        return iter(self.keyOrder)
+
+    def __reversed__(self):
+        return reversed(self.keyOrder)
+
+    def pop(self, k, *args):
+        result = super(OrderedDict, self).pop(k, *args)
+        try:
+            self.keyOrder.remove(k)
+        except ValueError:
+            # Key wasn't in the dictionary in the first place. No problem.
+            pass
+        return result
+
+    def popitem(self):
+        result = super(OrderedDict, self).popitem()
+        self.keyOrder.remove(result[0])
+        return result
+
+    def _iteritems(self):
+        for key in self.keyOrder:
+            yield key, self[key]
+
+    def _iterkeys(self):
+        for key in self.keyOrder:
+            yield key
+
+    def _itervalues(self):
+        for key in self.keyOrder:
+            yield self[key]
+
+    if util.PY3:  # pragma: no cover
+        items = _iteritems
+        keys = _iterkeys
+        values = _itervalues
+    else:  # pragma: no cover
+        iteritems = _iteritems
+        iterkeys = _iterkeys
+        itervalues = _itervalues
+
+        def items(self):
+            return [(k, self[k]) for k in self.keyOrder]
+
+        def keys(self):
+            return self.keyOrder[:]
+
+        def values(self):
+            return [self[k] for k in self.keyOrder]
+
+    def update(self, dict_):
+        for k in dict_:
+            self[k] = dict_[k]
+
+    def setdefault(self, key, default):
+        if key not in self:
+            self.keyOrder.append(key)
+        return super(OrderedDict, self).setdefault(key, default)
+
+    def value_for_index(self, index):
+        """Returns the value of the item at the given zero-based index."""
+        return self[self.keyOrder[index]]
+
+    def insert(self, index, key, value):
+        """Inserts the key, value pair before the item with the given index."""
+        if key in self.keyOrder:
+            n = self.keyOrder.index(key)
+            del self.keyOrder[n]
+            if n < index:
+                index -= 1
+        self.keyOrder.insert(index, key)
+        super(OrderedDict, self).__setitem__(key, value)
+
+    def copy(self):
+        """Returns a copy of this object."""
+        # This way of initializing the copy means it works for subclasses, too.
+        return self.__class__(self)
+
+    def __repr__(self):
+        """
+        Replaces the normal dict.__repr__ with a version that returns the keys
+        in their insertion order.
+        """
+        return '{%s}' % ', '.join(
+            ['%r: %r' % (k, v) for k, v in self._iteritems()]
+        )
+
+    def clear(self):
+        super(OrderedDict, self).clear()
+        self.keyOrder = []
+
+    def index(self, key):
+        """ Return the index of a given key. """
+        try:
+            return self.keyOrder.index(key)
+        except ValueError:
+            raise ValueError("Element '%s' was not found in OrderedDict" % key)
+
+    def index_for_location(self, location):
+        """ Return index or None for a given location. """
+        if location == '_begin':
+            i = 0
+        elif location == '_end':
+            i = None
+        elif location.startswith('<') or location.startswith('>'):
+            i = self.index(location[1:])
+            if location.startswith('>'):
+                if i >= len(self):
+                    # last item
+                    i = None
+                else:
+                    i += 1
+        else:
+            raise ValueError('Not a valid location: "%s". Location key '
+                             'must start with a ">" or "<".' % location)
+        return i
+
+    def add(self, key, value, location):
+        """ Insert by key location. """
+        i = self.index_for_location(location)
+        if i is not None:
+            self.insert(i, key, value)
+        else:
+            self.__setitem__(key, value)
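+
+    # Illustrative sketch (editor's note, not upstream documentation): the
+    # location strings accepted by index_for_location/add/link are either
+    # "_begin", "_end", or an existing key prefixed with "<" (before it) or
+    # ">" (after it).  For example, assuming a freshly populated dict:
+    #
+    #     od = OrderedDict()
+    #     od['a'] = 1
+    #     od['c'] = 3
+    #     od.add('b', 2, '>a')   # keys are now ['a', 'b', 'c']
+    #     od.link('c', '<a')     # keys are now ['c', 'a', 'b']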
+
+    def link(self, key, location):
+        """ Change location of an existing item. """
+        n = self.keyOrder.index(key)
+        del self.keyOrder[n]
+        try:
+            i = self.index_for_location(location)
+            if i is not None:
+                self.keyOrder.insert(i, key)
+            else:
+                self.keyOrder.append(key)
+        except Exception as e:
+            # restore to prevent data loss and reraise
+            self.keyOrder.insert(n, key)
+            raise e
diff --git a/markdown/postprocessors.py b/markdown/postprocessors.py
new file mode 100644
index 0000000..2d4dcb5
--- /dev/null
+++ b/markdown/postprocessors.py
@@ -0,0 +1,108 @@
+"""
+POST-PROCESSORS
+=============================================================================
+
+Markdown also allows post-processors, which are similar to preprocessors in
+that they need to implement a "run" method. However, they are run after core
+processing.
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+from . import odict
+import re
+
+
+def build_postprocessors(md_instance, **kwargs):
+    """ Build the default postprocessors for Markdown. """
+    postprocessors = odict.OrderedDict()
+    postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
+    postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
+    postprocessors["unescape"] = UnescapePostprocessor()
+    return postprocessors
+
+
+class Postprocessor(util.Processor):
+    """
+    Postprocessors are run after the ElementTree is converted back into text.
+
+    Each Postprocessor implements a "run" method that takes a pointer to a
+    text string, modifies it as necessary and returns a text string.
+
+    Postprocessors must extend markdown.postprocessors.Postprocessor.
+
+    """
+
+    def run(self, text):
+        """
+        Subclasses of Postprocessor should implement a `run` method, which
+        takes the html document as a single text string and returns a
+        (possibly modified) string.
+
+        """
+        pass  # pragma: no cover
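+
+# A minimal subclass sketch (editor's illustration; the class name below is
+# hypothetical and not part of upstream Python-Markdown):
+#
+#     class SmileyPostprocessor(Postprocessor):
+#         def run(self, text):
+#             return text.replace(':-)', '&#9786;')
+#
+# Postprocessors are registered on a Markdown instance's postprocessors
+# OrderedDict and are run, in order, on the serialized HTML string.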
+
+
+class RawHtmlPostprocessor(Postprocessor):
+    """ Restore raw html to the document. """
+
+    def run(self, text):
+        """ Iterate over html stash and restore "safe" html. """
+        for i in range(self.markdown.htmlStash.html_counter):
+            html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
+            if self.markdown.safeMode and not safe:
+                if str(self.markdown.safeMode).lower() == 'escape':
+                    html = self.escape(html)
+                elif str(self.markdown.safeMode).lower() == 'remove':
+                    html = ''
+                else:
+                    html = self.markdown.html_replacement_text
+            if (self.isblocklevel(html) and
+               (safe or not self.markdown.safeMode)):
+                text = text.replace(
+                    "<p>%s</p>" %
+                    (self.markdown.htmlStash.get_placeholder(i)),
+                    html + "\n"
+                )
+            text = text.replace(
+                self.markdown.htmlStash.get_placeholder(i), html
+            )
+        return text
+
+    def escape(self, html):
+        """ Basic html escaping """
+        html = html.replace('&', '&amp;')
+        html = html.replace('<', '&lt;')
+        html = html.replace('>', '&gt;')
+        return html.replace('"', '&quot;')
+
+    def isblocklevel(self, html):
+        m = re.match(r'^\<\/?([^ >]+)', html)
+        if m:
+            if m.group(1)[0] in ('!', '?', '@', '%'):
+                # Comment, php etc...
+                return True
+            return util.isBlockLevel(m.group(1))
+        return False
+
+
+class AndSubstitutePostprocessor(Postprocessor):
+    """ Restore valid entities """
+
+    def run(self, text):
+        text = text.replace(util.AMP_SUBSTITUTE, "&")
+        return text
+
+
+class UnescapePostprocessor(Postprocessor):
+    """ Restore escaped chars """
+
+    RE = re.compile(r'%s(\d+)%s' % (util.STX, util.ETX))
+
+    def unescape(self, m):
+        return util.int2str(int(m.group(1)))
+
+    def run(self, text):
+        return self.RE.sub(self.unescape, text)
diff --git a/markdown/preprocessors.py b/markdown/preprocessors.py
new file mode 100644
index 0000000..7fd38d3
--- /dev/null
+++ b/markdown/preprocessors.py
@@ -0,0 +1,345 @@
+"""
+PRE-PROCESSORS
+=============================================================================
+
+Preprocessors work on source text before we start doing anything too
+complicated.
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+from . import odict
+import re
+
+
+def build_preprocessors(md_instance, **kwargs):
+    """ Build the default set of preprocessors used by Markdown. """
+    preprocessors = odict.OrderedDict()
+    preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
+    if md_instance.safeMode != 'escape':
+        preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
+    preprocessors["reference"] = ReferencePreprocessor(md_instance)
+    return preprocessors
+
+
+class Preprocessor(util.Processor):
+    """
+    Preprocessors are run after the text is broken into lines.
+
+    Each preprocessor implements a "run" method that takes a pointer to a
+    list of lines of the document, modifies it as necessary and returns
+    either the same pointer or a pointer to a new list.
+
+    Preprocessors must extend markdown.preprocessors.Preprocessor.
+
+    """
+    def run(self, lines):
+        """
+        Each subclass of Preprocessor should override the `run` method, which
+        takes the document as a list of strings split by newlines and returns
+        the (possibly modified) list of lines.
+
+        """
+        pass  # pragma: no cover
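+
+# A minimal subclass sketch (editor's illustration; the class name below is
+# hypothetical and not part of upstream Python-Markdown):
+#
+#     class StripSlashCommentsPreprocessor(Preprocessor):
+#         def run(self, lines):
+#             return [line for line in lines if not line.startswith('//')]
+#
+# Preprocessors are registered on a Markdown instance's preprocessors
+# OrderedDict and are run, in order, on the list of source lines.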
+
+
+class NormalizeWhitespace(Preprocessor):
+    """ Normalize whitespace for consistant parsing. """
+
+    def run(self, lines):
+        source = '\n'.join(lines)
+        source = source.replace(util.STX, "").replace(util.ETX, "")
+        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
+        source = source.expandtabs(self.markdown.tab_length)
+        source = re.sub(r'(?<=\n) +\n', '\n', source)
+        return source.split('\n')
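+
+# Worked example (editor's note, not upstream documentation), assuming md is
+# a Markdown instance with the default tab_length of 4:
+#
+#     NormalizeWhitespace(md).run(['a\tb\r', '  ', 'c'])
+#     # -> ['a   b', '', 'c', '', '']
+#
+# Tabs are expanded, CR/CRLF line endings become LF, whitespace-only lines
+# are blanked, and two trailing blank lines are appended so the final block
+# is terminated.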
+
+
+class HtmlBlockPreprocessor(Preprocessor):
+    """Remove html blocks from the text and store them for later retrieval."""
+
+    right_tag_patterns = ["</%s>", "%s>"]
+    attrs_pattern = r"""
+        \s+(?P<attr>[^>"'/= ]+)=(?P<q>['"])(?P<value>.*?)(?P=q) # attr="value"
+        |                                                       # OR
+        \s+(?P<attr1>[^>"'/= ]+)=(?P<value1>[^> ]+)             # attr=value
+        |                                                       # OR
+        \s+(?P<attr2>[^>"'/= ]+)                                # attr
+        """
+    left_tag_pattern = r'^\<(?P<tag>[^> ]+)(?P<attrs>(%s)*)\s*\/?\>?' % \
+                       attrs_pattern
+    attrs_re = re.compile(attrs_pattern, re.VERBOSE)
+    left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
+    markdown_in_raw = False
+
+    def _get_left_tag(self, block):
+        m = self.left_tag_re.match(block)
+        if m:
+            tag = m.group('tag')
+            raw_attrs = m.group('attrs')
+            attrs = {}
+            if raw_attrs:
+                for ma in self.attrs_re.finditer(raw_attrs):
+                    if ma.group('attr'):
+                        if ma.group('value'):
+                            attrs[ma.group('attr').strip()] = ma.group('value')
+                        else:
+                            attrs[ma.group('attr').strip()] = ""
+                    elif ma.group('attr1'):
+                        if ma.group('value1'):
+                            attrs[ma.group('attr1').strip()] = ma.group(
+                                'value1'
+                            )
+                        else:
+                            attrs[ma.group('attr1').strip()] = ""
+                    elif ma.group('attr2'):
+                        attrs[ma.group('attr2').strip()] = ""
+            return tag, len(m.group(0)), attrs
+        else:
+            tag = block[1:].split(">", 1)[0].lower()
+            return tag, len(tag)+2, {}
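+
+    # Illustrative sketch (editor's note): for an opening tag with attributes,
+    # _get_left_tag returns the tag name, the length of the matched opening
+    # tag, and a dict of its attributes, e.g.
+    #
+    #     self._get_left_tag('<div class="note" markdown>text')
+    #     # -> ('div', 27, {'class': 'note', 'markdown': ''})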
+
+    def _recursive_tagfind(self, ltag, rtag, start_index, block):
+        while 1:
+            i = block.find(rtag, start_index)
+            if i == -1:
+                return -1
+            j = block.find(ltag, start_index)
+            # if no ltag, or rtag found before another ltag, return index
+            if (j > i or j == -1):
+                return i + len(rtag)
+            # another ltag found before rtag, use end of ltag as starting
+            # point and search again
+            j = block.find('>', j)
+            start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
+            if start_index == -1:
+                # HTML potentially malformed; ltag has no corresponding
+                # rtag
+                return -1
+
+    def _get_right_tag(self, left_tag, left_index, block):
+        for p in self.right_tag_patterns:
+            tag = p % left_tag
+            i = self._recursive_tagfind(
+                "<%s" % left_tag, tag, left_index, block
+            )
+            if i > 2:
+                return tag.lstrip("<").rstrip(">"), i
+        return block.rstrip()[-left_index:-1].lower(), len(block)
+
+    def _equal_tags(self, left_tag, right_tag):
+        if left_tag[0] in ['?', '@', '%']:  # handle PHP, etc.
+            return True
+        if ("/" + left_tag) == right_tag:
+            return True
+        if (right_tag == "--" and left_tag == "--"):
+            return True
+        elif left_tag == right_tag[1:] and right_tag[0] == "/":
+            return True
+        else:
+            return False
+
+    def _is_oneliner(self, tag):
+        return (tag in ['hr', 'hr/'])
+
+    def _stringindex_to_listindex(self, stringindex, items):
+        """
+        Same effect as concatenating the strings in items,
+        finding the character to which stringindex refers in that string,
+        and returning the index of the item in which that character resides.
+        """
+        items.append('dummy')
+        i, count = 0, 0
+        while count <= stringindex:
+            count += len(items[i])
+            i += 1
+        return i - 1
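+
+    # Worked example (editor's note): with items ['ab', 'cde', 'fg'] the
+    # concatenation is 'abcdefg', and character index 5 ('f') falls inside
+    # the item at list index 2, so
+    #
+    #     self._stringindex_to_listindex(5, ['ab', 'cde', 'fg'])  # -> 2
+    #
+    # Note that the method appends a 'dummy' sentinel to the items list it
+    # is given, mutating it in place.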
+
+    def _nested_markdown_in_html(self, items):
+        """Find and process html child elements of the given element block."""
+        for i, item in enumerate(items):
+            if self.left_tag_re.match(item):
+                left_tag, left_index, attrs = \
+                    self._get_left_tag(''.join(items[i:]))
+                right_tag, data_index = self._get_right_tag(
+                    left_tag, left_index, ''.join(items[i:]))
+                right_listindex = \
+                    self._stringindex_to_listindex(data_index, items[i:]) + i
+                if 'markdown' in attrs.keys():
+                    items[i] = items[i][left_index:]  # remove opening tag
+                    placeholder = self.markdown.htmlStash.store_tag(
+                        left_tag, attrs, i + 1, right_listindex + 1)
+                    items.insert(i, placeholder)
+                    if len(items) - right_listindex <= 1:  # last nest, no tail
+                        right_listindex -= 1
+                    items[right_listindex] = items[right_listindex][
+                        :-len(right_tag) - 2]  # remove closing tag
+                else:  # raw html
+                    if len(items) - right_listindex <= 1:  # last element
+                        right_listindex -= 1
+                    offset = 1 if i == right_listindex else 0
+                    placeholder = self.markdown.htmlStash.store('\n\n'.join(
+                        items[i:right_listindex + offset]))
+                    del items[i:right_listindex + offset]
+                    items.insert(i, placeholder)
+        return items
+
+    def run(self, lines):
+        text = "\n".join(lines)
+        new_blocks = []
+        text = text.rsplit("\n\n")
+        items = []
+        left_tag = ''
+        right_tag = ''
+        in_tag = False  # flag
+
+        while text:
+            block = text[0]
+            if block.startswith("\n"):
+                block = block[1:]
+            text = text[1:]
+
+            if block.startswith("\n"):
+                block = block[1:]
+
+            if not in_tag:
+                if block.startswith("<") and len(block.strip()) > 1:
+
+                    if block[1:4] == "!--":
+                        # is a comment block
+                        left_tag, left_index, attrs = "--", 2, {}
+                    else:
+                        left_tag, left_index, attrs = self._get_left_tag(block)
+                    right_tag, data_index = self._get_right_tag(left_tag,
+                                                                left_index,
+                                                                block)
+                    # keep checking conditions below and maybe just append
+
+                    if data_index < len(block) and (util.isBlockLevel(left_tag) or left_tag == '--'):
+                        text.insert(0, block[data_index:])
+                        block = block[:data_index]
+
+                    if not (util.isBlockLevel(left_tag) or block[1] in ["!", "?", "@", "%"]):
+                        new_blocks.append(block)
+                        continue
+
+                    if self._is_oneliner(left_tag):
+                        new_blocks.append(block.strip())
+                        continue
+
+                    if block.rstrip().endswith(">") \
+                            and self._equal_tags(left_tag, right_tag):
+                        if self.markdown_in_raw and 'markdown' in attrs.keys():
+                            block = block[left_index:-len(right_tag) - 2]
+                            new_blocks.append(self.markdown.htmlStash.
+                                              store_tag(left_tag, attrs, 0, 2))
+                            new_blocks.extend([block])
+                        else:
+                            new_blocks.append(
+                                self.markdown.htmlStash.store(block.strip()))
+                        continue
+                    else:
+                        # if is block level tag and is not complete
+                        if (not self._equal_tags(left_tag, right_tag)) and \
+                           (util.isBlockLevel(left_tag) or left_tag == "--"):
+                            items.append(block.strip())
+                            in_tag = True
+                        else:
+                            new_blocks.append(
+                                self.markdown.htmlStash.store(block.strip())
+                            )
+                        continue
+
+                else:
+                    new_blocks.append(block)
+
+            else:
+                items.append(block)
+
+                right_tag, data_index = self._get_right_tag(left_tag, 0, block)
+
+                if self._equal_tags(left_tag, right_tag):
+                    # if find closing tag
+
+                    if data_index < len(block):
+                        # we have more text after right_tag
+                        items[-1] = block[:data_index]
+                        text.insert(0, block[data_index:])
+
+                    in_tag = False
+                    if self.markdown_in_raw and 'markdown' in attrs.keys():
+                        items[0] = items[0][left_index:]
+                        items[-1] = items[-1][:-len(right_tag) - 2]
+                        if items[len(items) - 1]:  # not a newline/empty string
+                            right_index = len(items) + 3
+                        else:
+                            right_index = len(items) + 2
+                        new_blocks.append(self.markdown.htmlStash.store_tag(
+                            left_tag, attrs, 0, right_index))
+                        placeholderslen = len(self.markdown.htmlStash.tag_data)
+                        new_blocks.extend(
+                            self._nested_markdown_in_html(items))
+                        nests = len(self.markdown.htmlStash.tag_data) - \
+                            placeholderslen
+                        self.markdown.htmlStash.tag_data[-1 - nests][
+                            'right_index'] += nests - 2
+                    else:
+                        new_blocks.append(
+                            self.markdown.htmlStash.store('\n\n'.join(items)))
+                    items = []
+
+        if items:
+            if self.markdown_in_raw and 'markdown' in attrs.keys():
+                items[0] = items[0][left_index:]
+                items[-1] = items[-1][:-len(right_tag) - 2]
+                if items[len(items) - 1]:  # not a newline/empty string
+                    right_index = len(items) + 3
+                else:
+                    right_index = len(items) + 2
+                new_blocks.append(
+                    self.markdown.htmlStash.store_tag(
+                        left_tag, attrs, 0, right_index))
+                placeholderslen = len(self.markdown.htmlStash.tag_data)
+                new_blocks.extend(self._nested_markdown_in_html(items))
+                nests = len(self.markdown.htmlStash.tag_data) - placeholderslen
+                self.markdown.htmlStash.tag_data[-1 - nests][
+                    'right_index'] += nests - 2
+            else:
+                new_blocks.append(
+                    self.markdown.htmlStash.store('\n\n'.join(items)))
+            new_blocks.append('\n')
+
+        new_text = "\n\n".join(new_blocks)
+        return new_text.split("\n")
+
+
+class ReferencePreprocessor(Preprocessor):
+    """ Remove reference definitions from text and store for later use. """
+
+    TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
+    RE = re.compile(
+        r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL
+    )
+    TITLE_RE = re.compile(r'^%s$' % TITLE)
+
+    def run(self, lines):
+        new_text = []
+        while lines:
+            line = lines.pop(0)
+            m = self.RE.match(line)
+            if m:
+                id = m.group(1).strip().lower()
+                link = m.group(2).lstrip('<').rstrip('>')
+                t = m.group(5) or m.group(6) or m.group(7)
+                if not t and lines:
+                    # Check the next line for a title (guard against running
+                    # off the end of the document).
+                    tm = self.TITLE_RE.match(lines[0])
+                    if tm:
+                        lines.pop(0)
+                        t = tm.group(2) or tm.group(3) or tm.group(4)
+                self.markdown.references[id] = (link, t)
+            else:
+                new_text.append(line)
+
+        return new_text  # + "\n"
diff --git a/markdown/serializers.py b/markdown/serializers.py
new file mode 100644
index 0000000..1e8d9dd
--- /dev/null
+++ b/markdown/serializers.py
@@ -0,0 +1,282 @@
+# markdown/serializers.py
+#
+# Add x/html serialization to ElementTree
+# Taken from ElementTree 1.3 preview with slight modifications
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh.  All rights reserved.
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS.  IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+ElementTree = util.etree.ElementTree
+QName = util.etree.QName
+if hasattr(util.etree, 'test_comment'):  # pragma: no cover
+    Comment = util.etree.test_comment
+else:  # pragma: no cover
+    Comment = util.etree.Comment
+PI = util.etree.PI
+ProcessingInstruction = util.etree.ProcessingInstruction
+
+__all__ = ['to_html_string', 'to_xhtml_string']
+
+HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
+              "img", "input", "isindex", "link", "meta" "param")
+
+try:
+    HTML_EMPTY = set(HTML_EMPTY)
+except NameError:  # pragma: no cover
+    pass
+
+_namespace_map = {
+    # "well-known" namespace prefixes
+    "http://www.w3.org/XML/1998/namespace": "xml",
+    "http://www.w3.org/1999/xhtml": "html",
+    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+    # xml schema
+    "http://www.w3.org/2001/XMLSchema": "xs",
+    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+    # Dublin Core
+    "http://purl.org/dc/elements/1.1/": "dc",
+}
+
+
+def _raise_serialization_error(text):  # pragma: no cover
+    raise TypeError(
+        "cannot serialize %r (type %s)" % (text, type(text).__name__)
+        )
+
+
+def _encode(text, encoding):
+    try:
+        return text.encode(encoding, "xmlcharrefreplace")
+    except (TypeError, AttributeError):  # pragma: no cover
+        _raise_serialization_error(text)
+
+
+def _escape_cdata(text):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so.  assume that's, by far,
+        # the most common case in most applications.
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        return text
+    except (TypeError, AttributeError):  # pragma: no cover
+        _raise_serialization_error(text)
+
+
+def _escape_attrib(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        if "\n" in text:
+            text = text.replace("\n", "&#10;")
+        return text
+    except (TypeError, AttributeError):  # pragma: no cover
+        _raise_serialization_error(text)
+
+
+def _escape_attrib_html(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        return text
+    except (TypeError, AttributeError):  # pragma: no cover
+        _raise_serialization_error(text)
+
+
+def _serialize_html(write, elem, qnames, namespaces, format):
+    tag = elem.tag
+    text = elem.text
+    if tag is Comment:
+        write("<!--%s-->" % _escape_cdata(text))
+    elif tag is ProcessingInstruction:
+        write("<?%s?>" % _escape_cdata(text))
+    else:
+        tag = qnames[tag]
+        if tag is None:
+            if text:
+                write(_escape_cdata(text))
+            for e in elem:
+                _serialize_html(write, e, qnames, None, format)
+        else:
+            write("<" + tag)
+            items = elem.items()
+            if items or namespaces:
+                items = sorted(items)  # lexical order
+                for k, v in items:
+                    if isinstance(k, QName):
+                        k = k.text
+                    if isinstance(v, QName):
+                        v = qnames[v.text]
+                    else:
+                        v = _escape_attrib_html(v)
+                    if qnames[k] == v and format == 'html':
+                        # handle boolean attributes
+                        write(" %s" % v)
+                    else:
+                        write(" %s=\"%s\"" % (qnames[k], v))
+                if namespaces:
+                    # sort on prefix; use sorted() so this also works with
+                    # Python 3 dict views
+                    items = sorted(namespaces.items(), key=lambda x: x[1])
+                    for v, k in items:
+                        if k:
+                            k = ":" + k
+                        write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
+            if format == "xhtml" and tag.lower() in HTML_EMPTY:
+                write(" />")
+            else:
+                write(">")
+                if text:
+                    if tag.lower() in ["script", "style"]:
+                        write(text)
+                    else:
+                        write(_escape_cdata(text))
+                for e in elem:
+                    _serialize_html(write, e, qnames, None, format)
+                if tag.lower() not in HTML_EMPTY:
+                    write("</" + tag + ">")
+    if elem.tail:
+        write(_escape_cdata(elem.tail))
+
+
+def _write_html(root,
+                encoding=None,
+                default_namespace=None,
+                format="html"):
+    assert root is not None
+    data = []
+    write = data.append
+    qnames, namespaces = _namespaces(root, default_namespace)
+    _serialize_html(write, root, qnames, namespaces, format)
+    if encoding is None:
+        return "".join(data)
+    else:
+        return _encode("".join(data), encoding)
+
+
+# --------------------------------------------------------------------
+# serialization support
+
+def _namespaces(elem, default_namespace=None):
+    # identify namespaces used in this tree
+
+    # maps qnames to *encoded* prefix:local names
+    qnames = {None: None}
+
+    # maps URIs to prefixes
+    namespaces = {}
+    if default_namespace:
+        namespaces[default_namespace] = ""
+
+    def add_qname(qname):
+        # calculate serialized qname representation
+        try:
+            if qname[:1] == "{":
+                uri, tag = qname[1:].split("}", 1)
+                prefix = namespaces.get(uri)
+                if prefix is None:
+                    prefix = _namespace_map.get(uri)
+                    if prefix is None:
+                        prefix = "ns%d" % len(namespaces)
+                    if prefix != "xml":
+                        namespaces[uri] = prefix
+                if prefix:
+                    qnames[qname] = "%s:%s" % (prefix, tag)
+                else:
+                    qnames[qname] = tag  # default element
+            else:
+                if default_namespace:
+                    raise ValueError(
+                        "cannot use non-qualified names with "
+                        "default_namespace option"
+                        )
+                qnames[qname] = qname
+        except TypeError:  # pragma: no cover
+            _raise_serialization_error(qname)
+
+    # populate qname and namespaces table
+    try:
+        iterate = elem.iter
+    except AttributeError:
+        iterate = elem.getiterator  # cET compatibility
+    for elem in iterate():
+        tag = elem.tag
+        if isinstance(tag, QName) and tag.text not in qnames:
+            add_qname(tag.text)
+        elif isinstance(tag, util.string_type):
+            if tag not in qnames:
+                add_qname(tag)
+        elif tag is not None and tag is not Comment and tag is not PI:
+            _raise_serialization_error(tag)
+        for key, value in elem.items():
+            if isinstance(key, QName):
+                key = key.text
+            if key not in qnames:
+                add_qname(key)
+            if isinstance(value, QName) and value.text not in qnames:
+                add_qname(value.text)
+        text = elem.text
+        if isinstance(text, QName) and text.text not in qnames:
+            add_qname(text.text)
+    return qnames, namespaces
+
+
+def to_html_string(element):
+    return _write_html(ElementTree(element).getroot(), format="html")
+
+
+def to_xhtml_string(element):
+    return _write_html(ElementTree(element).getroot(), format="xhtml")
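+
+
+# Usage sketch (editor's note, not upstream documentation): both helpers take
+# an ElementTree Element and return a serialized string; the visible
+# difference is how empty elements are closed, e.g.
+#
+#     el = util.etree.Element('img', src='logo.png')
+#     to_html_string(el)   # -> '<img src="logo.png">'
+#     to_xhtml_string(el)  # -> '<img src="logo.png" />'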
diff --git a/markdown/treeprocessors.py b/markdown/treeprocessors.py
new file mode 100644
index 0000000..d06f192
--- /dev/null
+++ b/markdown/treeprocessors.py
@@ -0,0 +1,371 @@
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from . import util
+from . import odict
+from . import inlinepatterns
+
+
+def build_treeprocessors(md_instance, **kwargs):
+    """ Build the default treeprocessors for Markdown. """
+    treeprocessors = odict.OrderedDict()
+    treeprocessors["inline"] = InlineProcessor(md_instance)
+    treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
+    return treeprocessors
+
+
+def isString(s):
+    """ Check if it's string """
+    if not isinstance(s, util.AtomicString):
+        return isinstance(s, util.string_type)
+    return False
+
+
+class Treeprocessor(util.Processor):
+    """
+    Treeprocessors are run on the ElementTree object before serialization.
+
+    Each Treeprocessor implements a "run" method that takes a pointer to an
+    ElementTree, modifies it as necessary and returns an ElementTree
+    object.
+
+    Treeprocessors must extend markdown.treeprocessors.Treeprocessor.
+
+    """
+    def run(self, root):
+        """
+        Subclasses of Treeprocessor should implement a `run` method, which
+        takes a root ElementTree. This method can return another ElementTree
+        object, and the existing root ElementTree will be replaced, or it can
+        modify the current tree and return None.
+        """
+        pass  # pragma: no cover
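+
+# A minimal subclass sketch (editor's illustration; the class name below is
+# hypothetical and not part of upstream Python-Markdown):
+#
+#     class NoFollowTreeprocessor(Treeprocessor):
+#         def run(self, root):
+#             for el in root.getiterator('a'):
+#                 el.set('rel', 'nofollow')
+#
+# Treeprocessors are registered on a Markdown instance's treeprocessors
+# OrderedDict and are run, in order, on the ElementTree before serialization.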
+
+
+class InlineProcessor(Treeprocessor):
+    """
+    A Treeprocessor that traverses a tree, applying inline patterns.
+    """
+
+    def __init__(self, md):
+        self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
+        self.__placeholder_suffix = util.ETX
+        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+                                      + len(self.__placeholder_suffix)
+        self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
+        self.markdown = md
+        self.inlinePatterns = md.inlinePatterns
+
+    def __makePlaceholder(self, type):
+        """ Generate a placeholder """
+        id = "%04d" % len(self.stashed_nodes)
+        hash = util.INLINE_PLACEHOLDER % id
+        return hash, id
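+
+    # Editor's note: placeholders produced here look like
+    # STX + "klzzwxh:0000" + ETX (see util.INLINE_PLACEHOLDER); the numeric
+    # id indexes self.stashed_nodes so __processPlaceholders can swap the
+    # stashed node back in later.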
+
+    def __findPlaceholder(self, data, index):
+        """
+        Extract id from data string, start from index
+
+        Keyword arguments:
+
+        * data: string
+        * index: index, from which we start search
+
+        Returns: placeholder id and string index, after the found placeholder.
+
+        """
+        m = self.__placeholder_re.search(data, index)
+        if m:
+            return m.group(1), m.end()
+        else:
+            return None, index + 1
+
+    def __stashNode(self, node, type):
+        """ Add node to stash """
+        placeholder, id = self.__makePlaceholder(type)
+        self.stashed_nodes[id] = node
+        return placeholder
+
+    def __handleInline(self, data, patternIndex=0):
+        """
+        Process string with inline patterns and replace it
+        with placeholders
+
+        Keyword arguments:
+
+        * data: A line of Markdown text
+        * patternIndex: The index of the inlinePattern to start with
+
+        Returns: String with placeholders.
+
+        """
+        if not isinstance(data, util.AtomicString):
+            startIndex = 0
+            while patternIndex < len(self.inlinePatterns):
+                data, matched, startIndex = self.__applyPattern(
+                    self.inlinePatterns.value_for_index(patternIndex),
+                    data, patternIndex, startIndex)
+                if not matched:
+                    patternIndex += 1
+        return data
+
+    def __processElementText(self, node, subnode, isText=True):
+        """
+        Process placeholders in Element.text or Element.tail
+        of Elements popped from self.stashed_nodes.
+
+        Keyword arguments:
+
+        * node: parent node
+        * subnode: processing node
+        * isText: bool variable, True - it's text, False - it's tail
+
+        Returns: None
+
+        """
+        if isText:
+            text = subnode.text
+            subnode.text = None
+        else:
+            text = subnode.tail
+            subnode.tail = None
+
+        childResult = self.__processPlaceholders(text, subnode, isText)
+
+        if not isText and node is not subnode:
+            pos = list(node).index(subnode) + 1
+        else:
+            pos = 0
+
+        childResult.reverse()
+        for newChild in childResult:
+            node.insert(pos, newChild)
+
+    def __processPlaceholders(self, data, parent, isText=True):
+        """
+        Process string with placeholders and generate ElementTree tree.
+
+        Keyword arguments:
+
+        * data: string with placeholders instead of ElementTree elements.
+        * parent: Element, which contains processing inline data
+
+        Returns: list with ElementTree elements with applied inline patterns.
+
+        """
+        def linkText(text):
+            if text:
+                if result:
+                    if result[-1].tail:
+                        result[-1].tail += text
+                    else:
+                        result[-1].tail = text
+                elif not isText:
+                    if parent.tail:
+                        parent.tail += text
+                    else:
+                        parent.tail = text
+                else:
+                    if parent.text:
+                        parent.text += text
+                    else:
+                        parent.text = text
+        result = []
+        strartIndex = 0
+        while data:
+            index = data.find(self.__placeholder_prefix, strartIndex)
+            if index != -1:
+                id, phEndIndex = self.__findPlaceholder(data, index)
+
+                if id in self.stashed_nodes:
+                    node = self.stashed_nodes.get(id)
+
+                    if index > 0:
+                        text = data[strartIndex:index]
+                        linkText(text)
+
+                    if not isString(node):  # it's Element
+                        for child in [node] + list(node):
+                            if child.tail:
+                                if child.tail.strip():
+                                    self.__processElementText(
+                                        node, child, False
+                                    )
+                            if child.text:
+                                if child.text.strip():
+                                    self.__processElementText(child, child)
+                    else:  # it's just a string
+                        linkText(node)
+                        strartIndex = phEndIndex
+                        continue
+
+                    strartIndex = phEndIndex
+                    result.append(node)
+
+                else:  # wrong placeholder
+                    end = index + len(self.__placeholder_prefix)
+                    linkText(data[strartIndex:end])
+                    strartIndex = end
+            else:
+                text = data[strartIndex:]
+                if isinstance(data, util.AtomicString):
+                    # We don't want to lose the AtomicString
+                    text = util.AtomicString(text)
+                linkText(text)
+                data = ""
+
+        return result
+
+    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
+        """
+        Check if the line fits the pattern, create the necessary
+        elements, add it to stashed_nodes.
+
+        Keyword arguments:
+
+        * data: the text to be processed
+        * pattern: the pattern to be checked
+        * patternIndex: index of current pattern
+        * startIndex: string index, from which we start searching
+
+        Returns: String with placeholders instead of ElementTree elements.
+
+        """
+        match = pattern.getCompiledRegExp().match(data[startIndex:])
+        leftData = data[:startIndex]
+
+        if not match:
+            return data, False, 0
+
+        node = pattern.handleMatch(match)
+
+        if node is None:
+            return data, True, len(leftData)+match.span(len(match.groups()))[0]
+
+        if not isString(node):
+            if not isinstance(node.text, util.AtomicString):
+                # We need to process current node too
+                for child in [node] + list(node):
+                    if not isString(node):
+                        if child.text:
+                            child.text = self.__handleInline(
+                                child.text, patternIndex + 1
+                            )
+                        if child.tail:
+                            child.tail = self.__handleInline(
+                                child.tail, patternIndex
+                            )
+
+        placeholder = self.__stashNode(node, pattern.type())
+
+        return "%s%s%s%s" % (leftData,
+                             match.group(1),
+                             placeholder, match.groups()[-1]), True, 0
+
+    def run(self, tree):
+        """Apply inline patterns to a parsed Markdown tree.
+
+        Iterate over ElementTree, find elements with inline tag, apply inline
+        patterns and append newly created Elements to the tree.  If you don't
+        want some text to be processed by inline patterns, wrap it in an
+        AtomicString instead of a normal string:
+
+            node.text = util.AtomicString("This will not be processed.")
+
+        Arguments:
+
+        * tree: ElementTree object, representing Markdown tree.
+
+        Returns: ElementTree object with applied inline patterns.
+
+        """
+        self.stashed_nodes = {}
+
+        stack = [tree]
+
+        while stack:
+            currElement = stack.pop()
+            insertQueue = []
+            for child in currElement:
+                if child.text and not isinstance(
+                    child.text, util.AtomicString
+                ):
+                    text = child.text
+                    child.text = None
+                    lst = self.__processPlaceholders(
+                        self.__handleInline(text), child
+                    )
+                    stack += lst
+                    insertQueue.append((child, lst))
+                if child.tail:
+                    tail = self.__handleInline(child.tail)
+                    dumby = util.etree.Element('d')
+                    child.tail = None
+                    tailResult = self.__processPlaceholders(tail, dumby, False)
+                    if dumby.tail:
+                        child.tail = dumby.tail
+                    pos = list(currElement).index(child) + 1
+                    tailResult.reverse()
+                    for newChild in tailResult:
+                        currElement.insert(pos, newChild)
+                if len(child):
+                    stack.append(child)
+
+            for element, lst in insertQueue:
+                if self.markdown.enable_attributes:
+                    if element.text and isString(element.text):
+                        element.text = inlinepatterns.handleAttributes(
+                            element.text, element
+                        )
+                i = 0
+                for newChild in lst:
+                    if self.markdown.enable_attributes:
+                        # Processing attributes
+                        if newChild.tail and isString(newChild.tail):
+                            newChild.tail = inlinepatterns.handleAttributes(
+                                newChild.tail, element
+                            )
+                        if newChild.text and isString(newChild.text):
+                            newChild.text = inlinepatterns.handleAttributes(
+                                newChild.text, newChild
+                            )
+                    element.insert(i, newChild)
+                    i += 1
+        return tree
+
+
+class PrettifyTreeprocessor(Treeprocessor):
+    """ Add linebreaks to the html document. """
+
+    def _prettifyETree(self, elem):
+        """ Recursively add linebreaks to ElementTree children. """
+
+        i = "\n"
+        if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
+            if (not elem.text or not elem.text.strip()) \
+                    and len(elem) and util.isBlockLevel(elem[0].tag):
+                elem.text = i
+            for e in elem:
+                if util.isBlockLevel(e.tag):
+                    self._prettifyETree(e)
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+
+    def run(self, root):
+        """ Add linebreaks to ElementTree root object. """
+
+        self._prettifyETree(root)
+        # Do <br />'s separately as they are often in the middle of
+        # inline content and missed by _prettifyETree.
+        brs = root.getiterator('br')
+        for br in brs:
+            if not br.tail or not br.tail.strip():
+                br.tail = '\n'
+            else:
+                br.tail = '\n%s' % br.tail
+        # Clean up extra empty lines at end of code blocks.
+        pres = root.getiterator('pre')
+        for pre in pres:
+            if len(pre) and pre[0].tag == 'code':
+                pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
diff --git a/markdown/util.py b/markdown/util.py
new file mode 100644
index 0000000..d3d48f0
--- /dev/null
+++ b/markdown/util.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import re
+import sys
+
+
+"""
+Python 3 Stuff
+=============================================================================
+"""
+PY3 = sys.version_info[0] == 3
+
+if PY3:  # pragma: no cover
+    string_type = str
+    text_type = str
+    int2str = chr
+else:  # pragma: no cover
+    string_type = basestring   # noqa
+    text_type = unicode        # noqa
+    int2str = unichr           # noqa
+
+
+"""
+Constants you might want to modify
+-----------------------------------------------------------------------------
+"""
+
+
+BLOCK_LEVEL_ELEMENTS = re.compile(
+    "^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
+    "|script|noscript|form|fieldset|iframe|math"
+    "|hr|hr/|style|li|dt|dd|thead|tbody"
+    "|tr|th|td|section|footer|header|group|figure"
+    "|figcaption|aside|article|canvas|output"
+    "|progress|video|nav)$",
+    re.IGNORECASE
+)
+# Placeholders
+STX = '\u0002'  # Use STX ("Start of text") for start-of-placeholder
+ETX = '\u0003'  # Use ETX ("End of text") for end-of-placeholder
+INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
+INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
+INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
+AMP_SUBSTITUTE = STX+"amp"+ETX
+HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
+HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
+TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
+
+
+"""
+Constants you probably do not need to change
+-----------------------------------------------------------------------------
+"""
+
+RTL_BIDI_RANGES = (
+    ('\u0590', '\u07FF'),
+    # Hebrew (0590-05FF), Arabic (0600-06FF),
+    # Syriac (0700-074F), Arabic supplement (0750-077F),
+    # Thaana (0780-07BF), Nko (07C0-07FF).
+    ('\u2D30', '\u2D7F')  # Tifinagh
+)
+
+# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
+# markdown.util import etree`).  Do not import it by yourself.
+
+try:  # pragma: no cover
+    # Is the C implementation of ElementTree available?
+    import xml.etree.cElementTree as etree
+    from xml.etree.ElementTree import Comment
+    # Serializers (including ours) test with non-c Comment
+    etree.test_comment = Comment
+    if etree.VERSION < "1.0.5":
+        raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
+except (ImportError, RuntimeError):  # pragma: no cover
+    # Use the Python implementation of ElementTree?
+    import xml.etree.ElementTree as etree
+    if etree.VERSION < "1.1":
+        raise RuntimeError("ElementTree version 1.1 or higher is required")
+
+
+"""
+AUXILIARY GLOBAL FUNCTIONS
+=============================================================================
+"""
+
+
+def isBlockLevel(tag):
+    """Check if the tag is a block level HTML tag."""
+    if isinstance(tag, string_type):
+        return BLOCK_LEVEL_ELEMENTS.match(tag)
+    # Some ElementTree tags are not strings, so return False.
+    return False
+
+
+def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
+    """Parses a string representing bool value. If parsing was successful,
+       returns True or False. If preserve_none=True, returns True, False,
+       or None. If parsing was not successful, raises ValueError, or, if
+       fail_on_errors=False, returns None."""
+    if not isinstance(value, string_type):
+        if preserve_none and value is None:
+            return value
+        return bool(value)
+    elif preserve_none and value.lower() == 'none':
+        return None
+    elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
+        return True
+    elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'):
+        return False
+    elif fail_on_errors:
+        raise ValueError('Cannot parse bool value: %r' % value)
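+
+# Illustrative examples (editor's note, not upstream documentation):
+#
+#     parseBoolValue('Yes')                          # -> True
+#     parseBoolValue('none', preserve_none=True)     # -> None
+#     parseBoolValue('maybe', fail_on_errors=False)  # -> None
+#     parseBoolValue('maybe')                        # raises ValueError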
+
+
+"""
+MISC AUXILIARY CLASSES
+=============================================================================
+"""
+
+
+class AtomicString(text_type):
+    """A string which should not be further processed."""
+    pass
+
+
+class Processor(object):
+    def __init__(self, markdown_instance=None):
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+
+class HtmlStash(object):
+    """
+    This class is used for stashing HTML objects that we extract
+    in the beginning and replace with place-holders.
+    """
+
+    def __init__(self):
+        """ Create a HtmlStash. """
+        self.html_counter = 0  # for counting inline html segments
+        self.rawHtmlBlocks = []
+        self.tag_counter = 0
+        self.tag_data = []  # list of dictionaries in the order tags appear
+
+    def store(self, html, safe=False):
+        """
+        Saves an HTML segment for later reinsertion.  Returns a
+        placeholder string that needs to be inserted into the
+        document.
+
+        Keyword arguments:
+
+        * html: an html segment
+        * safe: label an html segment as safe for safemode
+
+        Returns : a placeholder string
+
+        """
+        self.rawHtmlBlocks.append((html, safe))
+        placeholder = self.get_placeholder(self.html_counter)
+        self.html_counter += 1
+        return placeholder
+
+    def reset(self):
+        self.html_counter = 0
+        self.rawHtmlBlocks = []
+
+    def get_placeholder(self, key):
+        return HTML_PLACEHOLDER % key
+
+    def store_tag(self, tag, attrs, left_index, right_index):
+        """Store tag data and return a placeholder."""
+        self.tag_data.append({'tag': tag, 'attrs': attrs,
+                              'left_index': left_index,
+                              'right_index': right_index})
+        placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
+        self.tag_counter += 1  # equal to the tag's index in self.tag_data
+        return placeholder
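+
+
+# Usage sketch (editor's note, not upstream documentation): placeholders
+# returned by HtmlStash.store() follow HTML_PLACEHOLDER (defined above), e.g.
+#
+#     stash = HtmlStash()
+#     ph = stash.store('<b>raw</b>')   # ph == STX + 'wzxhzdk:0' + ETX
+#
+# HtmlBlockPreprocessor leaves such markers in the text, and
+# RawHtmlPostprocessor swaps the stored HTML back in after serialization.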