SVN: packages-ng: markdown markdown/__init__.py markdown/blockparser.py markdown/blockprocessors.py ...

patrys patrys at pld-linux.org
Fri Apr 3 15:26:30 CEST 2009


Author: patrys
Date: Fri Apr  3 15:26:29 2009
New Revision: 10306

Added:
   packages-ng/markdown/
   packages-ng/markdown/__init__.py
   packages-ng/markdown/blockparser.py
   packages-ng/markdown/blockprocessors.py
   packages-ng/markdown/commandline.py
   packages-ng/markdown/etree_loader.py
   packages-ng/markdown/extensions/
   packages-ng/markdown/extensions/__init__.py
   packages-ng/markdown/extensions/abbr.py
   packages-ng/markdown/extensions/codehilite.py
   packages-ng/markdown/extensions/def_list.py
   packages-ng/markdown/extensions/extra.py
   packages-ng/markdown/extensions/fenced_code.py
   packages-ng/markdown/extensions/footnotes.py
   packages-ng/markdown/extensions/headerid.py
   packages-ng/markdown/extensions/html_tidy.py
   packages-ng/markdown/extensions/imagelinks.py
   packages-ng/markdown/extensions/legacy.py
   packages-ng/markdown/extensions/meta.py
   packages-ng/markdown/extensions/rss.py
   packages-ng/markdown/extensions/tables.py
   packages-ng/markdown/extensions/toc.py
   packages-ng/markdown/extensions/wikilinks.py
   packages-ng/markdown/html4.py
   packages-ng/markdown/inlinepatterns.py
   packages-ng/markdown/odict.py
   packages-ng/markdown/postprocessors.py
   packages-ng/markdown/preprocessors.py
   packages-ng/markdown/treeprocessors.py
   packages-ng/mergerepos   (contents, props changed)
   packages-ng/packages/templates/base.html
Modified:
   packages-ng/packages/templates/details.html
   packages-ng/packages/templates/group.html
   packages-ng/packages/templates/index.html
   packages-ng/packages/templates/provides.html
   packages-ng/packages/templates/requires.html
   packages-ng/packages/templates/search.html
   packages-ng/packages/templates/source.html
   packages-ng/packages/views.py
   packages-ng/settings.py
Log:
- prettier formatting, fold packages by name


Added: packages-ng/markdown/__init__.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/__init__.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,603 @@
+"""
+Python Markdown
+===============
+
+Python Markdown converts Markdown to HTML and can be used as a library or
+called from the command line.
+
+## Basic usage as a module:
+
+    import markdown
+    md = markdown.Markdown()
+    html = md.convert(your_text_string)
+
+## Basic use from the command line:
+
+    python markdown.py source.txt > destination.html
+
+Run "python markdown.py --help" to see more options.
+
+## Extensions
+
+See <http://www.freewisdom.org/projects/python-markdown/> for more
+information and instructions on how to extend the functionality of
+Python Markdown.  Read that before you try modifying this file.
+
+## Authors and License
+
+Started by [Manfred Stienstra](http://www.dwerg.net/).  Continued and
+maintained  by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
+Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
+
+Contact: markdown at freewisdom.org
+
+Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
+Copyright 200? Django Software Foundation (OrderedDict implementation)
+Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+Copyright 2004 Manfred Stienstra (the original version)
+
+License: BSD (see docs/LICENSE for details).
+"""
+
+version = "2.0-rc2"
+version_info = (2,0,0, "rc2")
+
+import re
+import codecs
+import sys
+import warnings
+import logging
+from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
+
+
+"""
+CONSTANTS
+=============================================================================
+"""
+
+"""
+Constants you might want to modify
+-----------------------------------------------------------------------------
+"""
+
+# default logging level for command-line use
+COMMAND_LINE_LOGGING_LEVEL = CRITICAL
+TAB_LENGTH = 4               # expand tabs to this many spaces
+ENABLE_ATTRIBUTES = True     # @id = xyz -> <... id="xyz">
+SMART_EMPHASIS = True        # this_or_that does not become this<i>or</i>that
+DEFAULT_OUTPUT_FORMAT = 'xhtml1'     # xhtml or html4 output
+HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
+BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
+                                  "|script|noscript|form|fieldset|iframe|math"
+                                  "|ins|del|hr|hr/|style|li|dt|dd|thead|tbody"
+                                  "|tr|th|td")
+DOC_TAG = "div"     # Element used to wrap document - later removed
+
+# Placeholders
+STX = u'\u0002'  # Use STX ("Start of text") for start-of-placeholder
+ETX = u'\u0003'  # Use ETX ("End of text") for end-of-placeholder
+INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
+INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
+AMP_SUBSTITUTE = STX+"amp"+ETX
+
+
+"""
+Constants you probably do not need to change
+-----------------------------------------------------------------------------
+"""
+
+RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
+                     # Hebrew (0590-05FF), Arabic (0600-06FF),
+                     # Syriac (0700-074F), Arabic supplement (0750-077F),
+                     # Thaana (0780-07BF), Nko (07C0-07FF).
+                    (u'\u2D30', u'\u2D7F'), # Tifinagh
+                    )
+
+
+"""
+AUXILIARY GLOBAL FUNCTIONS
+=============================================================================
+"""
+
+
+def message(level, text):
+    """ A wrapper method for logging debug messages. """
+    logger =  logging.getLogger('MARKDOWN')
+    if logger.handlers:
+        # The logger is configured
+        logger.log(level, text)
+        if level > WARN:
+            sys.exit(0)
+    elif level > WARN:
+        raise MarkdownException, text
+    else:
+        warnings.warn(text, MarkdownWarning)
+
+
+def isBlockLevel(tag):
+    """Check if the tag is a block level HTML tag."""
+    return BLOCK_LEVEL_ELEMENTS.match(tag)
+
+"""
+MISC AUXILIARY CLASSES
+=============================================================================
+"""
+
+class AtomicString(unicode):
+    """A string which should not be further processed."""
+    pass
+
+
+class MarkdownException(Exception):
+    """ A Markdown Exception. """
+    pass
+
+
+class MarkdownWarning(Warning):
+    """ A Markdown Warning. """
+    pass
+
+
+"""
+OVERALL DESIGN
+=============================================================================
+
+Markdown processing takes place in five steps:
+
+1. A bunch of "preprocessors" munge the input text.
+2. BlockParser() parses the high-level structural elements of the
+   pre-processed text into an ElementTree.
+3. A bunch of "treeprocessors" are run against the ElementTree. One such
+   treeprocessor runs InlinePatterns against the ElementTree, detecting inline
+   markup.
+4. Some post-processors are run against the text after the ElementTree has
+   been serialized into text.
+5. The output is written to a string.
+
+Those steps are put together by the Markdown() class.
+
+"""
+
+import preprocessors
+import blockprocessors
+import treeprocessors
+import inlinepatterns
+import postprocessors
+import blockparser
+import etree_loader
+import odict
+
+# Extensions should use "markdown.etree" instead of "etree" (or do `from
+# markdown import etree`).  Do not import it by yourself.
+
+etree = etree_loader.importETree()
+
+# Adds the ability to output html4
+import html4
+
+
+class Markdown:
+    """Convert Markdown to HTML."""
+
+    def __init__(self,
+                 extensions=[],
+                 extension_configs={},
+                 safe_mode = False, 
+                 output_format=DEFAULT_OUTPUT_FORMAT):
+        """
+        Creates a new Markdown instance.
+
+        Keyword arguments:
+
+        * extensions: A list of extensions.
+           If they are of type string, the module mdx_name.py will be loaded.
+           If they are a subclass of markdown.Extension, they will be used
+           as-is.
+        * extension_configs: Configuration settings for extensions.
+        * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
+        * output_format: Format of output. Supported formats are:
+            * "xhtml1": Outputs XHTML 1.x. Default.
+            * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
+            * "html4": Outputs HTML 4
+            * "html": Outputs latest supported version of HTML (currently HTML 4).
+            Note that it is suggested that the more specific formats ("xhtml1" 
+            and "html4") be used as "xhtml" or "html" may change in the future
+            if it makes sense at that time. 
+
+        """
+        
+        self.safeMode = safe_mode
+        self.registeredExtensions = []
+        self.docType = ""
+        self.stripTopLevelTags = True
+
+        # Preprocessors
+        self.preprocessors = odict.OrderedDict()
+        self.preprocessors["html_block"] = \
+                preprocessors.HtmlBlockPreprocessor(self)
+        self.preprocessors["reference"] = \
+                preprocessors.ReferencePreprocessor(self)
+        # footnote preprocessor will be inserted with "<reference"
+
+        # Block processors - run by the parser
+        self.parser = blockparser.BlockParser()
+        self.parser.blockprocessors['empty'] = \
+                blockprocessors.EmptyBlockProcessor(self.parser)
+        self.parser.blockprocessors['indent'] = \
+                blockprocessors.ListIndentProcessor(self.parser)
+        self.parser.blockprocessors['code'] = \
+                blockprocessors.CodeBlockProcessor(self.parser)
+        self.parser.blockprocessors['hashheader'] = \
+                blockprocessors.HashHeaderProcessor(self.parser)
+        self.parser.blockprocessors['setextheader'] = \
+                blockprocessors.SetextHeaderProcessor(self.parser)
+        self.parser.blockprocessors['hr'] = \
+                blockprocessors.HRProcessor(self.parser)
+        self.parser.blockprocessors['olist'] = \
+                blockprocessors.OListProcessor(self.parser)
+        self.parser.blockprocessors['ulist'] = \
+                blockprocessors.UListProcessor(self.parser)
+        self.parser.blockprocessors['quote'] = \
+                blockprocessors.BlockQuoteProcessor(self.parser)
+        self.parser.blockprocessors['paragraph'] = \
+                blockprocessors.ParagraphProcessor(self.parser)
+
+
+        #self.prePatterns = []
+
+        # Inline patterns - Run on the tree
+        self.inlinePatterns = odict.OrderedDict()
+        self.inlinePatterns["backtick"] = \
+                inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
+        self.inlinePatterns["escape"] = \
+                inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
+        self.inlinePatterns["reference"] = \
+            inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
+        self.inlinePatterns["link"] = \
+                inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
+        self.inlinePatterns["image_link"] = \
+                inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
+        self.inlinePatterns["image_reference"] = \
+            inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
+        self.inlinePatterns["autolink"] = \
+            inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
+        self.inlinePatterns["automail"] = \
+            inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
+        self.inlinePatterns["linebreak2"] = \
+            inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
+        self.inlinePatterns["linebreak"] = \
+            inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
+        self.inlinePatterns["html"] = \
+                inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
+        self.inlinePatterns["entity"] = \
+                inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
+        self.inlinePatterns["not_strong"] = \
+                inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
+        self.inlinePatterns["strong_em"] = \
+            inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
+        self.inlinePatterns["strong"] = \
+            inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
+        self.inlinePatterns["emphasis"] = \
+            inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
+        self.inlinePatterns["emphasis2"] = \
+            inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
+        # The order of the handlers matters!!!
+
+
+        # Tree processors - run once we have a basic parse.
+        self.treeprocessors = odict.OrderedDict()
+        self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
+        self.treeprocessors["prettify"] = \
+                treeprocessors.PrettifyTreeprocessor(self)
+
+        # Postprocessors - finishing touches.
+        self.postprocessors = odict.OrderedDict()
+        self.postprocessors["raw_html"] = \
+                postprocessors.RawHtmlPostprocessor(self)
+        self.postprocessors["amp_substitute"] = \
+                postprocessors.AndSubstitutePostprocessor()
+        # footnote postprocessor will be inserted with ">amp_substitute"
+
+        # Map format keys to serializers
+        self.output_formats = {
+            'html'  : html4.to_html_string, 
+            'html4' : html4.to_html_string,
+            'xhtml' : etree.tostring, 
+            'xhtml1': etree.tostring,
+        }
+
+        self.references = {}
+        self.htmlStash = preprocessors.HtmlStash()
+        self.registerExtensions(extensions = extensions,
+                                configs = extension_configs)
+        self.set_output_format(output_format)
+        self.reset()
+
+    def registerExtensions(self, extensions, configs):
+        """
+        Register extensions with this instance of Markdown.
+
+        Keyword arguments:
+
+        * extensions: A list of extensions, which can either
+           be strings or objects.  See the docstring on Markdown.
+        * configs: A dictionary mapping module names to config options.
+
+        """
+        for ext in extensions:
+            if isinstance(ext, basestring):
+                ext = load_extension(ext, configs.get(ext, []))
+            try:
+                ext.extendMarkdown(self, globals())
+            except AttributeError:
+                message(ERROR, "Incorrect type! Extension '%s' is "
+                               "neither a string nor an Extension." % (repr(ext)))
+            
+
+    def registerExtension(self, extension):
+        """ This gets called by the extension """
+        self.registeredExtensions.append(extension)
+
+    def reset(self):
+        """
+        Resets all state variables so that we can start with a new text.
+        """
+        self.htmlStash.reset()
+        self.references.clear()
+
+        for extension in self.registeredExtensions:
+            extension.reset()
+
+    def set_output_format(self, format):
+        """ Set the output format for the class instance. """
+        try:
+            self.serializer = self.output_formats[format.lower()]
+        except KeyError:
+            message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \
+                               % (format, self.output_formats.keys()))
+
+    def convert(self, source):
+        """
+        Convert markdown to serialized XHTML or HTML.
+
+        Keyword arguments:
+
+        * source: Source text as a Unicode string.
+
+        """
+
+        # Fixup the source text
+        if not source:
+            return u""  # a blank unicode string
+        try:
+            source = unicode(source)
+        except UnicodeDecodeError:
+            message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
+            return u""
+
+        source = source.replace(STX, "").replace(ETX, "")
+        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
+        source = re.sub(r'\n\s+\n', '\n\n', source)
+        source = source.expandtabs(TAB_LENGTH)
+
+        # Split into lines and run the line preprocessors.
+        self.lines = source.split("\n")
+        for prep in self.preprocessors.values():
+            self.lines = prep.run(self.lines)
+
+        # Parse the high-level elements.
+        root = self.parser.parseDocument(self.lines).getroot()
+
+        # Run the tree-processors
+        for treeprocessor in self.treeprocessors.values():
+            newRoot = treeprocessor.run(root)
+            if newRoot:
+                root = newRoot
+
+        # Serialize _properly_.  Strip top-level tags.
+        output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf8"))
+        if self.stripTopLevelTags:
+            start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2
+            end = output.rindex('</%s>'%DOC_TAG)
+            output = output[start:end].strip()
+
+        # Run the text post-processors
+        for pp in self.postprocessors.values():
+            output = pp.run(output)
+
+        return output.strip()
+
+    def convertFile(self, input=None, output=None, encoding=None):
+        """Converts a markdown file and writes the HTML output to a file or stream.
+
+        Decodes the file using the provided encoding (defaults to utf-8),
+        passes the file content to markdown, and outputs the html to either
+        the provided stream or the file with provided name, using the same
+        encoding as the source file.
+
+        **Note:** This is the only place that decoding and encoding of unicode
+        takes place in Python-Markdown.  (All other code is unicode-in /
+        unicode-out.)
+
+        Keyword arguments:
+
+        * input: Name of source text file.
+        * output: Name of output file. Writes to stdout if `None`.
+        * encoding: Encoding of input and output files. Defaults to utf-8.
+
+        """
+
+        encoding = encoding or "utf-8"
+
+        # Read the source
+        input_file = codecs.open(input, mode="r", encoding=encoding)
+        text = input_file.read()
+        input_file.close()
+        text = text.lstrip(u'\ufeff') # remove the byte-order mark
+
+        # Convert
+        html = self.convert(text)
+
+        # Write to file or stdout
+        if isinstance(output, (str, unicode)):
+            output_file = codecs.open(output, "w", encoding=encoding)
+            output_file.write(html)
+            output_file.close()
+        else:
+            output.write(html.encode(encoding))
+
+
+"""
+Extensions
+-----------------------------------------------------------------------------
+"""
+
+class Extension:
+    """ Base class for extensions to subclass. """
+    def __init__(self, configs = {}):
+        """Create an instance of an Extension.
+
+        Keyword arguments:
+
+        * configs: A dict of configuration setting used by an Extension.
+        """
+        self.config = configs
+
+    def getConfig(self, key):
+        """ Return a setting for the given key or an empty string. """
+        if key in self.config:
+            return self.config[key][0]
+        else:
+            return ""
+
+    def getConfigInfo(self):
+        """ Return all config settings as a list of tuples. """
+        return [(key, self.config[key][1]) for key in self.config.keys()]
+
+    def setConfig(self, key, value):
+        """ Set a config setting for `key` with the given `value`. """
+        self.config[key][0] = value
+
+    def extendMarkdown(self, md, md_globals):
+        """
+        Add the various processors and patterns to the Markdown instance.
+
+        This method must be overridden by every extension.
+
+        Keyword arguments:
+
+        * md: The Markdown instance.
+
+        * md_globals: Global variables in the markdown module namespace.
+
+        """
+        pass
+
+
+def load_extension(ext_name, configs = []):
+    """Load extension by name, then return the module.
+
<<diff output has been trimmed to 500 lines, 104 line(s) remained.>>
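
A minimal sketch of the library usage the module docstring above describes; the
input strings are placeholders, and the keyword values are the ones documented
in Markdown.__init__:

    import markdown

    # One-off conversion via the module-level helper.
    html = markdown.markdown(u"Some *Markdown* text")

    # Reusable instance: escape raw HTML and emit HTML 4 instead of XHTML 1.
    md = markdown.Markdown(safe_mode="escape", output_format="html4")
    first = md.convert(u"# First document")
    md.reset()   # clear accumulated state (references etc.) between documents
    second = md.convert(u"# Second document")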

Added: packages-ng/markdown/blockparser.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/blockparser.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,95 @@
+
+import markdown
+
+class State(list):
+    """ Track the current and nested state of the parser. 
+    
+    This utility class is used to track the state of the BlockParser and 
+    support multiple levels of nesting. It's just a simple API wrapped around
+    a list. Each time a state is set, that state is appended to the end of the
+    list. Each time a state is reset, that state is removed from the end of
+    the list.
+
+    Therefore, each time a state is set for a nested block, that state must be 
+    reset when we back out of that level of nesting or the state could be
+    corrupted.
+
+    While all the methods of a list object are available, only the three
+    defined below need be used.
+
+    """
+
+    def set(self, state):
+        """ Set a new state. """
+        self.append(state)
+
+    def reset(self):
+        """ Step back one step in nested state. """
+        self.pop()
+
+    def isstate(self, state):
+        """ Test that top (current) level is of given state. """
+        if len(self):
+            return self[-1] == state
+        else:
+            return False
+
+class BlockParser:
+    """ Parse Markdown blocks into an ElementTree object. 
+    
+    A wrapper class that stitches the various BlockProcessors together,
+    looping through them and creating an ElementTree object.
+    """
+
+    def __init__(self):
+        self.blockprocessors = markdown.odict.OrderedDict()
+        self.state = State()
+
+    def parseDocument(self, lines):
+        """ Parse a markdown document into an ElementTree. 
+        
+        Given a list of lines, an ElementTree object (not just a parent Element)
+        is created and the root element is passed to the parser as the parent.
+        The ElementTree object is returned.
+        
+        This should only be called on an entire document, not pieces.
+
+        """
+        # Create an ElementTree from the lines
+        self.root = markdown.etree.Element(markdown.DOC_TAG)
+        self.parseChunk(self.root, '\n'.join(lines))
+        return markdown.etree.ElementTree(self.root)
+
+    def parseChunk(self, parent, text):
+        """ Parse a chunk of markdown text and attach to given etree node. 
+        
+        While the ``text`` argument is generally assumed to contain multiple
+        blocks which will be split on blank lines, it could contain only one
+        block. Generally, this method would be called by extensions when
+        block parsing is required. 
+        
+        The ``parent`` etree Element passed in is altered in place. 
+        Nothing is returned.
+
+        """
+        self.parseBlocks(parent, text.split('\n\n'))
+
+    def parseBlocks(self, parent, blocks):
+        """ Process blocks of markdown text and attach to given etree node. 
+        
+        Given a list of ``blocks``, each blockprocessor is stepped through
+        until there are no blocks left. While an extension could potentially
+        call this method directly, it's generally expected to be used internally.
+
+        This is a public method as an extension may need to add/alter additional
+        BlockProcessors which call this method to recursively parse a nested
+        block.
+
+        """
+        while blocks:
+            for processor in self.blockprocessors.values():
+                if processor.test(parent, blocks[0]):
+                    processor.run(parent, blocks)
+                    break
+
+
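
To illustrate the parseDocument()/State API above, here is a rough sketch of
driving the parser by hand (normally Markdown.convert() does this after the
preprocessors have run; the sample lines are placeholders):

    import markdown

    md = markdown.Markdown()             # builds a BlockParser with the default processors
    lines = [u"# A title", u"", u"A paragraph."]
    tree = md.parser.parseDocument(lines)
    root = tree.getroot()                # the wrapping DOC_TAG ("div") element
    print markdown.etree.tostring(root)  # roughly: <div><h1>A title</h1><p>A paragraph.</p></div>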

Added: packages-ng/markdown/blockprocessors.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/blockprocessors.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,460 @@
+"""
+CORE MARKDOWN BLOCKPARSER
+=============================================================================
+
+This parser handles basic parsing of Markdown blocks.  It doesn't concern itself
+with inline elements such as **bold** or *italics*, but rather just catches 
+blocks, lists, quotes, etc.
+
+The BlockParser is made up of a bunch of BlockProcessors, each handling a 
+different type of block. Extensions may add/replace/remove BlockProcessors
+as they need to alter how markdown blocks are parsed.
+
+"""
+
+import re
+import markdown
+
+class BlockProcessor:
+    """ Base class for block processors. 
+    
+    Each subclass will provide the methods below to work with the source and
+    tree. Each processor will need to define its own ``test`` and ``run``
+    methods. The ``test`` method should return True or False, to indicate
+    whether the current block should be processed by this processor. If the
+    test passes, the parser will call the processor's ``run`` method.
+
+    """
+
+    def __init__(self, parser=None):
+        self.parser = parser
+
+    def lastChild(self, parent):
+        """ Return the last child of an etree element. """
+        if len(parent):
+            return parent[-1]
+        else:
+            return None
+
+    def detab(self, text):
+        """ Remove a tab from the front of each line of the given text. """
+        newtext = []
+        lines = text.split('\n')
+        for line in lines:
+            if line.startswith(' '*markdown.TAB_LENGTH):
+                newtext.append(line[markdown.TAB_LENGTH:])
+            elif not line.strip():
+                newtext.append('')
+            else:
+                break
+        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
+
+    def looseDetab(self, text, level=1):
+        """ Remove a tab from the front of lines, but allow dedented lines. """
+        lines = text.split('\n')
+        for i in range(len(lines)):
+            if lines[i].startswith(' '*markdown.TAB_LENGTH*level):
+                lines[i] = lines[i][markdown.TAB_LENGTH*level:]
+        return '\n'.join(lines)
+
+    def test(self, parent, block):
+        """ Test for block type. Must be overridden by subclasses. 
+        
+        As the parser loops through processors, it will call the ``test`` method
+        on each to determine if the given block of text is of that type. This
+        method must return a boolean ``True`` or ``False``. The actual method of
+        testing is left to the needs of that particular block type. It could 
+        be as simple as ``block.startswith(some_string)`` or a complex regular
+        expression. As the block type may be different depending on the parent
+        of the block (i.e. inside a list), the parent etree element is also 
+        provided and may be used as part of the test.
+
+        Keywords:
+        
+        * ``parent``: An etree element which will be the parent of the block.
+        * ``block``: A block of text from the source which has been split at 
+            blank lines.
+        """
+        pass
+
+    def run(self, parent, blocks):
+        """ Run processor. Must be overridden by subclasses. 
+        
+        When the parser determines the appropriate type of a block, the parser
+        will call the corresponding processor's ``run`` method. This method
+        should parse the individual lines of the block and append them to
+        the etree. 
+
+        Note that both the ``parent`` and ``blocks`` keywords are pointers
+        to instances of the objects which should be edited in place. Each
+        processor must make changes to the existing objects as there is no
+        mechanism to return new/different objects to replace them.
+
+        This means that this method should be adding SubElements or adding text
+        to the parent, and should remove (``pop``) or add (``insert``) items to
+        the list of blocks.
+
+        Keywords:
+
+        * ``parent``: An etree element which is the parent of the current block.
+        * ``blocks``: A list of all remaining blocks of the document.
+        """
+        pass
+
+
+class ListIndentProcessor(BlockProcessor):
+    """ Process children of list items. 
+    
+    Example:
+        * a list item
+            process this part
+
+            or this part
+
+    """
+
+    INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH)
+    ITEM_TYPES = ['li']
+    LIST_TYPES = ['ul', 'ol']
+
+    def test(self, parent, block):
+        return block.startswith(' '*markdown.TAB_LENGTH) and \
+                not self.parser.state.isstate('detabbed') and  \
+                (parent.tag in self.ITEM_TYPES or \
+                    (len(parent) and parent[-1] and \
+                        (parent[-1].tag in self.LIST_TYPES)
+                    )
+                )
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        level, sibling = self.get_level(parent, block)
+        block = self.looseDetab(block, level)
+
+        self.parser.state.set('detabbed')
+        if parent.tag in self.ITEM_TYPES:
+            # The parent is already a li. Just parse the child block.
+            self.parser.parseBlocks(parent, [block])
+        elif sibling.tag in self.ITEM_TYPES:
+            # The sibling is a li. Use it as parent.
+            self.parser.parseBlocks(sibling, [block])
+        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
+            # The parent is a list (``ol`` or ``ul``) which has children.
+            # Assume the last child li is the parent of this block.
+            if sibling[-1].text:
+                # If the parent li has text, that text needs to be moved to a p
+                block = '%s\n\n%s' % (sibling[-1].text, block)
+                sibling[-1].text = ''
+            self.parser.parseChunk(sibling[-1], block)
+        else:
+            self.create_item(sibling, block)
+        self.parser.state.reset()
+
+    def create_item(self, parent, block):
+        """ Create a new li and parse the block with it as the parent. """
+        li = markdown.etree.SubElement(parent, 'li')
+        self.parser.parseBlocks(li, [block])
+ 
+    def get_level(self, parent, block):
+        """ Get level of indent based on list level. """
+        # Get indent level
+        m = self.INDENT_RE.match(block)
+        if m:
+            indent_level = len(m.group(1))/markdown.TAB_LENGTH
+        else:
+            indent_level = 0
+        if self.parser.state.isstate('list'):
+            # We're in a tightlist - so we already are at correct parent.
+            level = 1
+        else:
+            # We're in a looselist - so we need to find parent.
+            level = 0
+        # Step through children of tree to find matching indent level.
+        while indent_level > level:
+            child = self.lastChild(parent)
+            if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
+                if child.tag in self.LIST_TYPES:
+                    level += 1
+                parent = child
+            else:
+                # No more child levels. If we're short of indent_level,
+                # we have a code block. So we stop here.
+                break
+        return level, parent
+
+
+class CodeBlockProcessor(BlockProcessor):
+    """ Process code blocks. """
+
+    def test(self, parent, block):
+        return block.startswith(' '*markdown.TAB_LENGTH)
+    
+    def run(self, parent, blocks):
+        sibling = self.lastChild(parent)
+        block = blocks.pop(0)
+        theRest = ''
+        if sibling and sibling.tag == "pre" and len(sibling) \
+                    and sibling[0].tag == "code":
+            # The previous block was a code block. As blank lines do not start
+            # new code blocks, append this block to the previous, adding back
+            # linebreaks removed from the split into a list.
+            code = sibling[0]
+            block, theRest = self.detab(block)
+            code.text = markdown.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
+        else:
+            # This is a new codeblock. Create the elements and insert text.
+            pre = markdown.etree.SubElement(parent, 'pre')
+            code = markdown.etree.SubElement(pre, 'code')
+            block, theRest = self.detab(block)
+            code.text = markdown.AtomicString('%s\n' % block.rstrip())
+        if theRest:
+            # This block contained unindented line(s) after the first indented 
+            # line. Insert these lines as the first block of the master blocks
+            # list for future processing.
+            blocks.insert(0, theRest)
+
+
+class BlockQuoteProcessor(BlockProcessor):
+
+    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+        if m:
+            before = block[:m.start()] # Lines before blockquote
+            # Recursively parse the lines before the blockquote first.
+            self.parser.parseBlocks(parent, [before])
+            # Remove ``> `` from beginning of each line.
+            block = '\n'.join([self.clean(line) for line in 
+                            block[m.start():].split('\n')])
+        sibling = self.lastChild(parent)
+        if sibling and sibling.tag == "blockquote":
+            # Previous block was a blockquote, so set that as this block's parent.
+            quote = sibling
+        else:
+            # This is a new blockquote. Create a new parent element.
+            quote = markdown.etree.SubElement(parent, 'blockquote')
+        # Recursively parse block with blockquote as parent.
+        self.parser.parseChunk(quote, block)
+
+    def clean(self, line):
+        """ Remove ``>`` from beginning of a line. """
+        m = self.RE.match(line)
+        if line.strip() == ">":
+            return ""
+        elif m:
+            return m.group(2)
+        else:
+            return line
+
+class OListProcessor(BlockProcessor):
+    """ Process ordered list blocks. """
+
+    TAG = 'ol'
+    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
+    RE = re.compile(r'^[ ]{0,3}\d+\.[ ](.*)')
+    # Detect items on secondary lines. They can be of either list type.
+    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ](.*)')
+    # Detect indented (nested) items of either type
+    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ].*')
+
+    def test(self, parent, block):
+        return bool(self.RE.match(block))
+
+    def run(self, parent, blocks):
+        # Check for multiple items in one block.
+        items = self.get_items(blocks.pop(0))
+        sibling = self.lastChild(parent)
+        if sibling and sibling.tag in ['ol', 'ul']:
+            # Previous block was a list item, so set that as parent
+            lst = sibling
+            # make sure previous item is in a p.
+            if len(lst) and lst[-1].text and not len(lst[-1]):
+                p = markdown.etree.SubElement(lst[-1], 'p')
+                p.text = lst[-1].text
+                lst[-1].text = ''
+            # parse first block differently as it gets wrapped in a p.
+            li = markdown.etree.SubElement(lst, 'li')
+            self.parser.state.set('looselist')
+            firstitem = items.pop(0)
+            self.parser.parseBlocks(li, [firstitem])
+            self.parser.state.reset()
+        else:
+            # This is a new list so create parent with appropriate tag.
+            lst = markdown.etree.SubElement(parent, self.TAG)
+        self.parser.state.set('list')
+        # Loop through items in block, recursively parsing each with the
+        # appropriate parent.
+        for item in items:
+            if item.startswith(' '*markdown.TAB_LENGTH):
+                # Item is indented. Parse with last item as parent
+                self.parser.parseBlocks(lst[-1], [item])
+            else:
+                # New item. Create li and parse with it as parent
+                li = markdown.etree.SubElement(lst, 'li')
+                self.parser.parseBlocks(li, [item])
+        self.parser.state.reset()
+
+    def get_items(self, block):
+        """ Break a block into list items. """
+        items = []
+        for line in block.split('\n'):
+            m = self.CHILD_RE.match(line)
+            if m:
+                # This is a new item. Append
+                items.append(m.group(3))
+            elif self.INDENT_RE.match(line):
+                # This is an indented (possibly nested) item.
+                if items[-1].startswith(' '*markdown.TAB_LENGTH):
+                    # Previous item was indented. Append to that item.
+                    items[-1] = '%s\n%s' % (items[-1], line)
+                else:
+                    items.append(line)
+            else:
+                # This is another line of previous item. Append to that item.
+                items[-1] = '%s\n%s' % (items[-1], line)
+        return items
+
+
+class UListProcessor(OListProcessor):
+    """ Process unordered list blocks. """
+
+    TAG = 'ul'
+    RE = re.compile(r'^[ ]{0,3}[*+-][ ](.*)')
+
+
+class HashHeaderProcessor(BlockProcessor):
+    """ Process Hash Headers. """
+
+    # Detect a header at start of any line in block
+    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+        if m:
+            before = block[:m.start()] # All lines before header
+            after = block[m.end():]    # All lines after header
+            if before:
+                # As the header was not the first line of the block and the
+                # lines before the header must be parsed first,
+                # recursively parse these lines as a block.
+                self.parser.parseBlocks(parent, [before])
+            # Create header using named groups from RE
+            h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level')))
+            h.text = m.group('header').strip()
+            if after:
+                # Insert remaining lines as first block for future parsing.
+                blocks.insert(0, after)
+        else:
+            # This should never happen, but just in case...
+            markdown.message(markdown.CRITICAL, "We've got a problem header!")
+
+
+class SetextHeaderProcessor(BlockProcessor):
+    """ Process Setext-style Headers. """
+
+    # Detect Setext-style header. Must be first 2 lines of block.
+    RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE)
+
+    def test(self, parent, block):
+        return bool(self.RE.match(block))
+
+    def run(self, parent, blocks):
+        lines = blocks.pop(0).split('\n')
+        # Determine level. ``=`` is 1 and ``-`` is 2.
+        if lines[1].startswith('='):
+            level = 1
+        else:
+            level = 2
+        h = markdown.etree.SubElement(parent, 'h%d' % level)
+        h.text = lines[0].strip()
+        if len(lines) > 2:
+            # Block contains additional lines. Add to  master blocks for later.
+            blocks.insert(0, '\n'.join(lines[2:]))
+
+
+class HRProcessor(BlockProcessor):
+    """ Process Horizontal Rules. """
+
+    RE = r'[ ]{0,3}(?P<ch>[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*'
+    # Detect hr on any line of a block.
+    SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE)
+    # Match a hr on a single line of text.
+    MATCH_RE = re.compile(r'^%s$' % RE)
+
+    def test(self, parent, block):
+        return bool(self.SEARCH_RE.search(block))
+
+    def run(self, parent, blocks):
+        lines = blocks.pop(0).split('\n')
+        prelines = []
+        # Check for lines in block before hr.
+        for line in lines:
+            m = self.MATCH_RE.match(line)
+            if m:
+                break
+            else:
+                prelines.append(line)
+        if len(prelines):
+            # Recursively parse lines before hr so they get parsed first.
+            self.parser.parseBlocks(parent, ['\n'.join(prelines)])
+        # create hr
+        hr = markdown.etree.SubElement(parent, 'hr')
+        # check for lines in block after hr.
+        lines = lines[len(prelines)+1:]
+        if len(lines):
+            # Add lines after hr to master blocks for later parsing.
+            blocks.insert(0, '\n'.join(lines))
+
+
+class EmptyBlockProcessor(BlockProcessor):
+    """ Process blocks that are empty or start with an empty line. """
+
+    # Detect a block that only contains whitespace 
+    # or only whitespace on the first line.
+    RE = re.compile(r'^\s*\n')
+
+    def test(self, parent, block):
+        return bool(self.RE.match(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.match(block)
+        if m:
+            # Add remaining line to master blocks for later.
+            blocks.insert(0, block[m.end():])
+            sibling = self.lastChild(parent)
+            if sibling and sibling.tag == 'pre' and sibling[0] and \
+                    sibling[0].tag == 'code':
+                # Last block is a codeblock. Append to preserve whitespace.
+                sibling[0].text = markdown.AtomicString('%s\n\n\n' % sibling[0].text)
+
+
+class ParagraphProcessor(BlockProcessor):
+    """ Process Paragraph blocks. """
+
+    def test(self, parent, block):
+        return True
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        if block.strip():
+            # Not a blank block. Add to parent, otherwise throw it away.
+            if self.parser.state.isstate('list'):
+                # The parent is a tight-list. Append to parent.text
+                if parent.text:
+                    parent.text = '%s\n%s' % (parent.text, block)
+                else:
+                    parent.text = block.lstrip()
+            else:
+                # Create a regular paragraph
+                p = markdown.etree.SubElement(parent, 'p')
+                p.text = block.lstrip()
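
As a concrete example of the test()/run() contract above, a hypothetical
processor might look roughly like this. The 'aside' name, the '%% ' marker and
the <aside> tag are invented for illustration, and the '<paragraph' location
string assumes the same OrderedDict add() convention used elsewhere in this
commit (e.g. '<reference' in the abbr extension):

    import re
    import markdown

    class AsideProcessor(markdown.blockprocessors.BlockProcessor):
        """ Hypothetical: a block starting with '%% ' becomes an <aside>. """
        RE = re.compile(r'^%% ')

        def test(self, parent, block):
            return bool(self.RE.match(block))

        def run(self, parent, blocks):
            block = blocks.pop(0)
            aside = markdown.etree.SubElement(parent, 'aside')
            # Recursively parse the rest of the block with <aside> as parent.
            self.parser.parseChunk(aside, self.RE.sub('', block))

    md = markdown.Markdown()
    # Insert before the catch-all paragraph processor so it actually runs.
    md.parser.blockprocessors.add('aside', AsideProcessor(md.parser), '<paragraph')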

Added: packages-ng/markdown/commandline.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/commandline.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,96 @@
+"""
+COMMAND-LINE SPECIFIC STUFF
+=============================================================================
+
+The rest of the code is specifically for handling the case where Python
+Markdown is called from the command line.
+"""
+
+import markdown
+import sys
+import logging
+from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
+
+EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
+""" The name used in the usage statement displayed for python versions < 2.3.
+(With python 2.3 and higher the usage statement is generated by optparse
+and uses the actual name of the executable called.) """
+
+OPTPARSE_WARNING = """
+Python 2.3 or higher required for advanced command line options.
+For lower versions of Python use:
+
+      %s INPUT_FILE > OUTPUT_FILE
+
+""" % EXECUTABLE_NAME_FOR_USAGE
+
+def parse_options():
+    """
+    Define and parse `optparse` options for command-line usage.
+    """
+
+    try:
+        optparse = __import__("optparse")
+    except:
+        if len(sys.argv) == 2:
+            return {'input': sys.argv[1],
+                    'output': None,
+                    'safe_mode': False,
+                    'extensions': [],
+                    'encoding': None }, CRITICAL
+        else:
+            print OPTPARSE_WARNING
+            return None, None
+
+    parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
+    parser.add_option("-f", "--file", dest="filename", default=sys.stdout,
+                      help="write output to OUTPUT_FILE",
+                      metavar="OUTPUT_FILE")
+    parser.add_option("-e", "--encoding", dest="encoding",
+                      help="encoding for input and output files",)
+    parser.add_option("-q", "--quiet", default = CRITICAL,
+                      action="store_const", const=CRITICAL+10, dest="verbose",
+                      help="suppress all messages")
+    parser.add_option("-v", "--verbose",
+                      action="store_const", const=INFO, dest="verbose",
+                      help="print info messages")
+    parser.add_option("-s", "--safe", dest="safe", default=False,
+                      metavar="SAFE_MODE",
+                      help="safe mode ('replace', 'remove' or 'escape'  user's HTML tag)")
+    parser.add_option("-o", "--output_format", dest="output_format", 
+                      default='xhtml1', metavar="OUTPUT_FORMAT",
+                      help="Format of output. One of 'xhtml1' (default) or 'html4'.")
+    parser.add_option("--noisy",
+                      action="store_const", const=DEBUG, dest="verbose",
+                      help="print debug messages")
+    parser.add_option("-x", "--extension", action="append", dest="extensions",
+                      help = "load extension EXTENSION", metavar="EXTENSION")
+
+    (options, args) = parser.parse_args()
+
+    if not len(args) == 1:
+        parser.print_help()
+        return None, None
+    else:
+        input_file = args[0]
+
+    if not options.extensions:
+        options.extensions = []
+
+    return {'input': input_file,
+            'output': options.filename,
+            'safe_mode': options.safe,
+            'extensions': options.extensions,
+            'encoding': options.encoding,
+            'output_format': options.output_format}, options.verbose
+
+def run():
+    """Run Markdown from the command line."""
+
+    # Parse options and adjust logging level if necessary
+    options, logging_level = parse_options()
+    if not options: sys.exit(0)
+    if logging_level: logging.getLogger('MARKDOWN').setLevel(logging_level)
+
+    # Run
+    markdown.markdownFromFile(**options)
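
run() simply forwards the parsed options to markdown.markdownFromFile();
calling it directly looks roughly like this (the file names are placeholders,
and the keywords mirror the dictionary built by parse_options above):

    import markdown

    # Roughly equivalent to: python markdown.py source.txt -f destination.html
    markdown.markdownFromFile(input='source.txt',
                              output='destination.html',
                              extensions=[],
                              encoding=None,
                              safe_mode=False,
                              output_format='xhtml1')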

Added: packages-ng/markdown/etree_loader.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/etree_loader.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,33 @@
+
+from markdown import message, CRITICAL
+import sys
+
+## Import
+def importETree():
+    """Import the best implementation of ElementTree, return a module object."""
+    etree_in_c = None
+    try: # Is it Python 2.5+ with C implementation of ElementTree installed?
+        import xml.etree.cElementTree as etree_in_c
+    except ImportError:
+        try: # Is it Python 2.5+ with Python implementation of ElementTree?
+            import xml.etree.ElementTree as etree
+        except ImportError:
+            try: # An earlier version of Python with cElementTree installed?
+                import cElementTree as etree_in_c
+            except ImportError:
+                try: # An earlier version of Python with Python ElementTree?
+                    import elementtree.ElementTree as etree
+                except ImportError:
+                    message(CRITICAL, "Failed to import ElementTree")
+                    sys.exit(1)
+    if etree_in_c and etree_in_c.VERSION < "1.0":
+        message(CRITICAL, "cElementTree version 1.0 or higher is required.")
+        sys.exit(1)
+    elif etree_in_c :
+        return etree_in_c
+    elif etree.VERSION < "1.1":
+        message(CRITICAL, "ElementTree version 1.1 or higher is required.")
+        sys.exit(1)
+    else :
+        return etree
+
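
Per the note in __init__.py, extensions should build output through
markdown.etree (whichever implementation importETree() selected) rather than
importing ElementTree themselves. A tiny sketch:

    import markdown

    el = markdown.etree.Element('p')
    el.text = u'hello '
    strong = markdown.etree.SubElement(el, 'strong')
    strong.text = u'world'
    print markdown.etree.tostring(el)   # e.g. <p>hello <strong>world</strong></p>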

Added: packages-ng/markdown/extensions/__init__.py
==============================================================================

Added: packages-ng/markdown/extensions/abbr.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/abbr.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,95 @@
+'''
+Abbreviation Extension for Python-Markdown
+==========================================
+
+This extension adds abbreviation handling to Python-Markdown.
+
+Simple Usage:
+
+    >>> import markdown
+    >>> text = """
+    ... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
+    ...
+    ... *[ABBR]: Abbreviation
+    ... *[REF]: Abbreviation Reference
+    ... """
+    >>> markdown.markdown(text, ['abbr'])
+    u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
+
+Copyright 2007-2008
+* [Waylan Limberg](http://achinghead.com/)
+* [Seemant Kulleen](http://www.kulleen.org/)
+	
+
+'''
+
+import markdown, re
+from markdown import etree
+
+# Global Vars
+ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
+
+class AbbrExtension(markdown.Extension):
+    """ Abbreviation Extension for Python-Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Insert AbbrPreprocessor before ReferencePreprocessor. """
+        md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
+        
+           
+class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
+    """ Abbreviation Preprocessor - parse text for abbr references. """
+
+    def run(self, lines):
+        '''
+        Find and remove all Abbreviation references from the text.
+        Each reference is set as a new AbbrPattern in the markdown instance.
+        
+        '''
+        new_text = []
+        for line in lines:
+            m = ABBR_REF_RE.match(line)
+            if m:
+                abbr = m.group('abbr').strip()
+                title = m.group('title').strip()
+                self.markdown.inlinePatterns['abbr-%s'%abbr] = \
+                    AbbrPattern(self._generate_pattern(abbr), title)
+            else:
+                new_text.append(line)
+        return new_text
+    
+    def _generate_pattern(self, text):
+        '''
+        Given a string, returns a regex pattern to match that string. 
+        
+        'HTML' -> r'(?P<abbr>[H][T][M][L])' 
+        
+        Note: we force each char as a literal match (in brackets) as we don't 
+        know what they will be beforehand.
+
+        '''
+        chars = list(text)
+        for i in range(len(chars)):
+            chars[i] = r'[%s]' % chars[i]
+        return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
+
+
+class AbbrPattern(markdown.inlinepatterns.Pattern):
+    """ Abbreviation inline pattern. """
+
+    def __init__(self, pattern, title):
+        markdown.inlinepatterns.Pattern.__init__(self, pattern)
+        self.title = title
+
+    def handleMatch(self, m):
+        abbr = etree.Element('abbr')
+        abbr.text = m.group('abbr')
+        abbr.set('title', self.title)
+        return abbr
+
+def makeExtension(configs=None):
+    return AbbrExtension(configs=configs)
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
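
Besides the extension-name string used in the doctest above, an Extension
instance can be passed to Markdown directly (as described in the extensions
docstring in __init__.py); a sketch, with placeholder input text:

    import markdown
    from markdown.extensions.abbr import AbbrExtension

    md = markdown.Markdown(extensions=[AbbrExtension(configs={})])
    html = md.convert(u"The HTML spec.\n\n*[HTML]: Hyper Text Markup Language")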

Added: packages-ng/markdown/extensions/codehilite.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/codehilite.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+
+"""
+CodeHilite Extension for Python-Markdown
+========================================
+
+Adds code/syntax highlighting to standard Python-Markdown code blocks.
+
+Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
+
+Project website: <http://www.freewisdom.org/project/python-markdown/CodeHilite>
+Contact: markdown at freewisdom.org
+ 
+License: BSD (see ../docs/LICENSE for details)
+  
+Dependencies:
+* [Python 2.3+](http://python.org/)
+* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
+* [Pygments](http://pygments.org/)
+
+"""
+
+import markdown
+
+# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
+
+try:
+    TAB_LENGTH = markdown.TAB_LENGTH
+except AttributeError:
+    TAB_LENGTH = 4
+
+
+# ------------------ The Main CodeHilite Class ----------------------
+class CodeHilite:
+    """
+    Determine the language of the source code, and pass it to the Pygments highlighter.
+
+    Basic Usage:
+        >>> code = CodeHilite(src = 'some text')
+        >>> html = code.hilite()
+    
+    * src: Source string or any object with a .readline attribute.
+      
+    * linenos: (Boolean) Turn line numbering 'on' or 'off' (off by default).
+
+    * css_class: Set class name of wrapper div ('codehilite' by default).
+      
+    Low Level Usage:
+        >>> code = CodeHilite()
+        >>> code.src = 'some text' # String or anything with a .readline attr.
+        >>> code.linenos = True  # True or False; Turns line numbering on or off.
+        >>> html = code.hilite()
+    
+    """
+
+    def __init__(self, src=None, linenos=False, css_class="codehilite"):
+        self.src = src
+        self.lang = None
+        self.linenos = linenos
+        self.css_class = css_class
+
+    def hilite(self):
+        """
+        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with 
+        optional line numbers. The output should then be styled with css to 
+        your liking. No styles are applied by default - only styling hooks 
+        (i.e.: <span class="k">). 
+
+        returns : A string of html.
+    
+        """
+
+        self.src = self.src.strip('\n')
+        
+        self._getLang()
+
+        try:
+            from pygments import highlight
+            from pygments.lexers import get_lexer_by_name, guess_lexer, \
+                                        TextLexer
+            from pygments.formatters import HtmlFormatter
+        except ImportError:
+            # just escape and pass through
+            txt = self._escape(self.src)
+            if self.linenos:
+                txt = self._number(txt)
+            else :
+                txt = '<div class="%s"><pre>%s</pre></div>\n'% \
+                        (self.css_class, txt)
+            return txt
+        else:
+            try:
+                lexer = get_lexer_by_name(self.lang)
+            except ValueError:
+                try:
+                    lexer = guess_lexer(self.src)
+                except ValueError:
+                    lexer = TextLexer()
+            formatter = HtmlFormatter(linenos=self.linenos, 
+                                      cssclass=self.css_class)
+            return highlight(self.src, lexer, formatter)
+
+    def _escape(self, txt):
+        """ basic html escaping """
+        txt = txt.replace('&', '&amp;')
+        txt = txt.replace('<', '&lt;')
+        txt = txt.replace('>', '&gt;')
+        txt = txt.replace('"', '&quot;')
+        return txt
+
+    def _number(self, txt):
+        """ Use <ol> for line numbering """
+        # Fix Whitespace
+        txt = txt.replace('\t', ' '*TAB_LENGTH)
+        txt = txt.replace(" "*4, "&nbsp; &nbsp; ")
+        txt = txt.replace(" "*3, "&nbsp; &nbsp;")
+        txt = txt.replace(" "*2, "&nbsp; ")        
+        
+        # Add line numbers
+        lines = txt.splitlines()
+        txt = '<div class="codehilite"><pre><ol>\n'
+        for line in lines:
+            txt += '\t<li>%s</li>\n'% line
+        txt += '</ol></pre></div>\n'
+        return txt
+
+
+    def _getLang(self):
+        """ 
+        Determines the language of a code block from a shebang line, and whether
+        that line should be removed or left in place. If the shebang line
+        contains a path (even a single /) then it is assumed to be a real
+        shebang line and is left alone. However, if no path is given
+        (e.g.: #!python or :::python) then it is assumed to be a mock shebang
+        for language identification of a code fragment and is removed from the
+        code block prior to processing for code highlighting. When a mock
+        shebang (e.g.: #!python) is found, line numbering is turned on. When
+        colons are found in place of a shebang (e.g.: :::python), line
+        numbering is left in the current state - off by default.
+        
+        """
+
+        import re
+    
+        #split text into lines
+        lines = self.src.split("\n")
+        #pull first line to examine
+        fl = lines.pop(0)
+    
+        c = re.compile(r'''
+            (?:(?:::+)|(?P<shebang>[#]!))	# Shebang or 2 or more colons.
+            (?P<path>(?:/\w+)*[/ ])?        # Zero or 1 path 
+            (?P<lang>[\w+-]*)               # The language 
+            ''',  re.VERBOSE)
+        # search first line for shebang
+        m = c.search(fl)
+        if m:
+            # we have a match
+            try:
+                self.lang = m.group('lang').lower()
+            except IndexError:
+                self.lang = None
+            if m.group('path'):
+                # path exists - restore first line
+                lines.insert(0, fl)
+            if m.group('shebang'):
+                # shebang exists - use line numbers
+                self.linenos = True
+        else:
+            # No match
+            lines.insert(0, fl)
+        
+        self.src = "\n".join(lines).strip("\n")
+
+
+
+# ------------------ The Markdown Extension -------------------------------
+class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor):
+    """ Hilight source code in code blocks. """
+
+    def run(self, root):
+        """ Find code blocks and store in htmlStash. """
+        blocks = root.getiterator('pre')
+        for block in blocks:
+            children = block.getchildren()
+            if len(children) == 1 and children[0].tag == 'code':
+                code = CodeHilite(children[0].text, 
+                            linenos=self.config['force_linenos'][0],
+                            css_class=self.config['css_class'][0])
+                placeholder = self.markdown.htmlStash.store(code.hilite(), 
+                                                            safe=True)
+                # Clear codeblock in etree instance
+                block.clear()
+                # Change to p element which will later 
+                # be removed when inserting raw html
+                block.tag = 'p'
+                block.text = placeholder
+
+
+class CodeHiliteExtension(markdown.Extension):
+    """ Add source code hilighting to markdown codeblocks. """
+
+    def __init__(self, configs):
+        # define default configs
+        self.config = {
+            'force_linenos' : [False, "Force line numbers - Default: False"],
+            'css_class' : ["codehilite", 
+                           "Set class name for wrapper <div> - Default: codehilite"],
+            }
+        
+        # Override defaults with user settings
+        for key, value in configs:
+            self.setConfig(key, value) 
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add HilitePostprocessor to Markdown instance. """
+        hiliter = HiliteTreeprocessor(md)
+        hiliter.config = self.config
+        md.treeprocessors.add("hilite", hiliter, "_begin") 
+
+
+def makeExtension(configs={}):
+    return CodeHiliteExtension(configs=configs)
+
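
A minimal usage sketch for the codehilite extension above (the input text is
hypothetical; Pygments is assumed to be installed):

    import markdown

    text = "Some prose, then a code block:\n\n    #!python\n    def hello():\n        return 'world'\n"
    html = markdown.markdown(text, extensions=['codehilite'])
    # The '#!python' mock shebang selects the Python lexer, turns line
    # numbering on and is stripped from the block; ':::python' would set the
    # language without forcing line numbers, and a real shebang such as
    # '#!/usr/bin/python' is left in place. Without Pygments the block is
    # only escaped (and numbered with an <ol> if line numbers are on).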

Added: packages-ng/markdown/extensions/def_list.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/def_list.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+"""
+Definition List Extension for Python-Markdown
+=============================================
+
+Added parsing of Definition Lists to Python-Markdown.
+
+A simple example:
+
+    Apple
+    :   Pomaceous fruit of plants of the genus Malus in 
+        the family Rosaceae.
+    :   An American computer company.
+
+    Orange
+    :   The fruit of an evergreen tree of the genus Citrus.
+
+Copyright 2008 - [Waylan Limberg](http://achinghead.com)
+
+"""
+
+import markdown, re
+from markdown import etree
+
+
+class DefListProcessor(markdown.blockprocessors.BlockProcessor):
+    """ Process Definition Lists. """
+
+    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+        terms = [l.strip() for l in block[:m.start()].split('\n') if l.strip()]
+        d, theRest = self.detab(block[m.end():])
+        if d:
+            d = '%s\n%s' % (m.group(2), d)
+        else:
+            d = m.group(2)
+        sibling = self.lastChild(parent)
+        if not terms and sibling.tag == 'p':
+            # The previous paragraph contains the terms
+            state = 'looselist'
+            terms = sibling.text.split('\n')
+            parent.remove(sibling)
+            # Acquire new sibling
+            sibling = self.lastChild(parent)
+        else:
+            state = 'list'
+
+        if sibling and sibling.tag == 'dl':
+            # This is another item on an existing list
+            dl = sibling
+            if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
+                state = 'looselist'
+        else:
+            # This is a new list
+            dl = etree.SubElement(parent, 'dl')
+        # Add terms
+        for term in terms:
+            dt = etree.SubElement(dl, 'dt')
+            dt.text = term
+        # Add definition
+        self.parser.state.set(state)
+        dd = etree.SubElement(dl, 'dd')
+        self.parser.parseBlocks(dd, [d])
+        self.parser.state.reset()
+
+        if theRest:
+            blocks.insert(0, theRest)
+
+class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
+    """ Process indented children of definition list items. """
+
+    ITEM_TYPES = ['dd']
+    LIST_TYPES = ['dl']
+
+    def create_item(self, parent, block):
+        """ Create a new dd and parse the block with it as the parent. """
+        dd = markdown.etree.SubElement(parent, 'dd')
+        self.parser.parseBlocks(dd, [block])
+ 
+
+
+class DefListExtension(markdown.Extension):
+    """ Add definition lists to Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add an instance of DefListProcessor to BlockParser. """
+        md.parser.blockprocessors.add('defindent',
+                                      DefListIndentProcessor(md.parser),
+                                      '>indent')
+        md.parser.blockprocessors.add('deflist', 
+                                      DefListProcessor(md.parser),
+                                      '>ulist')
+
+
+def makeExtension(configs={}):
+    return DefListExtension(configs=configs)
+
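
A short, hedged sketch of invoking the def_list extension above (the output
markup is described only loosely):

    import markdown

    text = "Apple\n:   Pomaceous fruit of plants of the genus Malus.\n"
    html = markdown.markdown(text, extensions=['def_list'])
    # Produces a <dl> with a <dt>Apple</dt> term followed by a <dd>
    # containing the definition text.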

Added: packages-ng/markdown/extensions/extra.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/extra.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Python-Markdown Extra Extension
+===============================
+
+A compilation of various Python-Markdown extensions that imitates
+[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
+
+Note that each of the individual extensions still needs to be available
+on your PYTHONPATH. This extension simply wraps them all up as a 
+convenience so that only one extension needs to be listed when
+initiating Markdown. See the documentation for each individual
+extension for specifics about that extension.
+
+In the event that one or more of the supported extensions are not 
+available for import, Markdown will issue a warning and simply continue 
+without that extension. 
+
+There may be additional extensions that are distributed with 
+Python-Markdown that are not included here in Extra. Those extensions
+are not part of PHP Markdown Extra, and therefore, not part of
+Python-Markdown Extra. If you really would like Extra to include
+additional extensions, we suggest creating your own clone of Extra
+under a different name. You could also edit the `extensions` global 
+variable defined below, but be aware that such changes may be lost 
+when you upgrade to any future version of Python-Markdown.
+
+"""
+
+import markdown
+
+extensions = ['fenced_code',
+              'footnotes',
+              'headerid',
+              'def_list',
+              'tables',
+              'abbr',
+              ]
+              
+
+class ExtraExtension(markdown.Extension):
+    """ Add various extensions to Markdown class."""
+
+    def extendMarkdown(self, md, md_globals):
+        """ Register extension instances. """
+        md.registerExtensions(extensions, self.config)
+
+def makeExtension(configs={}):
+    return ExtraExtension(configs=dict(configs))
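
A minimal sketch of using the Extra wrapper above (it simply enables the listed
sub-extensions, so all of them must be importable):

    import markdown

    text = "Term\n:   A definition handled by the def_list sub-extension.\n"
    html = markdown.markdown(text, extensions=['extra'])
    # Equivalent to passing 'fenced_code', 'footnotes', 'headerid',
    # 'def_list', 'tables' and 'abbr' individually.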

Added: packages-ng/markdown/extensions/fenced_code.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/fenced_code.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+"""
+Fenced Code Extension for Python Markdown
+=========================================
+
+This extension adds Fenced Code Blocks to Python-Markdown.
+
+    >>> import markdown
+    >>> text = '''
+    ... A paragraph before a fenced code block:
+    ... 
+    ... ~~~
+    ... Fenced code block
+    ... ~~~
+    ... '''
+    >>> html = markdown.markdown(text, extensions=['fenced_code'])
+    >>> html
+    u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
+
+Works with safe_mode also (we check this because we are using the HtmlStash):
+
+    >>> markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
+    u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
+    
+Include tildes in a code block and wrap with blank lines:
+
+    >>> text = '''
+    ... ~~~~~~~~
+    ... 
+    ... ~~~~
+    ... 
+    ... ~~~~~~~~'''
+    >>> markdown.markdown(text, extensions=['fenced_code'])
+    u'<pre><code>\\n~~~~\\n\\n</code></pre>'
+
+Multiple blocks and language tags:
+
+    >>> text = '''
+    ... ~~~~{.python}
+    ... block one
+    ... ~~~~
+    ... 
+    ... ~~~~.html
+    ... <p>block two</p>
+    ... ~~~~'''
+    >>> markdown.markdown(text, extensions=['fenced_code'])
+    u'<pre><code class="python">block one\\n</code></pre>\\n\\n<pre><code class="html">&lt;p&gt;block two&lt;/p&gt;\\n</code></pre>'
+
+Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
+
+Project website: <http://www.freewisdom.org/project/python-markdown/Fenced__Code__Blocks>
+Contact: markdown at freewisdom.org
+
+License: BSD (see ../docs/LICENSE for details) 
+
+Dependencies:
+* [Python 2.3+](http://python.org)
+* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
+
+"""
+
+import markdown, re
+
+# Global vars
+FENCED_BLOCK_RE = re.compile( \
+    r'(?P<fence>^~{3,})[ ]*(\{?\.(?P<lang>[a-zA-Z0-9_-]*)\}?)?[ ]*\n(?P<code>.*?)(?P=fence)[ ]*$', 
+    re.MULTILINE|re.DOTALL
+    )
+CODE_WRAP = '<pre><code%s>%s</code></pre>'
+LANG_TAG = ' class="%s"'
+
+
+class FencedCodeExtension(markdown.Extension):
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add FencedBlockPreprocessor to the Markdown instance. """
+
+        md.preprocessors.add('fenced_code_block', 
+                                 FencedBlockPreprocessor(md), 
+                                 "_begin")
+
+
+class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
+    
+    def run(self, lines):
+        """ Match and store Fenced Code Blocks in the HtmlStash. """
+        text = "\n".join(lines)
+        while 1:
+            m = FENCED_BLOCK_RE.search(text)
+            if m:
+                lang = ''
+                if m.group('lang'):
+                    lang = LANG_TAG % m.group('lang')
+                code = CODE_WRAP % (lang, self._escape(m.group('code')))
+                placeholder = self.markdown.htmlStash.store(code, safe=True)
+                text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
+            else:
+                break
+        return text.split("\n")
+
+    def _escape(self, txt):
+        """ basic html escaping """
+        txt = txt.replace('&', '&amp;')
+        txt = txt.replace('<', '&lt;')
+        txt = txt.replace('>', '&gt;')
+        txt = txt.replace('"', '&quot;')
+        return txt
+
+
+def makeExtension(configs=None):
+    return FencedCodeExtension()
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()

Added: packages-ng/markdown/extensions/footnotes.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/footnotes.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,293 @@
+"""
+========================= FOOTNOTES =================================
+
+This section adds footnote handling to markdown.  It can be used as
+an example for extending python-markdown with relatively complex
+functionality.  While in this case the extension is included inside
+the module itself, it could just as easily be added from outside the
+module.  Note that all markdown classes above are ignorant about
+footnotes.  All footnote functionality is provided separately and
+then added to the markdown instance at run time.
+
+Footnote functionality is attached by calling the extendMarkdown()
+method of FootnoteExtension.  The method also registers the
+extension to allow its state to be reset by a call to the reset()
+method.
+
+Example:
+    Footnotes[^1] have a label[^label] and a definition[^!DEF].
+
+    [^1]: This is a footnote
+    [^label]: A footnote on "label"
+    [^!DEF]: The footnote for definition
+
+"""
+
+import re, markdown
+from markdown import etree
+
+FN_BACKLINK_TEXT = "zz1337820767766393qq"
+NBSP_PLACEHOLDER =  "qq3936677670287331zz"
+DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)')
+TABBED_RE = re.compile(r'((\t)|(    ))(.*)')
+
+class FootnoteExtension(markdown.Extension):
+    """ Footnote Extension. """
+
+    def __init__ (self, configs):
+        """ Setup configs. """
+        self.config = {'PLACE_MARKER':
+                       ["///Footnotes Go Here///",
+                        "The text string that marks where the footnotes go"]}
+
+        for key, value in configs:
+            self.config[key][0] = value
+            
+        self.reset()
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add pieces to Markdown. """
+        md.registerExtension(self)
+        self.parser = md.parser
+        # Insert a preprocessor before ReferencePreprocessor
+        md.preprocessors.add("footnote", FootnotePreprocessor(self),
+                             "<reference")
+        # Insert an inline pattern before ImageReferencePattern
+        FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
+        md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
+                              "<reference")
+        # Insert a tree-processor that would actually add the footnote div
+        # This must be before the inline treeprocessor so inline patterns
+        # run on the contents of the div.
+        md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
+                                 "<inline")
+        # Insert a postprocessor after the amp_substitute processor
+        md.postprocessors.add("footnote", FootnotePostprocessor(self),
+                                  ">amp_substitute")
+
+    def reset(self):
+        """ Clear the footnotes on reset. """
+        self.footnotes = markdown.odict.OrderedDict()
+
+    def findFootnotesPlaceholder(self, root):
+        """ Return ElementTree Element that contains Footnote placeholder. """
+        def finder(element):
+            for child in element:
+                if child.text:
+                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
+                        return child, True
+                if child.tail:
+                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
+                        return (child, element), False
+                finder(child)
+            return None
+                
+        res = finder(root)
+        return res
+
+    def setFootnote(self, id, text):
+        """ Store a footnote for later retrieval. """
+        self.footnotes[id] = text
+
+    def makeFootnoteId(self, id):
+        """ Return footnote link id. """
+        return 'fn:%s' % id
+
+    def makeFootnoteRefId(self, id):
+        """ Return footnote back-link id. """
+        return 'fnref:%s' % id
+
+    def makeFootnotesDiv(self, root):
+        """ Return div of footnotes as et Element. """
+
+        if not self.footnotes.keys():
+            return None
+
+        div = etree.Element("div")
+        div.set('class', 'footnote')
+        hr = etree.SubElement(div, "hr")
+        ol = etree.SubElement(div, "ol")
+
+        for id in self.footnotes.keys():
+            li = etree.SubElement(ol, "li")
+            li.set("id", self.makeFootnoteId(id))
+            self.parser.parseChunk(li, self.footnotes[id])
+            backlink = etree.Element("a")
+            backlink.set("href", "#" + self.makeFootnoteRefId(id))
+            backlink.set("rev", "footnote")
+            backlink.set("title", "Jump back to footnote %d in the text" % \
+                            (self.footnotes.index(id)+1))
+            backlink.text = FN_BACKLINK_TEXT
+
+            if li.getchildren():
+                node = li[-1]
+                if node.tag == "p":
+                    node.text = node.text + NBSP_PLACEHOLDER
+                    node.append(backlink)
+                else:
+                    p = etree.SubElement(li, "p")
+                    p.append(backlink)
+        return div
+
+
+class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
+    """ Find all footnote references and store for later use. """
+
+    def __init__ (self, footnotes):
+        self.footnotes = footnotes
+
+    def run(self, lines):
+        lines = self._handleFootnoteDefinitions(lines)
+        text = "\n".join(lines)
+        return text.split("\n")
+
+    def _handleFootnoteDefinitions(self, lines):
+        """
+        Recursively find all footnote definitions in lines.
+
+        Keywords:
+
+        * lines: A list of lines of text
+        
+        Return: A list of lines with footnote definitions removed.
+        
+        """
+        i, id, footnote = self._findFootnoteDefinition(lines)
+
+        if id :
+            plain = lines[:i]
+            detabbed, theRest = self.detectTabbed(lines[i+1:])
+            self.footnotes.setFootnote(id,
+                                       footnote + "\n"
+                                       + "\n".join(detabbed))
+            more_plain = self._handleFootnoteDefinitions(theRest)
+            return plain + [""] + more_plain
+        else :
+            return lines
+
+    def _findFootnoteDefinition(self, lines):
+        """
+        Find the parts of a footnote definition.
+
+        Keywords:
+
+        * lines: A list of lines of text.
+
+        Return: A three item tuple containing the index of the first line of a
+        footnote definition, the id of the definition and the body of the 
+        definition.
+        
+        """
+        counter = 0
+        for line in lines:
+            m = DEF_RE.match(line)
+            if m:
+                return counter, m.group(2), m.group(3)
+            counter += 1
+        return counter, None, None
+
+    def detectTabbed(self, lines):
+        """ Find indented text and remove indent before further proccesing.
+
+        Keyword arguments:
+
+        * lines: an array of strings
+
+        Returns: a list of post processed items and the unused
+        remainder of the original list
+
+        """
+        items = []
+        item = -1
+        i = 0 # to keep track of where we are
+
+        def detab(line):
+            match = TABBED_RE.match(line)
+            if match:
+               return match.group(4)
+
+        for line in lines:
+            if line.strip(): # Non-blank line
+                line = detab(line)
+                if line:
+                    items.append(line)
+                    i += 1
+                    continue
+                else:
+                    return items, lines[i:]
+
+            else: # Blank line: _maybe_ we are done.
+                i += 1 # advance
+
+                # Find the next non-blank line
+                for j in range(i, len(lines)):
+                    if lines[j].strip():
+                        next_line = lines[j]; break
+                else:
+                    break # There is no more text; we are done.
+
+                # Check if the next non-blank line is tabbed
+                if detab(next_line): # Yes, more work to do.
+                    items.append("")
+                    continue
+                else:
+                    break # No, we are done.
+        else:
+            i += 1
+
+        return items, lines[i:]
+
+
+class FootnotePattern(markdown.inlinepatterns.Pattern):
+    """ InlinePattern for footnote markers in a document's body text. """
+
+    def __init__(self, pattern, footnotes):
+        markdown.inlinepatterns.Pattern.__init__(self, pattern)
+        self.footnotes = footnotes
+
+    def handleMatch(self, m):
+        sup = etree.Element("sup")
+        a = etree.SubElement(sup, "a")
+        id = m.group(2)
+        sup.set('id', self.footnotes.makeFootnoteRefId(id))
+        a.set('href', '#' + self.footnotes.makeFootnoteId(id))
+        a.set('rel', 'footnote')
+        a.text = str(self.footnotes.footnotes.index(id) + 1)
+        return sup
+
+
+class FootnoteTreeprocessor(markdown.treeprocessors.Treeprocessor):
+    """ Build and append footnote div to end of document. """
+
+    def __init__ (self, footnotes):
+        self.footnotes = footnotes
+
+    def run(self, root):
+        footnotesDiv = self.footnotes.makeFootnotesDiv(root)
+        if footnotesDiv:
+            result = self.footnotes.findFootnotesPlaceholder(root)
+            if result:
+                node, isText = result
+                if isText:
+                    # The placeholder is in node.text: drop it and insert
+                    # the footnotes div as the node's first child.
+                    node.text = None
+                    node.insert(0, footnotesDiv)
+                else:
+                    # The placeholder is in child.tail: insert the footnotes
+                    # div right after child within its parent element.
+                    child, element = node
+                    ind = element.getchildren().index(child)
+                    element.insert(ind + 1, footnotesDiv)
+                    child.tail = None
+            else:
+                root.append(footnotesDiv)
+
+class FootnotePostprocessor(markdown.postprocessors.Postprocessor):
+    """ Replace placeholders with html entities. """
+
+    def run(self, text):
+        text = text.replace(FN_BACKLINK_TEXT, "&#8617;")
+        return text.replace(NBSP_PLACEHOLDER, "&#160;")
+
+def makeExtension(configs=[]):
+    """ Return an instance of the FootnoteExtension """
+    return FootnoteExtension(configs=configs)
+
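
A minimal usage sketch for the footnotes extension above, reusing the syntax
from its docstring (output described only loosely):

    import markdown

    text = "Footnotes[^1] have a label.\n\n[^1]: This is a footnote\n"
    html = markdown.markdown(text, extensions=['footnotes'])
    # The reference becomes a <sup><a href="#fn:1">...</a></sup> link, and a
    # <div class="footnote"> with an <ol> of definitions is appended at the
    # end of the document (or wherever the PLACE_MARKER text appears).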

Added: packages-ng/markdown/extensions/headerid.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/headerid.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+
+"""
+HeaderID Extension for Python-Markdown
+======================================
+
+Adds ability to set HTML IDs for headers.
+
+Basic usage:
+
+    >>> import markdown
+    >>> text = "# Some Header # {#some_id}"
+    >>> md = markdown.markdown(text, ['headerid'])
+    >>> md
+    u'<h1 id="some_id">Some Header</h1>'
+
+All header IDs are unique:
+
+    >>> text = '''
+    ... #Header
+    ... #Another Header {#header}
+    ... #Third Header {#header}'''
+    >>> md = markdown.markdown(text, ['headerid'])
+    >>> md
+    u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>'
+
+To fit within a html template's hierarchy, set the header base level:
+
+    >>> text = '''
+    ... #Some Header
+    ... ## Next Level'''
+    >>> md = markdown.markdown(text, ['headerid(level=3)'])
+    >>> md
+    u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>'
+
+Turn off auto generated IDs:
+
+    >>> text = '''
+    ... # Some Header
+    ... # Header with ID # { #foo }'''
+    >>> md = markdown.markdown(text, ['headerid(forceid=False)'])
+    >>> md
+    u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>'
+
+Use with MetaData extension:
+
+    >>> text = '''header_level: 2
+    ... header_forceid: Off
+    ...
+    ... # A Header'''
+    >>> md = markdown.markdown(text, ['headerid', 'meta'])
+    >>> md
+    u'<h2>A Header</h2>'
+
+Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
+
+Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
+Contact: markdown at freewisdom.org
+
+License: BSD (see ../docs/LICENSE for details) 
+
+Dependencies:
+* [Python 2.3+](http://python.org)
+* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
+
+"""
+
+import markdown
+from markdown import etree
+import re
+from string import ascii_lowercase, digits, punctuation
+
+ID_CHARS = ascii_lowercase + digits + '-_'
+IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
+
+
+class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
+    """ Replacement BlockProcessor for Header IDs. """
+
+    # Detect a header at start of any line in block
+    RE = re.compile(r"""(^|\n)
+                        (?P<level>\#{1,6})  # group('level') = string of hashes
+                        (?P<header>.*?)     # group('header') = Header text
+                        \#*                 # optional closing hashes
+                        (?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
+                        (\n|$)              #  ^^ group('id') = id attribute
+                     """,
+                     re.VERBOSE)
+
+    IDs = []
+
+    def test(self, parent, block):
+        return bool(self.RE.search(block))
+
+    def run(self, parent, blocks):
+        block = blocks.pop(0)
+        m = self.RE.search(block)
+        if m:
+            before = block[:m.start()] # All lines before header
+            after = block[m.end():]    # All lines after header
+            if before:
+                # As the header was not the first line of the block and the
+                # lines before the header must be parsed first,
+                # recursively parse these lines as a block.
+                self.parser.parseBlocks(parent, [before])
+            # Create header using named groups from RE
+            start_level, force_id = self._get_meta()
+            level = len(m.group('level')) + start_level
+            if level > 6: 
+                level = 6
+            h = markdown.etree.SubElement(parent, 'h%d' % level)
+            h.text = m.group('header').strip()
+            if m.group('id'):
+                h.set('id', self._unique_id(m.group('id')))
+            elif force_id:
+                h.set('id', self._create_id(m.group('header').strip()))
+            if after:
+                # Insert remaining lines as first block for future parsing.
+                blocks.insert(0, after)
+        else:
+            # This should never happen, but just in case...
+            markdown.message(markdown.CRITICAL, "We've got a problem header!")
+
+    def _get_meta(self):
+        """ Return meta data suported by this ext as a tuple """
+        level = int(self.config['level'][0]) - 1
+        force = self._str2bool(self.config['forceid'][0])
+        if hasattr(self.md, 'Meta'):
+            if self.md.Meta.has_key('header_level'):
+                level = int(self.md.Meta['header_level'][0]) - 1
+            if self.md.Meta.has_key('header_forceid'): 
+                force = self._str2bool(self.md.Meta['header_forceid'][0])
+        return level, force
+
+    def _str2bool(self, s, default=False):
+        """ Convert a string to a booleen value. """
+        s = str(s)
+        if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
+            return False
+        elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
+            return True
+        return default
+
+    def _unique_id(self, id):
+        """ Ensure ID is unique. Append '_1', '_2'... if not """
+        while id in self.IDs:
+            m = IDCOUNT_RE.match(id)
+            if m:
+                id = '%s_%d'% (m.group(1), int(m.group(2))+1)
+            else:
+                id = '%s_%d'% (id, 1)
+        self.IDs.append(id)
+        return id
+
+    def _create_id(self, header):
+        """ Return ID from Header text. """
+        h = ''
+        for c in header.lower().replace(' ', '_'):
+            if c in ID_CHARS:
+                h += c
+            elif c not in punctuation:
+                h += '+'
+        return self._unique_id(h)
+
+
+class HeaderIdExtension (markdown.Extension):
+    def __init__(self, configs):
+        # set defaults
+        self.config = {
+                'level' : ['1', 'Base level for headers.'],
+                'forceid' : ['True', 'Force all headers to have an id.']
+            }
+
+        for key, value in configs:
+            self.setConfig(key, value)
+
+    def extendMarkdown(self, md, md_globals):
+        md.registerExtension(self)
+        self.processor = HeaderIdProcessor(md.parser)
+        self.processor.md = md
+        self.processor.config = self.config
+        # Replace existing hasheader in place.
+        md.parser.blockprocessors['hashheader'] = self.processor
+
+    def reset(self):
+        self.processor.IDs = []
+
+
+def makeExtension(configs=None):
+    return HeaderIdExtension(configs=configs)
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
+

Added: packages-ng/markdown/extensions/html_tidy.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/html_tidy.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+"""
+HTML Tidy Extension for Python-Markdown
+=======================================
+
+Runs [HTML Tidy][] on the output of Python-Markdown using the [uTidylib][] 
+Python wrapper. Both libtidy and uTidylib must be installed on your system.
+
+Note that any Tidy [options][] can be passed in as extension configs. So, 
+for example, to output HTML rather than XHTML, set ``output_xhtml=0``. To
+indent the output, set ``indent=auto`` and to have Tidy wrap the output in 
+``<html>`` and ``<body>`` tags, set ``show_body_only=0``.
+
+[HTML Tidy]: http://tidy.sourceforge.net/
+[uTidylib]: http://utidylib.berlios.de/
+[options]: http://tidy.sourceforge.net/docs/quickref.html
+
+Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php) 
+
+Dependencies:
+* [Python2.3+](http://python.org)
+* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
+* [HTML Tidy](http://utidylib.berlios.de/)
+* [uTidylib](http://utidylib.berlios.de/)
+
+"""
+
+import markdown
+import tidy
+
+class TidyExtension(markdown.Extension):
+
+    def __init__(self, configs):
+        # Set defaults to match typical markdown behavior.
+        self.config = dict(output_xhtml=1,
+                           show_body_only=1,
+                          )
+        # Merge in user-defined configs, overriding any defaults if necessary.
+        for c in configs:
+            self.config[c[0]] = c[1]
+
+    def extendMarkdown(self, md, md_globals):
+        # Save options to markdown instance
+        md.tidy_options = self.config
+        # Add TidyProcessor to postprocessors
+        md.postprocessors['tidy'] = TidyProcessor(md)
+
+
+class TidyProcessor(markdown.postprocessors.Postprocessor):
+
+    def run(self, text):
+        # Pass text to Tidy. As Tidy does not accept unicode we need to encode
+        # it and decode its return value.
+        return unicode(tidy.parseString(text.encode('utf-8'), 
+                                        **self.markdown.tidy_options)) 
+
+
+def makeExtension(configs=None):
+    return TidyExtension(configs=configs)
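
A hedged sketch of enabling the html_tidy extension above; it only works if
libtidy and uTidylib are actually installed:

    import markdown

    html = markdown.markdown("# A title\n\nSome *text*.",
                             extensions=['html_tidy'])
    # Tidy post-processes the generated markup. The defaults above
    # (output_xhtml=1, show_body_only=1) mimic normal markdown output;
    # other Tidy options can be supplied as extension configs.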

Added: packages-ng/markdown/extensions/imagelinks.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/imagelinks.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,119 @@
+"""
+========================= IMAGE LINKS =================================
+
+
+Turns paragraphs like
+
+<~~~~~~~~~~~~~~~~~~~~~~~~
+dir/subdir
+dir/subdir
+dir/subdir
+~~~~~~~~~~~~~~
+dir/subdir
+dir/subdir
+dir/subdir
+~~~~~~~~~~~~~~~~~~~>
+
+into mini-photo galleries.
+
+"""
+
+import re, markdown
+import url_manager
+
+
+IMAGE_LINK = """<a href="%s"><img src="%s" title="%s"/></a>"""
+SLIDESHOW_LINK = """<a href="%s" target="_blank">[slideshow]</a>"""
+ALBUM_LINK = """&nbsp;<a href="%s">[%s]</a>"""
+
+
+class ImageLinksExtension(markdown.Extension):
+
+    def extendMarkdown(self, md, md_globals):
+
+        md.preprocessors.add("imagelink", ImageLinkPreprocessor(md), "_begin")
+
+
+class ImageLinkPreprocessor(markdown.preprocessors.Preprocessor):
+
+    def run(self, lines):
+
+        url = url_manager.BlogEntryUrl(url_manager.BlogUrl("all"),
+                                       "2006/08/29/the_rest_of_our")
+
+
+        all_images = []
+        blocks = []
+        in_image_block = False
+
+        new_lines = []
+        
+        for line in lines:
+
+            if line.startswith("<~~~~~~~"):
+                albums = []
+                rows = []
+                in_image_block = True
+
+            if not in_image_block:
+
+                new_lines.append(line)
+
+            else:
+
+                line = line.strip()
+                
+                if line.endswith("~~~~~~>") or not line:
+                    in_image_block = False
+                    new_block = "<div><br/><center><span class='image-links'>\n"
+
+                    album_url_hash = {}
+
+                    for row in rows:
+                        for photo_url, title in row:
+                            new_block += "&nbsp;"
+                            new_block += IMAGE_LINK % (photo_url,
+                                                       photo_url.get_thumbnail(),
+                                                       title)
+                            
+                            album_url_hash[str(photo_url.get_album())] = 1
+                        
+                    new_block += "<br/>"
+                            
+                    new_block += "</span>"
+                    new_block += SLIDESHOW_LINK % url.get_slideshow()
+
+                    album_urls = album_url_hash.keys()
+                    album_urls.sort()
+
+                    if len(album_urls) == 1:
+                        new_block += ALBUM_LINK % (album_urls[0], "complete album")
+                    else :
+                        for i in range(len(album_urls)) :
+                            new_block += ALBUM_LINK % (album_urls[i],
+                                                       "album %d" % (i + 1) )
+                    
+                    new_lines.append(new_block + "</center><br/></div>")
+
+                elif line[1:6] == "~~~~~" :
+                    rows.append([])  # start a new row
+                else :
+                    parts = line.split()
+                    line = parts[0]
+                    title = " ".join(parts[1:])
+
+                    album, photo = line.split("/")
+                    photo_url = url.get_photo(album, photo,
+                                              len(all_images)+1)
+                    all_images.append(photo_url)                        
+                    rows[-1].append((photo_url, title))
+
+                    if not album in albums :
+                        albums.append(album)
+
+        return new_lines
+
+
+def makeExtension(configs):
+    return ImageLinksExtension(configs)
+

Added: packages-ng/markdown/extensions/legacy.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/legacy.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,468 @@
+"""
+Legacy Extension for Python-Markdown
+====================================
+
+Replaces the core parser with the old one.
+
+"""
+
+import markdown, re
+from markdown import etree
+
+"""Basic and reusable regular expressions."""
+
+def wrapRe(raw_re) : return re.compile("^%s$" % raw_re, re.DOTALL)
+CORE_RE = {
+    'header':          wrapRe(r'(#{1,6})[ \t]*(.*?)[ \t]*(#*)'), # # A title
+    'reference-def':   wrapRe(r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)'),
+                               # [Google]: http://www.google.com/
+    'containsline':    wrapRe(r'([-]*)$|^([=]*)'), # -----, =====, etc.
+    'ol':              wrapRe(r'[ ]{0,3}[\d]*\.\s+(.*)'), # 1. text
+    'ul':              wrapRe(r'[ ]{0,3}[*+-]\s+(.*)'), # "* text"
+    'isline1':         wrapRe(r'(\**)'), # ***
+    'isline2':         wrapRe(r'(\-*)'), # ---
+    'isline3':         wrapRe(r'(\_*)'), # ___
+    'tabbed':          wrapRe(r'((\t)|(    ))(.*)'), # an indented line
+    'quoted':          wrapRe(r'[ ]{0,2}> ?(.*)'), # a quoted block ("> ...")
+    'containsline':    re.compile(r'^([-]*)$|^([=]*)$', re.M),
+    'attr':            re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
+}
+
+class MarkdownParser:
+    """Parser Markdown into a ElementTree."""
+
+    def __init__(self):
+        pass
+
+    def parseDocument(self, lines):
+        """Parse a markdown string into an ElementTree."""
+        # Create a ElementTree from the lines
+        root = etree.Element("div")
+        buffer = []
+        for line in lines:
+            if line.startswith("#"):
+                self.parseChunk(root, buffer)
+                buffer = [line]
+            else:
+                buffer.append(line)
+
+        self.parseChunk(root, buffer)
+
+        return etree.ElementTree(root)
+
+    def parseChunk(self, parent_elem, lines, inList=0, looseList=0):
+        """Process a chunk of markdown-formatted text and attach the parse to
+        an ElementTree node.
+
+        Process a section of a source document, looking for high
+        level structural elements like lists, block quotes, code
+        segments, html blocks, etc.  Some of those then get stripped
+        of their high level markup (e.g. get unindented) and the
+        lower-level markup is processed recursively.
+
+        Keyword arguments:
+
+        * parent_elem: The ElementTree element to which the content will be
+                       added.
+        * lines: a list of lines
+        * inList: a level
+
+        Returns: None
+
+        """
+        # Loop through lines until none left.
+        while lines:
+            # Skipping empty line
+            if not lines[0]:
+                lines = lines[1:]
+                continue
+
+            # Check if this section starts with a list, a blockquote or
+            # a code block.  If so, process them.
+            processFn = { 'ul':     self.__processUList,
+                          'ol':     self.__processOList,
+                          'quoted': self.__processQuote,
+                          'tabbed': self.__processCodeBlock}
+            for regexp in ['ul', 'ol', 'quoted', 'tabbed']:
+                m = CORE_RE[regexp].match(lines[0])
+                if m:
+                    processFn[regexp](parent_elem, lines, inList)
+                    return
+
+            # We are NOT looking at one of the high-level structures like
+            # lists or blockquotes.  So, it's just a regular paragraph
+            # (though perhaps nested inside a list or something else).  If
+            # we are NOT inside a list, we just need to look for a blank
+            # line to find the end of the block.  If we ARE inside a
+            # list, however, we need to consider that a sublist does not
+            # need to be separated by a blank line.  Rather, the following
+            # markup is legal:
+            #
+            # * The top level list item
+            #
+            #     Another paragraph of the list.  This is where we are now.
+            #     * Underneath we might have a sublist.
+            #
+
+            if inList:
+                start, lines  = self.__linesUntil(lines, (lambda line:
+                                 CORE_RE['ul'].match(line)
+                                 or CORE_RE['ol'].match(line)
+                                                  or not line.strip()))
+                self.parseChunk(parent_elem, start, inList-1,
+                                looseList=looseList)
+                inList = inList-1
+
+            else: # Ok, so it's just a simple block
+                test = lambda line: not line.strip() or line[0] == '>'
+                paragraph, lines = self.__linesUntil(lines, test)
+                if len(paragraph) and paragraph[0].startswith('#'):
+                    self.__processHeader(parent_elem, paragraph)
+                elif len(paragraph) and CORE_RE["isline3"].match(paragraph[0]):
+                    self.__processHR(parent_elem)
+                    lines = paragraph[1:] + lines
+                elif paragraph:
+                    self.__processParagraph(parent_elem, paragraph,
+                                          inList, looseList)
+
+            if lines and not lines[0].strip():
+                lines = lines[1:]  # skip the first (blank) line
+
+    def __processHR(self, parentElem):
+        hr = etree.SubElement(parentElem, "hr")
+
+    def __processHeader(self, parentElem, paragraph):
+        m = CORE_RE['header'].match(paragraph[0])
+        if m:
+            level = len(m.group(1))
+            h = etree.SubElement(parentElem, "h%d" % level)
+            h.text = m.group(2).strip()
+        else:
+            markdown.message(markdown.CRITICAL, "We've got a problem header!")
+
+    def __processParagraph(self, parentElem, paragraph, inList, looseList):
+
+        if ( parentElem.tag == 'li'
+                and not (looseList or parentElem.getchildren())):
+
+            # If this is the first paragraph inside "li", don't
+            # put <p> around it - append the paragraph bits directly
+            # onto parentElem
+            el = parentElem
+        else:
+            # Otherwise make a "p" element
+            el = etree.SubElement(parentElem, "p")
+
+        dump = []
+
+        # Searching for hr or header
+        for line in paragraph:
+            # it's hr
+            if CORE_RE["isline3"].match(line):
+                el.text = "\n".join(dump)
+                self.__processHR(el)
+                dump = []
+            # it's header
+            elif line.startswith("#"):
+                el.text = "\n".join(dump)
+                self.__processHeader(parentElem, [line])
+                dump = []
+            else:
+                dump.append(line)
+        if dump:
+            text = "\n".join(dump)
+            el.text = text
+
+    def __processUList(self, parentElem, lines, inList):
+        self.__processList(parentElem, lines, inList, listexpr='ul', tag='ul')
+
+    def __processOList(self, parentElem, lines, inList):
+        self.__processList(parentElem, lines, inList, listexpr='ol', tag='ol')
+
+    def __processList(self, parentElem, lines, inList, listexpr, tag):
+        """
+        Given a list of document lines starting with a list item,
+        finds the end of the list, breaks it up, and recursively
+        processes each list item and the remainder of the text file.
+
+        Keyword arguments:
+
+        * parentElem: A ElementTree element to which the content will be added
+        * lines: a list of lines
+        * inList: a level
+
+        Returns: None
+
+        """
+        ul = etree.SubElement(parentElem, tag) # ul might actually be '<ol>'
+
+        looseList = 0
+
+        # Make a list of list items
+        items = []
+        item = -1
+
+        i = 0  # a counter to keep track of where we are
+        for line in lines:
+            loose = 0
+            if not line.strip():
+                # If we see a blank line, this _might_ be the end of the list
+                i += 1
+                loose = 1
+
+                # Find the next non-blank line
+                for j in range(i, len(lines)):
+                    if lines[j].strip():
+                        next = lines[j]
+                        break
+                else:
+                    # There is no more text => end of the list
+                    break
+
+                # Check if the next non-blank line is still a part of the list
+
+                if ( CORE_RE[listexpr].match(next) or
+                     CORE_RE['tabbed'].match(next) ):
+                    # get rid of any white space in the line
+                    items[item].append(line.strip())
+                    looseList = loose or looseList
+                    continue
+                else:
+                    break # found end of the list
+
+            # Now we need to detect list items (at the current level)
+            # while also detabing child elements if necessary
+
+            for expr in ['ul', 'ol', 'tabbed']:
+                m = CORE_RE[expr].match(line)
+                if m:
+                    if expr in ['ul', 'ol']:  # We are looking at a new item
+                        #if m.group(1) :
+                        # Removed the check to allow for a blank line
+                        # at the beginning of the list item
+                        items.append([m.group(1)])
+                        item += 1
+                    elif expr == 'tabbed':  # This line needs to be detabbed
+                        items[item].append(m.group(4)) #after the 'tab'
+                    i += 1
+                    break
+            else:
+                items[item].append(line)  # Just regular continuation
+                i += 1 # added on 2006.02.25
+        else:
+            i += 1
+
+        # Add the ElementTree elements
+        for item in items:
+            li = etree.SubElement(ul, "li")
+            self.parseChunk(li, item, inList + 1, looseList = looseList)
+
+        # Process the remaining part of the section
+        self.parseChunk(parentElem, lines[i:], inList)
+
+    def __linesUntil(self, lines, condition):
+        """
+        A utility function to break a list of lines upon the
+        first line that satisfied a condition.  The condition
+        argument should be a predicate function.
+
+        """
+        i = -1
+        for line in lines:
+            i += 1
+            if condition(line):
+                break
+        else:
+            i += 1
+        return lines[:i], lines[i:]
+
+    def __processQuote(self, parentElem, lines, inList):
+        """
+        Given a list of document lines starting with a quote finds
+        the end of the quote, unindents it and recursively
+        processes the body of the quote and the remainder of the
+        text file.
+
+        Keyword arguments:
+
+        * parentElem: ElementTree element to which the content will be added
+        * lines: a list of lines
+        * inList: a level
+
+        Returns: None
+
+        """
+        dequoted = []
+        i = 0
+        blank_line = False # allow one blank line between paragraphs
+        for line in lines:
+            m = CORE_RE['quoted'].match(line)
+            if m:
+                dequoted.append(m.group(1))
+                i += 1
+                blank_line = False
+            elif not blank_line and line.strip() != '':
+                dequoted.append(line)
+                i += 1
+            elif not blank_line and line.strip() == '':
+                dequoted.append(line)
+                i += 1
+                blank_line = True
+            else:
+                break
+
+        blockquote = etree.SubElement(parentElem, "blockquote")
+
+        self.parseChunk(blockquote, dequoted, inList)
+        self.parseChunk(parentElem, lines[i:], inList)
+
+    def __processCodeBlock(self, parentElem, lines, inList):
+        """
+        Given a list of document lines starting with a code block
+        finds the end of the block, puts it into the ElementTree verbatim
+        wrapped in ("<pre><code>") and recursively processes the
+        the remainder of the text file.
+
+        Keyword arguments:
+
+        * parentElem: ElementTree element to which the content will be added
+        * lines: a list of lines
+        * inList: a level
+
+        Returns: None
+
+        """
+        detabbed, theRest = self.detectTabbed(lines)
+        pre = etree.SubElement(parentElem, "pre")
+        code = etree.SubElement(pre, "code")
+        text = "\n".join(detabbed).rstrip()+"\n"
+        code.text = markdown.AtomicString(text)
+        self.parseChunk(parentElem, theRest, inList)
+
+    def detectTabbed(self, lines):
+        """ Find indented text and remove indent before further proccesing.
+
+        Keyword arguments:
+
+        * lines: an array of strings
+
+        Returns: a list of post processed items and the unused
+        remainder of the original list
+
+        """
+        items = []
+        item = -1
+        i = 0 # to keep track of where we are
+
+        def detab(line):
+            match = CORE_RE['tabbed'].match(line)
+            if match:
+               return match.group(4)
+
+        for line in lines:
+            if line.strip(): # Non-blank line
+                line = detab(line)
+                if line:
+                    items.append(line)
+                    i += 1
+                    continue
+                else:
+                    return items, lines[i:]
+
+            else: # Blank line: _maybe_ we are done.
+                i += 1 # advance
+
+                # Find the next non-blank line
+                for j in range(i, len(lines)):
+                    if lines[j].strip():
+                        next_line = lines[j]; break
+                else:
+                    break # There is no more text; we are done.
+
+                # Check if the next non-blank line is tabbed
+                if detab(next_line): # Yes, more work to do.
+                    items.append("")
+                    continue
+                else:
+                    break # No, we are done.
+        else:
+            i += 1
+
+        return items, lines[i:]
+
+class HeaderPreprocessor(markdown.preprocessors.Preprocessor):
+
+    """Replace underlined headers with hashed headers.
+
+    (To avoid the need for lookahead later.)
+
+    """
+
+    def run (self, lines):
+        i = -1
+        while i+1 < len(lines):
+            i = i+1
+            if not lines[i].strip():
+                continue
+
+            if lines[i].startswith("#"):
+                lines.insert(i+1, "\n")
+
+            if (i+1 < len(lines)
+                  and lines[i+1]
+                  and lines[i+1][0] in ['-', '=']):
+
+                underline = lines[i+1].strip()
+
+                if underline == "="*len(underline):
+                    lines[i] = "# " + lines[i].strip()
+                    lines[i+1] = ""
+                elif underline == "-"*len(underline):
+                    lines[i] = "## " + lines[i].strip()
+                    lines[i+1] = ""
+
+        return lines
+
+
+class LinePreprocessor(markdown.preprocessors.Preprocessor):
+    """Convert HR lines to "___" format."""
+    blockquote_re = re.compile(r'^(> )+')
+
+    def run (self, lines):
+        for i in range(len(lines)):
+            prefix = ''
+            m = self.blockquote_re.search(lines[i])
+            if m:
+                prefix = m.group(0)
+            if self._isLine(lines[i][len(prefix):]):
+                lines[i] = prefix + "___"
+        return lines
+
+    def _isLine(self, block):
+        """Determine if a block should be replaced with an <HR>"""
+        if block.startswith("    "):
+            return False  # a code block
+        text = "".join([x for x in block if not x.isspace()])
+        if len(text) <= 2:
+            return False
+        for pattern in ['isline1', 'isline2', 'isline3']:
+            m = CORE_RE[pattern].match(text)
+            if (m and m.group(1)):
+                return True
+        else:
+            return False
+
+
+class LegacyExtension(markdown.Extension):
+    """ Replace Markdown's core parser. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Set the core parser to an instance of MarkdownParser. """
+        md.parser = MarkdownParser()
+        md.preprocessors.add ("header", HeaderPreprocessor(self), "<reference")
+        md.preprocessors.add("line",  LinePreprocessor(self), "<reference")
+ 
+
+def makeExtension(configs={}):
+    return LegacyExtension(configs=configs)
+
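
A small, hedged sketch of switching to the legacy parser above (assuming the
extension loads cleanly in this snapshot):

    import markdown

    text = "A Header\n========\n\nSome text.\n"
    html = markdown.markdown(text, extensions=['legacy'])
    # HeaderPreprocessor first rewrites the underlined header to '# A Header',
    # LinePreprocessor normalizes horizontal rules to '___', and the
    # replacement MarkdownParser then builds the ElementTree.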

Added: packages-ng/markdown/extensions/meta.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/meta.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+"""
+Meta Data Extension for Python-Markdown
+=======================================
+
+This extension adds Meta Data handling to markdown.
+
+Basic Usage:
+
+    >>> import markdown
+    >>> text = '''Title: A Test Doc.
+    ... Author: Waylan Limberg
+    ...         John Doe
+    ... Blank_Data:
+    ...
+    ... The body. This is paragraph one.
+    ... '''
+    >>> md = markdown.Markdown(['meta'])
+    >>> md.convert(text)
+    u'<p>The body. This is paragraph one.</p>'
+    >>> md.Meta
+    {u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
+
+Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
+
+    >>> text = '    Some Code - not extra lines of meta data.'
+    >>> md = markdown.Markdown(['meta'])
+    >>> md.convert(text)
+    u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>'
+    >>> md.Meta
+    {}
+
+Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
+
+Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data>
+Contact: markdown at freewisdom.org
+
+License: BSD (see ../docs/LICENSE for details)
+
+"""
+
+import markdown, re
+
+# Global Vars
+META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
+META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
+
+class MetaExtension (markdown.Extension):
+    """ Meta-Data extension for Python-Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add MetaPreprocessor to Markdown instance. """
+
+        md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
+
+
+class MetaPreprocessor(markdown.preprocessors.Preprocessor):
+    """ Get Meta-Data. """
+
+    def run(self, lines):
+        """ Parse Meta-Data and store in Markdown.Meta. """
+        meta = {}
+        key = None
+        while 1:
+            line = lines.pop(0)
+            if line.strip() == '':
+                break # blank line - done
+            m1 = META_RE.match(line)
+            if m1:
+                key = m1.group('key').lower().strip()
+                meta[key] = [m1.group('value').strip()]
+            else:
+                m2 = META_MORE_RE.match(line)
+                if m2 and key:
+                    # Add another line to existing key
+                    meta[key].append(m2.group('value').strip())
+                else:
+                    lines.insert(0, line)
+                    break # no meta data - done
+        self.markdown.Meta = meta
+        return lines
+        
+
+def makeExtension(configs={}):
+    return MetaExtension(configs=configs)
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()

Added: packages-ng/markdown/extensions/rss.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/rss.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,114 @@
+import markdown
+from markdown import etree
+
+DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
+DEFAULT_CREATOR = "Yuri Takhteyev"
+DEFAULT_TITLE = "Markdown in Python"
+GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
+
+month_map = { "Jan" : "01",
+              "Feb" : "02",
+              "March" : "03",
+              "April" : "04",
+              "May" : "05",
+              "June" : "06",
+              "July" : "07",
+              "August" : "08",
+              "September" : "09",
+              "October" : "10",
+              "November" : "11",
+              "December" : "12" }
+
+def get_time(heading):
+
+    heading = heading.split("-")[0]
+    heading = heading.strip().replace(",", " ").replace(".", " ")
+
+    month, date, year = heading.split()
+    month = month_map[month]
+
+    return rdftime(" ".join((month, date, year, "12:00:00 AM")))
+
+def rdftime(time):
+
+    time = time.replace(":", " ")
+    time = time.replace("/", " ")
+    time = time.split()
+    return "%s-%s-%sT%s:%s:%s-08:00" % (time[0], time[1], time[2],
+                                        time[3], time[4], time[5])
+
+
+def get_date(text):
+    return "date"
+
+class RssExtension (markdown.Extension):
+
+    def extendMarkdown(self, md, md_globals):
+
+        self.config = { 'URL' : [DEFAULT_URL, "Main URL"],
+                        'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
+                        'TITLE' : [DEFAULT_TITLE, "Feed title"] }
+
+        md.xml_mode = True
+        
+        # Insert a tree-processor that would actually add the title tag
+        treeprocessor = RssTreeProcessor(md)
+        treeprocessor.ext = self
+        md.treeprocessors['rss'] = treeprocessor
+        md.stripTopLevelTags = 0
+        md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
+
+class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
+
+    def run (self, root):
+
+        rss = etree.Element("rss")
+        rss.set("version", "2.0")
+
+        channel = etree.SubElement(rss, "channel")
+
+        for tag, text in (("title", self.ext.getConfig("TITLE")),
+                          ("link", self.ext.getConfig("URL")),
+                          ("description", None)):
+            
+            element = etree.SubElement(channel, tag)
+            element.text = text
+
+        for child in root:
+
+            if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
+      
+                heading = child.text.strip()
+                item = etree.SubElement(channel, "item")
+                link = etree.SubElement(item, "link")
+                link.text = self.ext.getConfig("URL")
+                title = etree.SubElement(item, "title")
+                title.text = heading
+
+                guid = ''.join([x for x in heading if x.isalnum()])
+                guidElem = etree.SubElement(item, "guid")
+                guidElem.text = guid
+                guidElem.set("isPermaLink", "false")
+
+            elif child.tag in ["p"]:
+                try:
+                    description = etree.SubElement(item, "description")
+                except UnboundLocalError:
+                    # Item not defined - moving on
+                    pass
+                else:
+                    if len(child):
+                        content = "\n".join([etree.tostring(node)
+                                             for node in child])
+                    else:
+                        content = child.text
+                    pholder = self.markdown.htmlStash.store(
+                                                "<![CDATA[ %s]]>" % content)
+                    description.text = pholder
+    
+        return rss
+
+
+def makeExtension(configs):
+
+    return RssExtension(configs)

Added: packages-ng/markdown/extensions/tables.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/tables.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+"""
+Tables Extension for Python-Markdown
+====================================
+
+Added parsing of tables to Python-Markdown.
+
+A simple example:
+
+    First Header  | Second Header
+    ------------- | -------------
+    Content Cell  | Content Cell
+    Content Cell  | Content Cell
+
+Copyright 2009 - [Waylan Limberg](http://achinghead.com)
+"""
+import markdown
+from markdown import etree
+
+
+class TableProcessor(markdown.blockprocessors.BlockProcessor):
+    """ Process Tables. """
+
+    def test(self, parent, block):
+        rows = block.split('\n')
+        return (len(rows) > 2 and '|' in rows[0] and 
+                '|' in rows[1] and '-' in rows[1] and 
+                rows[1][0] in ['|', ':', '-'])
+
+    def run(self, parent, blocks):
+        """ Parse a table block and build table. """
+        block = blocks.pop(0).split('\n')
+        header = block[:2]
+        rows = block[2:]
+        # Get format type (bordered by pipes or not)
+        border = False
+        if header[0].startswith('|'):
+            border = True
+        # Get alignment of columns
+        align = []
+        for c in self._split_row(header[1], border):
+            if c.startswith(':') and c.endswith(':'):
+                align.append('center')
+            elif c.startswith(':'):
+                align.append('left')
+            elif c.endswith(':'):
+                align.append('right')
+            else:
+                align.append(None)
+        # Build table
+        table = etree.SubElement(parent, 'table')
+        thead = etree.SubElement(table, 'thead')
+        self._build_row(header[0], thead, align, border)
+        tbody = etree.SubElement(table, 'tbody')
+        for row in rows:
+            self._build_row(row, tbody, align, border)
+
+    def _build_row(self, row, parent, align, border):
+        """ Given a row of text, build table cells. """
+        tr = etree.SubElement(parent, 'tr')
+        tag = 'td'
+        if parent.tag == 'thead':
+            tag = 'th'
+        cells = self._split_row(row, border)
+        # We use align here rather than cells to ensure every row 
+        # contains the same number of columns.
+        for i, a in enumerate(align):
+            c = etree.SubElement(tr, tag)
+            try:
+                c.text = cells[i].strip()
+            except IndexError:
+                c.text = ""
+            if a:
+                c.set('align', a)
+
+    def _split_row(self, row, border):
+        """ split a row of text into list of cells. """
+        if border:
+            if row.startswith('|'):
+                row = row[1:]
+            if row.endswith('|'):
+                row = row[:-1]
+        return row.split('|')
+
+
+class TableExtension(markdown.Extension):
+    """ Add tables to Markdown. """
+
+    def extendMarkdown(self, md, md_globals):
+        """ Add an instance of TableProcessor to BlockParser. """
+        md.parser.blockprocessors.add('table', 
+                                      TableProcessor(md.parser),
+                                      '<hashheader')
+
+
+def makeExtension(configs={}):
+    return TableExtension(configs=configs)

Added: packages-ng/markdown/extensions/toc.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/toc.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,140 @@
+"""
+Table of Contents Extension for Python-Markdown
+* * *
+
+(c) 2008 [Jack Miller](http://codezen.org)
+
+Dependencies:
+* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
+
+"""
+import markdown
+from markdown import etree
+import re
+
+class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
+    # Iterator wrapper to get parent and child all at once
+    def iterparent(self, root):
+        for parent in root.getiterator():
+            for child in parent:
+                yield parent, child
+
+    def run(self, doc):
+        div = etree.Element("div")
+        div.attrib["class"] = "toc"
+        last_li = None
+
+        # Add title to the div
+        if self.config["title"][0]:
+            header = etree.SubElement(div, "span")
+            header.attrib["class"] = "toctitle"
+            header.text = self.config["title"][0]
+
+        level = 0
+        list_stack=[div]
+        header_rgx = re.compile("[Hh][123456]")
+
+        # Get a list of id attributes
+        used_ids = []
+        for c in doc.getiterator():
+            if "id" in c.attrib:
+                used_ids.append(c.attrib["id"])
+
+        for (p, c) in self.iterparent(doc):
+            if not c.text:
+                continue
+
+            # To keep the output from breaking validation by
+            # putting a <div> inside of a <p>, we replace the
+            # <p> in its entirety.
+            # We do not allow the marker inside a header, as that
+            # would cause an endless loop of placing a new TOC
+            # inside the previously generated TOC.
+
+            if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag):
+                for i in range(len(p)):
+                    if p[i] == c:
+                        p[i] = div
+                        break
+                    
+            if header_rgx.match(c.tag):
+                tag_level = int(c.tag[-1])
+                
+                # Regardless of how many levels we jumped
+                # only one list should be created, since
+                # empty lists containing lists are illegal.
+    
+                if tag_level < level:
+                    list_stack.pop()
+                    level = tag_level
+
+                if tag_level > level:
+                    newlist = etree.Element("ul")
+                    if last_li:
+                        last_li.append(newlist)
+                    else:
+                        list_stack[-1].append(newlist)
+                    list_stack.append(newlist)
+                    level = tag_level
+
+                # Do not override pre-existing ids 
+                if not "id" in c.attrib:
+                    id = self.config["slugify"][0](c.text)
+                    if id in used_ids:
+                        ctr = 1
+                        while "%s_%d" % (id, ctr) in used_ids:
+                            ctr += 1
+                        id = "%s_%d" % (id, ctr)
+                    used_ids.append(id)
+                    c.attrib["id"] = id
+                else:
+                    id = c.attrib["id"]
+
+                # List item link, to be inserted into the toc div
+                last_li = etree.Element("li")
+                link = etree.SubElement(last_li, "a")
+                link.text = c.text
+                link.attrib["href"] = '#' + id
+
+                if int(self.config["anchorlink"][0]):
+                    anchor = etree.SubElement(c, "a")
+                    anchor.text = c.text
+                    anchor.attrib["href"] = "#" + id
+                    anchor.attrib["class"] = "toclink"
+                    c.text = ""
+
+                list_stack[-1].append(last_li)
+
+class TocExtension(markdown.Extension):
+    def __init__(self, configs):
+        self.config = { "marker" : ["[TOC]", 
+                            "Text to find and replace with Table of Contents -"
+                            "Defaults to \"[TOC]\""],
+                        "slugify" : [self.slugify,
+                            "Function to generate anchors based on header text-"
+                            "Defaults to a built in slugify function."],
+                        "title" : [None,
+                            "Title to insert into TOC <div> - "
+                            "Defaults to None"],
+                        "anchorlink" : [0,
+                            "1 if header should be a self link"
+                            "Defaults to 0"]}
+
+        for key, value in configs:
+            self.setConfig(key, value)
+
+    # This is exactly the same as Django's slugify
+    def slugify(self, value):
+        """ Slugify a string, to make it URL friendly. """
+        import unicodedata
+        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
+        value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
+        return re.sub('[-\s]+','-',value)
+
+    def extendMarkdown(self, md, md_globals):
+        tocext = TocTreeprocessor(md)
+        tocext.config = self.config
+        md.treeprocessors.add("toc", tocext, "_begin")
+	
+def makeExtension(configs={}):
+    return TocExtension(configs=configs)
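+# Configuration sketch (values illustrative): the options above can be passed
+# inline on the extension name, e.g.
+#
+#     import markdown
+#     md = markdown.Markdown(extensions=['toc(title=Contents,anchorlink=1)'])
+#     html = md.convert(some_text)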

Added: packages-ng/markdown/extensions/wikilinks.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/extensions/wikilinks.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+
+'''
+WikiLinks Extension for Python-Markdown
+======================================
+
+Converts [[WikiLinks]] to relative links.  Requires Python-Markdown 2.0+
+
+Basic usage:
+
+    >>> import markdown
+    >>> text = "Some text with a [[WikiLink]]."
+    >>> html = markdown.markdown(text, ['wikilinks'])
+    >>> html
+    u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>'
+
+Whitespace behavior:
+
+    >>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
+    u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>'
+    >>> markdown.markdown('foo [[ ]] bar', ['wikilinks'])
+    u'<p>foo  bar</p>'
+
+To define custom settings the simple way:
+
+    >>> markdown.markdown(text, 
+    ...     ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
+    ... )
+    u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>'
+    
+Custom settings the complex way:
+
+    >>> md = markdown.Markdown(
+    ...     extensions = ['wikilinks'], 
+    ...     extension_configs = {'wikilinks': [
+    ...                                 ('base_url', 'http://example.com/'), 
+    ...                                 ('end_url', '.html'),
+    ...                                 ('html_class', '') ]},
+    ...     safe_mode = True)
+    >>> md.convert(text)
+    u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
+
+Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
+
+    >>> text = """wiki_base_url: http://example.com/
+    ... wiki_end_url:   .html
+    ... wiki_html_class:
+    ...
+    ... Some text with a [[WikiLink]]."""
+    >>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
+    >>> md.convert(text)
+    u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
+
+MetaData should not carry over to next document:
+
+    >>> md.convert("No [[MetaData]] here.")
+    u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>'
+
+From the command line:
+
+    python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
+
+By [Waylan Limberg](http://achinghead.com/).
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php) 
+
+Dependencies:
+* [Python 2.3+](http://python.org)
+* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
+'''
+
+import markdown
+
+class WikiLinkExtension(markdown.Extension):
+    def __init__(self, configs):
+        # set extension defaults
+        self.config = {
+                        'base_url' : ['/', 'String to append to beginning of URL.'],
+                        'end_url' : ['/', 'String to append to end of URL.'],
+                        'html_class' : ['wikilink', 'CSS hook. Leave blank for none.']
+        }
+        
+        # Override defaults with user settings
+        for key, value in configs :
+            self.setConfig(key, value)
+        
+    def extendMarkdown(self, md, md_globals):
+        self.md = md
+    
+        # append to end of inline patterns
+        WIKILINK_RE = r'\[\[([A-Za-z0-9_ -]+)\]\]'
+        wikilinkPattern = WikiLinks(WIKILINK_RE, self.config)
+        wikilinkPattern.md = md
+        md.inlinePatterns.add('wikilink', wikilinkPattern, "_end")
+        
+
+class WikiLinks(markdown.inlinepatterns.Pattern):
+    def __init__(self, pattern, config):
+        markdown.inlinepatterns.Pattern.__init__(self, pattern)
+        self.config = config
+  
+    def handleMatch(self, m):
+        if m.group(2).strip():
+            base_url, end_url, html_class = self._getMeta()
+            label = m.group(2).strip()
+            url = '%s%s%s'% (base_url, label.replace(' ', '_'), end_url)
+            a = markdown.etree.Element('a')
+            a.text = markdown.AtomicString(label)
+            a.set('href', url)
+            if html_class:
+                a.set('class', html_class)
+        else:
+            a = ''
+        return a
+
+    def _getMeta(self):
+        """ Return meta data or config data. """
+        base_url = self.config['base_url'][0]
+        end_url = self.config['end_url'][0]
+        html_class = self.config['html_class'][0]
+        if hasattr(self.md, 'Meta'):
+            if self.md.Meta.has_key('wiki_base_url'):
+                base_url = self.md.Meta['wiki_base_url'][0]
+            if self.md.Meta.has_key('wiki_end_url'):
+                end_url = self.md.Meta['wiki_end_url'][0]
+            if self.md.Meta.has_key('wiki_html_class'):
+                html_class = self.md.Meta['wiki_html_class'][0]
+        return base_url, end_url, html_class
+    
+
+def makeExtension(configs=None) :
+    return WikiLinkExtension(configs=configs)
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
+

Added: packages-ng/markdown/html4.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/html4.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,274 @@
+# markdown/html4.py
+#
+# Add html4 serialization to older versions of ElementTree
+# Taken from ElementTree 1.3 preview with slight modifications
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh.  All rights reserved.
+#
+# fredrik at pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS.  IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+
+import markdown
+ElementTree = markdown.etree.ElementTree
+QName = markdown.etree.QName
+Comment = markdown.etree.Comment
+PI = markdown.etree.PI
+ProcessingInstruction = markdown.etree.ProcessingInstruction
+
+HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
+              "img", "input", "isindex", "link", "meta" "param")
+
+try:
+    HTML_EMPTY = set(HTML_EMPTY)
+except NameError:
+    pass
+
+_namespace_map = {
+    # "well-known" namespace prefixes
+    "http://www.w3.org/XML/1998/namespace": "xml",
+    "http://www.w3.org/1999/xhtml": "html",
+    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+    # xml schema
+    "http://www.w3.org/2001/XMLSchema": "xs",
+    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+    # dublin core
+    "http://purl.org/dc/elements/1.1/": "dc",
+}
+
+
+def _raise_serialization_error(text):
+    raise TypeError(
+        "cannot serialize %r (type %s)" % (text, type(text).__name__)
+        )
+
+def _encode(text, encoding):
+    try:
+        return text.encode(encoding, "xmlcharrefreplace")
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_cdata(text, encoding):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so.  Assume that's, by far,
+        # the most common case in most applications.
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        return text.encode(encoding, "xmlcharrefreplace")
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+def _escape_attrib(text, encoding):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        if "\n" in text:
+            text = text.replace("\n", "&#10;")
+        return text.encode(encoding, "xmlcharrefreplace")
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib_html(text, encoding):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        return text.encode(encoding, "xmlcharrefreplace")
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+def _serialize_html(write, elem, encoding, qnames, namespaces):
+    tag = elem.tag
+    text = elem.text
+    if tag is Comment:
+        write("<!--%s-->" % _escape_cdata(text, encoding))
+    elif tag is ProcessingInstruction:
+        write("<?%s?>" % _escape_cdata(text, encoding))
+    else:
+        tag = qnames[tag]
+        if tag is None:
+            if text:
+                write(_escape_cdata(text, encoding))
+            for e in elem:
+                _serialize_html(write, e, encoding, qnames, None)
+        else:
+            write("<" + tag)
+            items = elem.items()
+            if items or namespaces:
+                items.sort() # lexical order
+                for k, v in items:
+                    if isinstance(k, QName):
+                        k = k.text
+                    if isinstance(v, QName):
+                        v = qnames[v.text]
+                    else:
+                        v = _escape_attrib_html(v, encoding)
+                    # FIXME: handle boolean attributes
+                    write(" %s=\"%s\"" % (qnames[k], v))
+                if namespaces:
+                    items = namespaces.items()
+                    items.sort(key=lambda x: x[1]) # sort on prefix
+                    for v, k in items:
+                        if k:
+                            k = ":" + k
+                        write(" xmlns%s=\"%s\"" % (
+                            k.encode(encoding),
+                            _escape_attrib(v, encoding)
+                            ))
+            write(">")
+            tag = tag.lower()
+            if text:
+                if tag == "script" or tag == "style":
+                    write(_encode(text, encoding))
+                else:
+                    write(_escape_cdata(text, encoding))
+            for e in elem:
+                _serialize_html(write, e, encoding, qnames, None)
+            if tag not in HTML_EMPTY:
+                write("</" + tag + ">")
+    if elem.tail:
+        write(_escape_cdata(elem.tail, encoding))
+
+def write_html(root, f,
+          # keyword arguments
+          encoding="us-ascii",
+          default_namespace=None):
+    assert root is not None
+    if not hasattr(f, "write"):
+        f = open(f, "wb")
+    write = f.write
+    if not encoding:
+        encoding = "us-ascii"
+    qnames, namespaces = _namespaces(
+            root, encoding, default_namespace
+            )
+    _serialize_html(
+                write, root, encoding, qnames, namespaces
+                )
+
+# --------------------------------------------------------------------
+# serialization support
+
+def _namespaces(elem, encoding, default_namespace=None):
+    # identify namespaces used in this tree
+
+    # maps qnames to *encoded* prefix:local names
+    qnames = {None: None}
+
+    # maps URIs to prefixes
+    namespaces = {}
+    if default_namespace:
+        namespaces[default_namespace] = ""
+
+    def encode(text):
+        return text.encode(encoding)
+
+    def add_qname(qname):
+        # calculate serialized qname representation
+        try:
+            if qname[:1] == "{":
+                uri, tag = qname[1:].split("}", 1)
+                prefix = namespaces.get(uri)
+                if prefix is None:
+                    prefix = _namespace_map.get(uri)
+                    if prefix is None:
+                        prefix = "ns%d" % len(namespaces)
+                    if prefix != "xml":
+                        namespaces[uri] = prefix
+                if prefix:
+                    qnames[qname] = encode("%s:%s" % (prefix, tag))
+                else:
+                    qnames[qname] = encode(tag) # default element
+            else:
+                if default_namespace:
+                    # FIXME: can this be handled in XML 1.0?
+                    raise ValueError(
+                        "cannot use non-qualified names with "
+                        "default_namespace option"
+                        )
+                qnames[qname] = encode(qname)
+        except TypeError:
+            _raise_serialization_error(qname)
+
+    # populate qname and namespaces table
+    try:
+        iterate = elem.iter
+    except AttributeError:
+        iterate = elem.getiterator # cET compatibility
+    for elem in iterate():
+        tag = elem.tag
+        if isinstance(tag, QName) and tag.text not in qnames:
+            add_qname(tag.text)
+        elif isinstance(tag, basestring):
+            if tag not in qnames:
+                add_qname(tag)
+        elif tag is not None and tag is not Comment and tag is not PI:
+            _raise_serialization_error(tag)
+        for key, value in elem.items():
+            if isinstance(key, QName):
+                key = key.text
+            if key not in qnames:
+                add_qname(key)
+            if isinstance(value, QName) and value.text not in qnames:
+                add_qname(value.text)
+        text = elem.text
+        if isinstance(text, QName) and text.text not in qnames:
+            add_qname(text.text)
+    return qnames, namespaces
+
+def to_html_string(element, encoding=None):
+    class dummy:
+        pass
+    data = []
+    file = dummy()
+    file.write = data.append
+    write_html(ElementTree(element).getroot(),file,encoding)
+    return "".join(data)

Added: packages-ng/markdown/inlinepatterns.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/inlinepatterns.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,371 @@
+"""
+INLINE PATTERNS
+=============================================================================
+
+Inline patterns such as *emphasis* are handled by means of auxiliary
+objects, one per pattern.  Pattern objects must be instances of classes
+that extend markdown.Pattern.  Each pattern object uses a single regular
+expression and needs to support the following methods:
+
+    pattern.getCompiledRegExp() # returns a regular expression
+
+    pattern.handleMatch(m) # takes a match object and returns
+                           # an ElementTree element or just plain text
+
+All of python markdown's built-in patterns subclass from Pattern,
+but you can add additional patterns that don't.
+
+Also note that all the regular expressions used by inline patterns must
+capture the whole block.  For this reason, they all start with
+'^(.*?)' and end with '(.*?)$'.  For the built-in expressions, Pattern
+takes care of adding the "^(.*?)" and "(.*?)$".
+
+Finally, the order in which regular expressions are applied is very
+important - e.g. if we first replace http://.../ links with <a> tags
+and _then_ try to replace inline html, we would end up with a mess.
+So, we apply the expressions in the following order:
+
+* escape and backticks have to go before everything else, so
+  that we can preempt any markdown patterns by escaping them.
+
+* then we handle auto-links (must be done before inline html)
+
+* then we handle inline HTML.  At this point we will simply
+  replace all inline HTML strings with a placeholder and add
+  the actual HTML to a hash.
+
+* then inline images (must be done before links)
+
+* then bracketed links, first regular then reference-style
+
+* finally we apply strong and emphasis
+"""
+
+import markdown
+import re
+from urlparse import urlparse, urlunparse
+import sys
+if sys.version >= "3.0":
+    from html import entities as htmlentitydefs
+else:
+    import htmlentitydefs
+
+"""
+The actual regular expressions for patterns
+-----------------------------------------------------------------------------
+"""
+
+NOBRACKET = r'[^\]\[]*'
+BRK = ( r'\[('
+        + (NOBRACKET + r'(\[')*6
+        + (NOBRACKET+ r'\])*')*6
+        + NOBRACKET + r')\]' )
+NOIMG = r'(?<!\!)'
+
+BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
+ESCAPE_RE = r'\\(.)'                             # \<
+EMPHASIS_RE = r'(\*)([^\*]*)\2'                    # *emphasis*
+STRONG_RE = r'(\*{2}|_{2})(.*?)\2'                      # **strong**
+STRONG_EM_RE = r'(\*{3}|_{3})(.*?)\2'            # ***strong***
+
+if markdown.SMART_EMPHASIS:
+    EMPHASIS_2_RE = r'(?<!\S)(_)(\S.*?)\2'        # _emphasis_
+else:
+    EMPHASIS_2_RE = r'(_)(.*?)\2'                 # _emphasis_
+
+LINK_RE = NOIMG + BRK + \
+r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*)\12)?\)'''
+# [text](url) or [text](<url>)
+
+IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
+# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
+REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]'           # [Google][3]
+IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
+NOT_STRONG_RE = r'( \* )'                        # stand-alone * or _
+AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>'        # <http://www.123.com>
+AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'               # <me@example.com>
+
+HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'               # <...>
+ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'               # &amp;
+LINE_BREAK_RE = r'  \n'                     # two spaces at end of line
+LINE_BREAK_2_RE = r'  $'                    # two spaces at end of text
+
+
+def dequote(string):
+    """Remove quotes from around a string."""
+    if ( ( string.startswith('"') and string.endswith('"'))
+         or (string.startswith("'") and string.endswith("'")) ):
+        return string[1:-1]
+    else:
+        return string
+
+ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
+
+def handleAttributes(text, parent):
+    """Set values of an element based on attribute definitions ({@id=123})."""
+    def attributeCallback(match):
+        parent.set(match.group(1), match.group(2).replace('\n', ' '))
+    return ATTR_RE.sub(attributeCallback, text)
+
+
+"""
+The pattern classes
+-----------------------------------------------------------------------------
+"""
+
+class Pattern:
+    """Base class that inline patterns subclass. """
+
+    def __init__ (self, pattern, markdown_instance=None):
+        """
+        Create an instance of an inline pattern.
+
+        Keyword arguments:
+
+        * pattern: A regular expression that matches a pattern
+
+        """
+        self.pattern = pattern
+        self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
+
+        # Api for Markdown to pass safe_mode into instance
+        self.safe_mode = False
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+    def getCompiledRegExp (self):
+        """ Return a compiled regular expression. """
+        return self.compiled_re
+
+    def handleMatch(self, m):
+        """Return a ElementTree element from the given match.
+
+        Subclasses should override this method.
+
+        Keyword arguments:
+
+        * m: A re match object containing a match of the pattern.
+
+        """
+        pass
+
+    def type(self):
+        """ Return class name, to define pattern type """
+        return self.__class__.__name__
+
+BasePattern = Pattern # for backward compatibility
+
+class SimpleTextPattern (Pattern):
+    """ Return a simple text of group(2) of a Pattern. """
+    def handleMatch(self, m):
+        text = m.group(2)
+        if text == markdown.INLINE_PLACEHOLDER_PREFIX:
+            return None
+        return text
+
+class SimpleTagPattern (Pattern):
+    """
+    Return element of type `tag` with a text attribute of group(3)
+    of a Pattern.
+
+    """
+    def __init__ (self, pattern, tag):
+        Pattern.__init__(self, pattern)
+        self.tag = tag
+
+    def handleMatch(self, m):
+        el = markdown.etree.Element(self.tag)
+        el.text = m.group(3)
+        return el
+
+
+class SubstituteTagPattern (SimpleTagPattern):
+    """ Return a eLement of type `tag` with no children. """
+    def handleMatch (self, m):
+        return markdown.etree.Element(self.tag)
+
+
+class BacktickPattern (Pattern):
+    """ Return a `<code>` element containing the matching text. """
+    def __init__ (self, pattern):
+        Pattern.__init__(self, pattern)
+        self.tag = "code"
+
+    def handleMatch(self, m):
+        el = markdown.etree.Element(self.tag)
+        el.text = markdown.AtomicString(m.group(3).strip())
+        return el
+
+
+class DoubleTagPattern (SimpleTagPattern):
+    """Return a ElementTree element nested in tag2 nested in tag1.
+
+    Useful for strong emphasis etc.
+
+    """
+    def handleMatch(self, m):
+        tag1, tag2 = self.tag.split(",")
+        el1 = markdown.etree.Element(tag1)
+        el2 = markdown.etree.SubElement(el1, tag2)
+        el2.text = m.group(3)
+        return el1
+
+
+class HtmlPattern (Pattern):
+    """ Store raw inline html and return a placeholder. """
+    def handleMatch (self, m):
+        rawhtml = m.group(2)
+        inline = True
+        place_holder = self.markdown.htmlStash.store(rawhtml)
+        return place_holder
+
+
+class LinkPattern (Pattern):
+    """ Return a link element from the given match. """
+    def handleMatch(self, m):
+        el = markdown.etree.Element("a")
+        el.text = m.group(2)
+        title = m.group(11)
+        href = m.group(9)
+
+        if href:
+            if href[0] == "<":
+                href = href[1:-1]
+            el.set("href", self.sanitize_url(href.strip()))
+        else:
+            el.set("href", "")
+
+        if title:
+            title = dequote(title) #.replace('"', "&quot;")
+            el.set("title", title)
+        return el
+
+    def sanitize_url(self, url):
+        """
+        Sanitize a url against xss attacks in "safe_mode".
+
+        Rather than specifically blacklisting `javascript:alert("XSS")` and all
+        its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
+        safe url formats. Most urls contain a network location, however some
+        are known not to (i.e.: mailto links). Script urls do not contain a
+        location. Additionally, for `javascript:...`, the scheme would be
+        "javascript" but some aliases will appear to `urlparse()` to have no
+        scheme. On top of that relative links (i.e.: "foo/bar.html") have no
+        scheme. Therefore we must check "path", "parameters", "query" and
+        "fragment" for any literal colons. We don't check "scheme" for colons
+        because it *should* never have any and "netloc" must allow the form:
+        `username:password@host:port`.
+
+        """
+        locless_schemes = ['', 'mailto', 'news']
+        scheme, netloc, path, params, query, fragment = url = urlparse(url)
+        safe_url = False
+        if netloc != '' or scheme in locless_schemes:
+            safe_url = True
+
+        for part in url[2:]:
+            if ":" in part:
+                safe_url = False
+
+        if self.markdown.safeMode and not safe_url:
+            return ''
+        else:
+            return urlunparse(url)
+
+class ImagePattern(LinkPattern):
+    """ Return a img element from the given match. """
+    def handleMatch(self, m):
+        el = markdown.etree.Element("img")
+        src_parts = m.group(9).split()
+        if src_parts:
+            src = src_parts[0]
+            if src[0] == "<" and src[-1] == ">":
+                src = src[1:-1]
+            el.set('src', self.sanitize_url(src))
+        else:
+            el.set('src', "")
+        if len(src_parts) > 1:
+            el.set('title', dequote(" ".join(src_parts[1:])))
+
+        if markdown.ENABLE_ATTRIBUTES:
+            truealt = handleAttributes(m.group(2), el)
+        else:
+            truealt = m.group(2)
+
+        el.set('alt', truealt)
+        return el
+
+class ReferencePattern(LinkPattern):
+    """ Match to a stored reference and return link element. """
+    def handleMatch(self, m):
+        if m.group(9):
+            id = m.group(9).lower()
+        else:
+            # if we got something like "[Google][]"
+            # we'll use "google" as the id
+            id = m.group(2).lower()
+
+        if not id in self.markdown.references: # ignore undefined refs
+            return None
+        href, title = self.markdown.references[id]
+
+        text = m.group(2)
+        return self.makeTag(href, title, text)
+
+    def makeTag(self, href, title, text):
+        el = markdown.etree.Element('a')
+
+        el.set('href', self.sanitize_url(href))
+        if title:
+            el.set('title', title)
+
+        el.text = text
+        return el
+
+
+class ImageReferencePattern (ReferencePattern):
+    """ Match to a stored reference and return img element. """
+    def makeTag(self, href, title, text):
+        el = markdown.etree.Element("img")
+        el.set("src", self.sanitize_url(href))
+        if title:
+            el.set("title", title)
+        el.set("alt", text)
+        return el
+
+
+class AutolinkPattern (Pattern):
+    """ Return a link Element given an autolink (`<http://example/com>`). """
+    def handleMatch(self, m):
+        el = markdown.etree.Element("a")
+        el.set('href', m.group(2))
+        el.text = markdown.AtomicString(m.group(2))
+        return el
+
+class AutomailPattern (Pattern):
+    """
+    Return a mailto link Element given an automail link (`<foo@example.com>`).
+    """
+    def handleMatch(self, m):
+        el = markdown.etree.Element('a')
+        email = m.group(2)
+        if email.startswith("mailto:"):
+            email = email[len("mailto:"):]
+
+        def codepoint2name(code):
+            """Return entity definition by code, or the code if not defined."""
+            entity = htmlentitydefs.codepoint2name.get(code)
+            if entity:
+                return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
+            else:
+                return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
+
+        letters = [codepoint2name(ord(letter)) for letter in email]
+        el.text = markdown.AtomicString(''.join(letters))
+
+        mailto = "mailto:" + email
+        mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
+                          ord(letter) for letter in mailto])
+        el.set('href', mailto)
+        return el
+

Added: packages-ng/markdown/odict.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/odict.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,162 @@
+class OrderedDict(dict):
+    """
+    A dictionary that keeps its keys in the order in which they're inserted.
+    
+    Copied from Django's SortedDict with some modifications.
+
+    """
+    def __new__(cls, *args, **kwargs):
+        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
+        instance.keyOrder = []
+        return instance
+
+    def __init__(self, data=None):
+        if data is None:
+            data = {}
+        super(OrderedDict, self).__init__(data)
+        if isinstance(data, dict):
+            self.keyOrder = data.keys()
+        else:
+            self.keyOrder = []
+            for key, value in data:
+                if key not in self.keyOrder:
+                    self.keyOrder.append(key)
+
+    def __deepcopy__(self, memo):
+        from copy import deepcopy
+        return self.__class__([(key, deepcopy(value, memo))
+                               for key, value in self.iteritems()])
+
+    def __setitem__(self, key, value):
+        super(OrderedDict, self).__setitem__(key, value)
+        if key not in self.keyOrder:
+            self.keyOrder.append(key)
+
+    def __delitem__(self, key):
+        super(OrderedDict, self).__delitem__(key)
+        self.keyOrder.remove(key)
+
+    def __iter__(self):
+        for k in self.keyOrder:
+            yield k
+
+    def pop(self, k, *args):
+        result = super(OrderedDict, self).pop(k, *args)
+        try:
+            self.keyOrder.remove(k)
+        except ValueError:
+            # Key wasn't in the dictionary in the first place. No problem.
+            pass
+        return result
+
+    def popitem(self):
+        result = super(OrderedDict, self).popitem()
+        self.keyOrder.remove(result[0])
+        return result
+
+    def items(self):
+        return zip(self.keyOrder, self.values())
+
+    def iteritems(self):
+        for key in self.keyOrder:
+            yield key, super(OrderedDict, self).__getitem__(key)
+
+    def keys(self):
+        return self.keyOrder[:]
+
+    def iterkeys(self):
+        return iter(self.keyOrder)
+
+    def values(self):
+        return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]
+
+    def itervalues(self):
+        for key in self.keyOrder:
+            yield super(OrderedDict, self).__getitem__(key)
+
+    def update(self, dict_):
+        for k, v in dict_.items():
+            self.__setitem__(k, v)
+
+    def setdefault(self, key, default):
+        if key not in self.keyOrder:
+            self.keyOrder.append(key)
+        return super(OrderedDict, self).setdefault(key, default)
+
+    def value_for_index(self, index):
+        """Return the value of the item at the given zero-based index."""
+        return self[self.keyOrder[index]]
+
+    def insert(self, index, key, value):
+        """Insert the key, value pair before the item with the given index."""
+        if key in self.keyOrder:
+            n = self.keyOrder.index(key)
+            del self.keyOrder[n]
+            if n < index:
+                index -= 1
+        self.keyOrder.insert(index, key)
+        super(OrderedDict, self).__setitem__(key, value)
+
+    def copy(self):
+        """Return a copy of this object."""
+        # This way of initializing the copy means it works for subclasses, too.
+        obj = self.__class__(self)
+        obj.keyOrder = self.keyOrder[:]
+        return obj
+
+    def __repr__(self):
+        """
+        Replace the normal dict.__repr__ with a version that returns the keys
+        in their insertion order.
+        """
+        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
+
+    def clear(self):
+        super(OrderedDict, self).clear()
+        self.keyOrder = []
+
+    def index(self, key):
+        """ Return the index of a given key. """
+        return self.keyOrder.index(key)
+
+    def index_for_location(self, location):
+        """ Return index or None for a given location. """
+        if location == '_begin':
+            i = 0
+        elif location == '_end':
+            i = None
+        elif location.startswith('<') or location.startswith('>'):
+            i = self.index(location[1:])
+            if location.startswith('>'):
+                if i >= len(self):
+                    # last item
+                    i = None
+                else:
+                    i += 1
+        else:
+            raise ValueError('Not a valid location: "%s". Location key '
+                             'must start with a ">" or "<".' % location)
+        return i
+
+    def add(self, key, value, location):
+        """ Insert by key location. """
+        i = self.index_for_location(location)
+        if i is not None:
+            self.insert(i, key, value)
+        else:
+            self.__setitem__(key, value)
+
+    def link(self, key, location):
+        """ Change location of an existing item. """
+        n = self.keyOrder.index(key)
+        del self.keyOrder[n]
+        i = self.index_for_location(location)
+        try:
+            if i is not None:
+                self.keyOrder.insert(i, key)
+            else:
+                self.keyOrder.append(key)
+        except Exception:
+            # restore to prevent data loss and reraise
+            self.keyOrder.insert(n, key)
+            raise
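+# Usage sketch (names illustrative): processors throughout markdown are stored
+# in an OrderedDict and positioned relative to existing keys:
+#
+#     od = OrderedDict([('begin', 1), ('end', 3)])
+#     od.add('middle', 2, '>begin')   # insert right after 'begin'
+#     od.keys()                       # -> ['begin', 'middle', 'end']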

Added: packages-ng/markdown/postprocessors.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/postprocessors.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,77 @@
+"""
+POST-PROCESSORS
+=============================================================================
+
+Markdown also allows post-processors, which are similar to preprocessors in
+that they need to implement a "run" method. However, they are run after core
+processing.
+
+"""
+
+
+import markdown
+
+class Processor:
+    def __init__(self, markdown_instance=None):
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+class Postprocessor(Processor):
+    """
+    Postprocessors are run after the ElementTree is converted back into text.
+
+    Each Postprocessor implements a "run" method that takes a pointer to a
+    text string, modifies it as necessary and returns a text string.
+
+    Postprocessors must extend markdown.Postprocessor.
+
+    """
+
+    def run(self, text):
+        """
+        Subclasses of Postprocessor should implement a `run` method, which
+        takes the html document as a single text string and returns a
+        (possibly modified) string.
+
+        """
+        pass
+
+
+class RawHtmlPostprocessor(Postprocessor):
+    """ Restore raw html to the document. """
+
+    def run(self, text):
+        """ Iterate over html stash and restore "safe" html. """
+        for i in range(self.markdown.htmlStash.html_counter):
+            html, safe  = self.markdown.htmlStash.rawHtmlBlocks[i]
+            if self.markdown.safeMode and not safe:
+                if str(self.markdown.safeMode).lower() == 'escape':
+                    html = self.escape(html)
+                elif str(self.markdown.safeMode).lower() == 'remove':
+                    html = ''
+                else:
+                    html = markdown.HTML_REMOVED_TEXT
+            if safe or not self.markdown.safeMode:
+                text = text.replace("<p>%s</p>" % 
+                            (markdown.preprocessors.HTML_PLACEHOLDER % i),
+                            html + "\n")
+            text =  text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i, 
+                                 html)
+        return text
+
+    def escape(self, html):
+        """ Basic html escaping """
+        html = html.replace('&', '&amp;')
+        html = html.replace('<', '&lt;')
+        html = html.replace('>', '&gt;')
+        return html.replace('"', '&quot;')
+
+
+class AndSubstitutePostprocessor(Postprocessor):
+    """ Restore valid entities """
+    def __init__(self):
+        pass
+
+    def run(self, text):
+        text =  text.replace(markdown.AMP_SUBSTITUTE, "&")
+        return text
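+# A minimal custom postprocessor sketch (illustrative, not registered by
+# default): subclasses only need a run() method and can be attached to a
+# Markdown instance `md` like so:
+#
+#     class FooterPostprocessor(Postprocessor):
+#         def run(self, text):
+#             return text + "\n<!-- generated -->"
+#
+#     md.postprocessors.add('footer', FooterPostprocessor(md), '_end')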

Added: packages-ng/markdown/preprocessors.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/preprocessors.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,214 @@
+
+"""
+PRE-PROCESSORS
+=============================================================================
+
+Preprocessors work on source text before we start doing anything too
+complicated. 
+"""
+
+import re
+import markdown
+
+HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
+HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
+
+class Processor:
+    def __init__(self, markdown_instance=None):
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+class Preprocessor (Processor):
+    """
+    Preprocessors are run after the text is broken into lines.
+
+    Each preprocessor implements a "run" method that takes a pointer to a
+    list of lines of the document, modifies it as necessary and returns
+    either the same pointer or a pointer to a new list.
+
+    Preprocessors must extend markdown.Preprocessor.
+
+    """
+    def run(self, lines):
+        """
+        Each subclass of Preprocessor should override the `run` method, which
+        takes the document as a list of strings split by newlines and returns
+        the (possibly modified) list of lines.
+
+        """
+        pass
+
+class HtmlStash:
+    """
+    This class is used for stashing HTML objects that we extract
+    in the beginning and replace with place-holders.
+    """
+
+    def __init__ (self):
+        """ Create a HtmlStash. """
+        self.html_counter = 0 # for counting inline html segments
+        self.rawHtmlBlocks=[]
+
+    def store(self, html, safe=False):
+        """
+        Saves an HTML segment for later reinsertion.  Returns a
+        placeholder string that needs to be inserted into the
+        document.
+
+        Keyword arguments:
+
+        * html: an html segment
+        * safe: label an html segment as safe for safemode
+
+        Returns : a placeholder string
+
+        """
+        self.rawHtmlBlocks.append((html, safe))
+        placeholder = HTML_PLACEHOLDER % self.html_counter
+        self.html_counter += 1
+        return placeholder
+
+    def reset(self):
+        self.html_counter = 0
+        self.rawHtmlBlocks = []
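+# HtmlStash sketch (illustrative): raw HTML is swapped for a placeholder
+# during preprocessing and restored by RawHtmlPostprocessor at the end.
+#
+#     stash = HtmlStash()
+#     placeholder = stash.store('<div>raw</div>', safe=True)
+#     # placeholder == HTML_PLACEHOLDER % 0, i.e. STX + 'wzxhzdk:0' + ETX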
+
+
+class HtmlBlockPreprocessor(Preprocessor):
+    """Remove html blocks from the text and store them for later retrieval."""
+
+    right_tag_patterns = ["</%s>", "%s>"]
+
+    def _get_left_tag(self, block):
+        return block[1:].replace(">", " ", 1).split()[0].lower()
+
+    def _get_right_tag(self, left_tag, block):
+        for p in self.right_tag_patterns:
+            tag = p % left_tag
+            i = block.rfind(tag)
+            if i > 2:
+                return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
+        return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)
+
+    def _equal_tags(self, left_tag, right_tag):
+        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
+            return True
+        if ("/" + left_tag) == right_tag:
+            return True
+        if (right_tag == "--" and left_tag == "--"):
+            return True
+        elif left_tag == right_tag[1:] \
+            and right_tag[0] != "<":
+            return True
+        else:
+            return False
+
+    def _is_oneliner(self, tag):
+        return (tag in ['hr', 'hr/'])
+
+    def run(self, lines):
+        text = "\n".join(lines)
+        new_blocks = []
+        text = text.split("\n\n")
+        items = []
+        left_tag = ''
+        right_tag = ''
+        in_tag = False # flag
+
+        while text:
+            block = text[0]
+            if block.startswith("\n"):
+                block = block[1:]
+            text = text[1:]
+
+            if block.startswith("\n"):
+                block = block[1:]
+
+            if not in_tag:
+                if block.startswith("<"):
+                    left_tag = self._get_left_tag(block)
+                    right_tag, data_index = self._get_right_tag(left_tag, block)
+
+                    if data_index < len(block):
+                        text.insert(0, block[data_index:])
+                        block = block[:data_index]
+
+                    if not (markdown.isBlockLevel(left_tag) \
+                        or block[1] in ["!", "?", "@", "%"]):
+                        new_blocks.append(block)
+                        continue
+
+                    if self._is_oneliner(left_tag):
+                        new_blocks.append(block.strip())
+                        continue
+
+                    if block[1] == "!":
+                        # is a comment block
+                        left_tag = "--"
+                        right_tag, data_index = self._get_right_tag(left_tag, block)
+                        # keep checking conditions below and maybe just append
+
+                    if block.rstrip().endswith(">") \
+                        and self._equal_tags(left_tag, right_tag):
+                        new_blocks.append(
+                            self.markdown.htmlStash.store(block.strip()))
+                        continue
+                    else: #if not block[1] == "!":
+                        # if is block level tag and is not complete
+
+                        if markdown.isBlockLevel(left_tag) or left_tag == "--" \
+                        and not block.rstrip().endswith(">"):
+                            items.append(block.strip())
+                            in_tag = True
+                        else:
+                            new_blocks.append(
+                            self.markdown.htmlStash.store(block.strip()))
+
+                        continue
+
+                new_blocks.append(block)
+
+            else:
+                items.append(block.strip())
+
+                right_tag, data_index = self._get_right_tag(left_tag, block)
+
+                if self._equal_tags(left_tag, right_tag):
+                    # if find closing tag
+                    in_tag = False
+                    new_blocks.append(
+                        self.markdown.htmlStash.store('\n\n'.join(items)))
+                    items = []
+
+        if items:
+            new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
+            new_blocks.append('\n')
+
+        new_text = "\n\n".join(new_blocks)
+        return new_text.split("\n")
+
+
+class ReferencePreprocessor(Preprocessor):
+    """ Remove reference definitions from text and store for later use. """
+
+    RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)
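+    # Matches reference definitions such as:
+    #     [id]: http://example.com/ "Optional Title"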
+
+    def run (self, lines):
+        new_text = [];
+        for line in lines:
+            m = self.RE.match(line)
+            if m:
+                id = m.group(2).strip().lower()
+                t = m.group(4).strip()  # potential title
+                if not t:
+                    self.markdown.references[id] = (m.group(3), t)
+                elif (len(t) >= 2
+                      and (t[0] == t[-1] == "\""
+                           or t[0] == t[-1] == "\'"
+                           or (t[0] == "(" and t[-1] == ")") ) ):
+                    self.markdown.references[id] = (m.group(3), t[1:-1])
+                else:
+                    new_text.append(line)
+            else:
+                new_text.append(line)
+
+        return new_text #+ "\n"

Added: packages-ng/markdown/treeprocessors.py
==============================================================================
--- (empty file)
+++ packages-ng/markdown/treeprocessors.py	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,329 @@
+import markdown
+import re
+
+def isString(s):
+    """ Check if it's string """
+    return isinstance(s, unicode) or isinstance(s, str)
+
+class Processor:
+    def __init__(self, markdown_instance=None):
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+class Treeprocessor(Processor):
+    """
+    Treeprocessors are run on the ElementTree object before serialization.
+
+    Each Treeprocessor implements a "run" method that takes a pointer to an
+    ElementTree, modifies it as necessary and returns an ElementTree
+    object.
+
+    Treeprocessors must extend markdown.Treeprocessor.
+
+    """
+    def run(self, root):
+        """
+        Subclasses of Treeprocessor should implement a `run` method, which
+        takes a root ElementTree. This method can return another ElementTree 
+        object, and the existing root ElementTree will be replaced, or it can 
+        modify the current tree and return None.
+        """
+        pass
+
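+# A minimal custom treeprocessor sketch (illustrative, not registered by
+# default): it walks the tree, tags every table element, and would be
+# attached to a Markdown instance `md` like so:
+#
+#     class TableClassTreeprocessor(Treeprocessor):
+#         def run(self, root):
+#             for el in root.getiterator('table'):
+#                 el.set('class', 'pretty')
+#
+#     md.treeprocessors.add('tableclass', TableClassTreeprocessor(md), '_end')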
+
+class InlineProcessor(Treeprocessor):
+    """
+    A Treeprocessor that traverses a tree, applying inline patterns.
+    """
+
+    def __init__ (self, md):
+        self.__placeholder_prefix = markdown.INLINE_PLACEHOLDER_PREFIX
+        self.__placeholder_suffix = markdown.ETX
+        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+                                      + len(self.__placeholder_suffix)
+        self.__placeholder_re = re.compile(markdown.INLINE_PLACEHOLDER % r'([0-9]{4})')
+        self.markdown = md
+
+    def __makePlaceholder(self, type):
+        """ Generate a placeholder """
+        id = "%04d" % len(self.stashed_nodes)
+        hash = markdown.INLINE_PLACEHOLDER % id
+        return hash, id
+
+    def __findPlaceholder(self, data, index):
+        """
+        Extract id from data string, start from index
+
+        Keyword arguments:
+
+        * data: string
+        * index: index from which to start the search
+
+        Returns: placeholder id and the string index just after the found placeholder.
+        """
+
+        m = self.__placeholder_re.search(data, index)
+        if m:
+            return m.group(1), m.end()
+        else:
+            return None, index + 1
+
+    def __stashNode(self, node, type):
+        """ Add node to stash """
+        placeholder, id = self.__makePlaceholder(type)
+        self.stashed_nodes[id] = node
+        return placeholder
+
+    def __handleInline(self, data, patternIndex=0):
+        """
+        Process string with inline patterns and replace it
+        with placeholders
+
+        Keyword arguments:
+
+        * data: A line of Markdown text
+        * patternIndex: The index of the inlinePattern to start with
+
+        Returns: String with placeholders.
+
+        """
+        if not isinstance(data, markdown.AtomicString):
+            startIndex = 0
+            while patternIndex < len(self.markdown.inlinePatterns):
+                data, matched, startIndex = self.__applyPattern(
+                    self.markdown.inlinePatterns.value_for_index(patternIndex),
+                    data, patternIndex, startIndex)
+                if not matched:
+                    patternIndex += 1
+        return data
+
+    def __processElementText(self, node, subnode, isText=True):
+        """
+        Process placeholders in Element.text or Element.tail
+        of Elements popped from self.stashed_nodes.
+
+        Keyword arguments:
+
+        * node: parent node
+        * subnode: processing node
+        * isText: True to process Element.text, False to process Element.tail
+
+        Returns: None
+
+        """
+        if isText:
+            text = subnode.text
+            subnode.text = None
+        else:
+            text = subnode.tail
+            subnode.tail = None
+
+        childResult = self.__processPlaceholders(text, subnode)
+
+        if not isText and node is not subnode:
+            pos = node.getchildren().index(subnode)
+            node.remove(subnode)
+        else:
+            pos = 0
+
+        childResult.reverse()
+        for newChild in childResult:
+            node.insert(pos, newChild)
+
+    def __processPlaceholders(self, data, parent):
+        """
+        Process string with placeholders and generate ElementTree tree.
+
+        Keyword arguments:
+
+        * data: string with placeholders instead of ElementTree elements.
+        * parent: the Element whose inline content is being processed
+
+        Returns: list of ElementTree elements with inline patterns applied.
+        """
+        def linkText(text):
+            if text:
+                if result:
+                    if result[-1].tail:
+                        result[-1].tail += text
+                    else:
+                        result[-1].tail = text
+                else:
+                    if parent.text:
+                        parent.text += text
+                    else:
+                        parent.text = text
+
+        result = []
+        strartIndex = 0
+        while data:
+            index = data.find(self.__placeholder_prefix, strartIndex)
+            if index != -1:
+                id, phEndIndex = self.__findPlaceholder(data, index)
+
+                if id in self.stashed_nodes:
+                    node = self.stashed_nodes.get(id)
+
+                    if index > 0:
+                        text = data[strartIndex:index]
+                        linkText(text)
+
+                    if not isString(node): # it's Element
+                        for child in [node] + node.getchildren():
+                            if child.tail:
+                                if child.tail.strip():
+                                    self.__processElementText(node, child, False)
+                            if child.text:
+                                if child.text.strip():
+                                    self.__processElementText(child, child)
+                    else: # it's just a string
+                        linkText(node)
+                        strartIndex = phEndIndex
+                        continue
+
+                    strartIndex = phEndIndex
+                    result.append(node)
+
+                else: # wrong placeholder
+                    end = index + len(self.__placeholder_prefix)
+                    linkText(data[strartIndex:end])
+                    strartIndex = end
+            else:
+                text = data[strartIndex:]
+                linkText(text)
+                data = ""
+
+        return result
+
+    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
+        """
+        Check if the line fits the pattern, create the necessary
+        elements, add it to stashed_nodes.
+
+        Keyword arguments:
+
+        * data: the text to be processed
+        * pattern: the pattern to be checked
+        * patternIndex: index of current pattern
+        * startIndex: string index from which to start the search
+
+        Returns: String with placeholders instead of ElementTree elements.
+
+        """
+        match = pattern.getCompiledRegExp().match(data[startIndex:])
+        leftData = data[:startIndex]
+
+        if not match:
+            return data, False, 0
+
+        node = pattern.handleMatch(match)
+
+        if node is None:
+            return data, True, len(leftData) + match.span(len(match.groups()))[0]
+
+        if not isString(node):
+            if not isinstance(node.text, markdown.AtomicString):
+                # We need to process current node too
+                for child in [node] + node.getchildren():
+                    if not isString(node):
+                        if child.text:
+                            child.text = self.__handleInline(child.text,
+                                                            patternIndex + 1)
+                        if child.tail:
+                            child.tail = self.__handleInline(child.tail,
+                                                            patternIndex)
+
+        placeholder = self.__stashNode(node, pattern.type())
+
+        return "%s%s%s%s" % (leftData,
+                             match.group(1),
+                             placeholder, match.groups()[-1]), True, 0
+
+    def run(self, tree):
+        """Apply inline patterns to a parsed Markdown tree.
+
+        Iterate over the ElementTree, find elements with inline content, apply
+        the inline patterns and append the newly created Elements to the tree.
+        If you don't want some data processed with inline patterns, use an
+        AtomicString instead of a normal string:
+
+            node.text = markdown.AtomicString("data won't be processed with inline patterns")
+
+        Arguments:
+
+        * tree: ElementTree object representing a parsed Markdown document.
+
+        Returns: ElementTree object with applied inline patterns.
+
+        """
+        self.stashed_nodes = {}
+
+        stack = [tree]
+
+        while stack:
+            currElement = stack.pop()
+            insertQueue = []
+            for child in currElement.getchildren():
+                if child.text and not isinstance(child.text, markdown.AtomicString):
+                    text = child.text
+                    child.text = None
+                    lst = self.__processPlaceholders(self.__handleInline(
+                                                    text), child)
+                    stack += lst
+                    insertQueue.append((child, lst))
+
+                if child.getchildren():
+                    stack.append(child)
+
+            for element, lst in insertQueue:
+                if element.text:
+                    element.text = \
+                        markdown.inlinepatterns.handleAttributes(element.text, 
+                                                                 element)
+                i = 0
+                for newChild in lst:
+                    # Processing attributes
+                    if newChild.tail:
+                        newChild.tail = \
+                            markdown.inlinepatterns.handleAttributes(newChild.tail,
+                                                                     element)
+                    if newChild.text:
+                        newChild.text = \
+                            markdown.inlinepatterns.handleAttributes(newChild.text,
+                                                                     newChild)
+                    element.insert(i, newChild)
+                    i += 1
+        return tree
+
+
+class PrettifyTreeprocessor(Treeprocessor):
+    """ Add linebreaks to the html document. """
+
+    def _prettifyETree(self, elem):
+        """ Recursively add linebreaks to ElementTree children. """
+
+        i = "\n"
+        if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
+            if (not elem.text or not elem.text.strip()) \
+                    and len(elem) and markdown.isBlockLevel(elem[0].tag):
+                elem.text = i
+            for e in elem:
+                if markdown.isBlockLevel(e.tag):
+                    self._prettifyETree(e)
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+
+    def run(self, root):
+        """ Add linebreaks to ElementTree root object. """
+
+        self._prettifyETree(root)
+        # Do <br />'s separately as they are often in the middle of
+        # inline content and missed by _prettifyETree.
+        brs = root.getiterator('br')
+        for br in brs:
+            if not br.tail or not br.tail.strip():
+                br.tail = '\n'
+            else:
+                br.tail = '\n%s' % br.tail

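The Treeprocessor class above is the hook point that extensions use: subclass it, implement run(), and register the instance on a Markdown object. A minimal sketch against the API added in this commit (Python 2; the class name and registration key are made up for illustration, and it assumes the bundled markdown package is importable):

    import markdown
    from markdown.treeprocessors import Treeprocessor

    class NoFollowTreeprocessor(Treeprocessor):
        # Illustrative: add rel="nofollow" to every link in the generated tree.
        def run(self, root):
            for link in root.getiterator('a'):
                link.set('rel', 'nofollow')
            # returning None keeps the (modified) original tree

    md = markdown.Markdown()
    md.treeprocessors['nofollow'] = NoFollowTreeprocessor(md)
    print md.convert('[PLD](http://www.pld-linux.org/)')

As the InlineProcessor.run() docstring notes, text wrapped in markdown.AtomicString is skipped by the inline patterns, which is the usual way for such processors to protect literal content.
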
Added: packages-ng/mergerepos
==============================================================================
--- (empty file)
+++ packages-ng/mergerepos	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+
+REPOS = [
+	'file:///home/users/patrys/Desktop/web/packages-ng/repo1/',
+	'file:///home/users/patrys/Desktop/web/packages-ng/repo2/',
+]
+OUTPUT = './repo3'
+
+from createrepo import merge
+from createrepo import MetaDataGenerator
+import os
+import shutil
+import tempfile
+import yum
+import yum.Errors
+import yum.update_md
+# modules used by the createrepo code copied below (import locations assumed from createrepo 0.9.x)
+import libxml2
+import time
+import sqlitecachec
+from yum import misc
+from createrepo.utils import _gzipOpen, bzipFile, MDError, _
+
+class FixedMetaDataGenerator(MetaDataGenerator):
+	'''
+	Work around a bug in createrepo
+	'''
+	def doRepoMetadata(self):
+		"""wrapper to generate the repomd.xml file that stores the info on the other files"""
+		repodoc = libxml2.newDoc("1.0")
+		reporoot = repodoc.newChild(None, "repomd", None)
+		repons = reporoot.newNs('http://linux.duke.edu/metadata/repo', None)
+		reporoot.setNs(repons)
+		rpmns = reporoot.newNs("http://linux.duke.edu/metadata/rpm", 'rpm')		
+		repopath = os.path.join(self.conf.outputdir, self.conf.tempdir)
+		repofilepath = os.path.join(repopath, self.conf.repomdfile)
+		
+		revision = reporoot.newChild(None, 'revision', self.conf.revision)
+		if self.conf.content_tags or self.conf.distro_tags:
+			tags = reporoot.newChild(None, 'tags', None)
+			for item in self.conf.content_tags:
+				c_tags = tags.newChild(None, 'content', item)
+			for (cpeid,item) in self.conf.distro_tags:
+				d_tags = tags.newChild(None, 'distro', item)
+				if cpeid:
+					d_tags.newProp('cpeid', cpeid)
+
+		sumtype = self.conf.sumtype
+		if self.conf.database_only:
+			workfiles = []
+			db_workfiles = [(self.md_sqlite.pri_sqlite_file, 'primary_db'),
+							(self.md_sqlite.file_sqlite_file, 'filelists_db'),
+							(self.md_sqlite.other_sqlite_file, 'other_db')]
+			dbversion = '10'							
+		else:
+			workfiles = [(self.conf.otherfile, 'other',),
+						 (self.conf.filelistsfile, 'filelists'),
+						 (self.conf.primaryfile, 'primary')]
+			db_workfiles = []
+		repoid='garbageid'
+		
+		if self.conf.database:
+			if not self.conf.quiet: self.callback.log('Generating sqlite DBs')
+			try:
+				dbversion = str(sqlitecachec.DBVERSION)
+			except AttributeError:
+				dbversion = '9'
+			rp = sqlitecachec.RepodataParserSqlite(repopath, repoid, None)
+
+		for (file, ftype) in workfiles:
+			complete_path = os.path.join(repopath, file)
+			
+			zfo = _gzipOpen(complete_path)
+			uncsum = misc.checksum(sumtype, zfo)
+			zfo.close()
+			csum = misc.checksum(sumtype, complete_path)
+			timestamp = os.stat(complete_path)[8]
+
+			db_csums = {}
+			db_compressed_sums = {}
+			
+			if self.conf.database:
+				if self.conf.verbose:
+					self.callback.log("Starting %s db creation: %s" % (ftype, time.ctime()))
+			
+				if ftype == 'primary':
+					rp.getPrimary(complete_path, csum)
+								
+				elif ftype == 'filelists':
+					rp.getFilelists(complete_path, csum)
+					
+				elif ftype == 'other':
+					rp.getOtherdata(complete_path, csum)
+				
+				tmp_result_name = '%s.xml.gz.sqlite' % ftype
+				tmp_result_path = os.path.join(repopath, tmp_result_name)
+				good_name = '%s.sqlite' % ftype
+				resultpath = os.path.join(repopath, good_name)
+				
+				# rename from silly name to not silly name
+				os.rename(tmp_result_path, resultpath)
+				compressed_name = '%s.bz2' % good_name
+				result_compressed = os.path.join(repopath, compressed_name)
+				db_csums[ftype] = misc.checksum(sumtype, resultpath)
+
+				# compress the files
+				bzipFile(resultpath, result_compressed)
+				# csum the compressed file
+				db_compressed_sums[ftype] = misc.checksum(sumtype, result_compressed)
+				# remove the uncompressed file
+				os.unlink(resultpath)
+
+				if self.conf.unique_md_filenames:
+					csum_compressed_name = '%s-%s.bz2' % (db_compressed_sums[ftype], good_name)
+					csum_result_compressed =  os.path.join(repopath, csum_compressed_name)
+					os.rename(result_compressed, csum_result_compressed)
+					result_compressed = csum_result_compressed
+					compressed_name = csum_compressed_name
+					
+				# timestamp the compressed file
+				db_timestamp = os.stat(result_compressed)[8]
+				
+				# add this data as a section to the repomdxml
+				db_data_type = '%s_db' % ftype
+				data = reporoot.newChild(None, 'data', None)
+				data.newProp('type', db_data_type)
+				location = data.newChild(None, 'location', None)
+				if self.conf.baseurl is not None:
+					location.newProp('xml:base', self.conf.baseurl)
+				
+				location.newProp('href', os.path.join(self.conf.finaldir, compressed_name))
+				checksum = data.newChild(None, 'checksum', db_compressed_sums[ftype])
+				checksum.newProp('type', sumtype)
+				db_tstamp = data.newChild(None, 'timestamp', str(db_timestamp))
+				unchecksum = data.newChild(None, 'open-checksum', db_csums[ftype])
+				unchecksum.newProp('type', sumtype)
+				database_version = data.newChild(None, 'database_version', dbversion)
+				if self.conf.verbose:
+					self.callback.log("Ending %s db creation: %s" % (ftype, time.ctime()))
+
+			data = reporoot.newChild(None, 'data', None)
+			data.newProp('type', ftype)
+
+			checksum = data.newChild(None, 'checksum', csum)
+			checksum.newProp('type', sumtype)
+			timestamp = data.newChild(None, 'timestamp', str(timestamp))
+			unchecksum = data.newChild(None, 'open-checksum', uncsum)
+			unchecksum.newProp('type', sumtype)
+			location = data.newChild(None, 'location', None)
+			if self.conf.baseurl is not None:
+				location.newProp('xml:base', self.conf.baseurl)
+			if self.conf.unique_md_filenames:
+				res_file = '%s-%s.xml.gz' % (csum, ftype)
+				orig_file = os.path.join(repopath, file)
+				dest_file = os.path.join(repopath, res_file)
+				os.rename(orig_file, dest_file)
+				
+			else:
+				res_file = file
+
+			file = res_file 
+			
+			location.newProp('href', os.path.join(self.conf.finaldir, file))
+
+
+		if not self.conf.quiet and self.conf.database: self.callback.log('Sqlite DBs complete')		
+
+		for (fn, ftype) in db_workfiles:
+			attribs = {'database_version':dbversion}
+			self.addArbitraryMetadata(fn, ftype, reporoot, compress=True, 
+									  compress_type='bzip2', attribs=attribs)
+			try:
+				os.unlink(fn)
+			except (IOError, OSError), e:
+				pass
+
+			
+		if self.conf.groupfile is not None:
+			self.addArbitraryMetadata(self.conf.groupfile, 'group_gz', reporoot)
+			self.addArbitraryMetadata(self.conf.groupfile, 'group', reporoot, compress=False)			
+		
+		if self.conf.additional_metadata:
+			for md_type, mdfile in self.conf.additional_metadata.items():
+				self.addArbitraryMetadata(mdfile, md_type, reporoot)
+
+		# FIXME - disabled until we decide how best to use this
+		#if self.rpmlib_reqs:
+		#	rpmlib = reporoot.newChild(rpmns, 'lib', None)
+		#	for r in self.rpmlib_reqs.keys():
+		#		req  = rpmlib.newChild(rpmns, 'requires', r)
+				
+			
+		# save it down
+		try:
+			repodoc.saveFormatFileEnc(repofilepath, 'UTF-8', 1)
+		except:
+			self.callback.errorlog(_('Error saving temp file for repomd.xml: %s') % repofilepath)
+			raise MDError, 'Could not save temp file: %s' % repofilepath 
+
+		del repodoc
+
+class RepoMerge(merge.RepoMergeBase):
+	'''
+	Work around a bug in createrepo
+	'''
+	def write_metadata(self, outputdir=None):
+		mytempdir = tempfile.mkdtemp()
+		if self.groups:
+			comps_fn = mytempdir + '/groups.xml'
+			compsfile = open(comps_fn, 'w')
+			compsfile.write(self.yumbase.comps.xml())
+			compsfile.close()
+			self.mdconf.groupfile=comps_fn
+		
+		if self.updateinfo:
+			ui_fn = mytempdir + '/updateinfo.xml'
+			uifile = open(ui_fn, 'w')
+			umd = yum.update_md.UpdateMetadata()
+			for repo in self.yumbase.repos.listEnabled():
+				try: # attempt to grab the updateinfo.xml.gz from the repodata
+					umd.add(repo)
+				except yum.Errors.RepoMDError:
+					continue 
+			umd.xml(fileobj=uifile)
+			uifile.close()
+			self.mdconf.additional_metadata['updateinfo'] = ui_fn
+
+
+		self.mdconf.pkglist = self.yumbase.pkgSack
+		self.mdconf.directory = self.outputdir
+		if outputdir:
+			self.mdconf.directory = outputdir
+		# clean out what was there
+		if os.path.exists(self.mdconf.directory + '/repodata'):
+			shutil.rmtree(self.mdconf.directory + '/repodata')
+
+		if not os.path.exists(self.mdconf.directory):
+			os.makedirs(self.mdconf.directory)
+
+		mdgen = FixedMetaDataGenerator(config_obj=self.mdconf)
+		mdgen.doPkgMetadata()
+		mdgen.doRepoMetadata()
+		mdgen.doFinalMove()
+
+rmbase = RepoMerge(REPOS)
+rmbase.mdconf.database = True
+rmbase.outputdir = OUTPUT
+rmbase.database = True
+rmbase.database_only = True
+rmbase.merge_repos()
+rmbase.write_metadata()

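The mergerepos script is driven entirely by the REPOS and OUTPUT constants at its top and by the calls at the bottom. Reusing the RepoMerge class for a different set of repositories would look roughly like this (the paths are illustrative; createrepo and yum must be installed):

    repos = [
        'file:///srv/pld/repo-ac/',
        'file:///srv/pld/repo-th/',
    ]
    merger = RepoMerge(repos)
    merger.mdconf.database = True
    merger.outputdir = './merged-repo'
    merger.merge_repos()
    merger.write_metadata()
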
Added: packages-ng/packages/templates/base.html
==============================================================================
--- (empty file)
+++ packages-ng/packages/templates/base.html	Fri Apr  3 15:26:29 2009
@@ -0,0 +1,8 @@
+<html>
+<head>
+	<title>{% block title %}PLD Linux Packages{% endblock %}</title>
+</head>
+<body>
+{% block content %}
+{% endblock %}
+</body>
+</html>

Modified: packages-ng/packages/templates/details.html
==============================================================================
--- packages-ng/packages/templates/details.html	(original)
+++ packages-ng/packages/templates/details.html	Fri Apr  3 15:26:29 2009
@@ -1,8 +1,14 @@
+{% extends 'base.html' %}
+
+{% load markup %}
+
+{% block content %}
 {% for package in packages %}
 <h2>Package: {{ package.name }} ({{ package.epoch }}:{{ package.version }}-{{ package.release }}.{{ package.arch }})</h2>
 
 <h3>{{ package.summary }}</h3>
-<p>{{ package.description }}</p>
+
+{{ package.description|markdown }}
 
 <p>Source package: <a href="{% url packages.views.browse_source package.rpm_sourcerpm %}">{{ package.rpm_sourcerpm }}</a></p>
 
@@ -42,3 +48,4 @@
 {% endfor %}
 </ul>
 {% endfor %}
+{% endblock %}

Modified: packages-ng/packages/templates/group.html
==============================================================================
--- packages-ng/packages/templates/group.html	(original)
+++ packages-ng/packages/templates/group.html	Fri Apr  3 15:26:29 2009
@@ -1,8 +1,18 @@
+{% extends 'base.html' %}
+
+{% load markup %}
+
+{% block content %}
 <h2>Packages in {{ group }}:</h2>
 
 <dl>
-{% for package in packages %}
-	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a></dt>
-	<dd>{{ package.summary }}</dd>
+{% regroup packages by name as packages_grouped %}
+{% for package in packages_grouped %}
+	<dt><a href="{% url packages.views.details name=package.grouper %}">{{ package.grouper }}</a> — {{ package.list.0.summary }}</dt>
+	<dd>
+		{{ package.list.0.description|markdown }}
+	</dd>
 {% endfor %}
 </dl>
+{% endblock %}
+

Modified: packages-ng/packages/templates/index.html
==============================================================================
--- packages-ng/packages/templates/index.html	(original)
+++ packages-ng/packages/templates/index.html	Fri Apr  3 15:26:29 2009
@@ -1,3 +1,6 @@
+{% extends 'base.html' %}
+
+{% block content %}
 <h2>Search for packages:</h2>
 
 <form action="{% url packages.views.search %}" method="get">
@@ -9,7 +12,7 @@
 
 <ul>
 {% for group in groups %}
-	<li><a href="{% url packages.views.browse_group group %}">{{ group }}</a></li>
+	<li><a href="{% url packages.views.browse_group group|urlencode %}">{{ group }}</a></li>
 {% endfor %}
 </ul>
-
+{% endblock %}

Modified: packages-ng/packages/templates/provides.html
==============================================================================
--- packages-ng/packages/templates/provides.html	(original)
+++ packages-ng/packages/templates/provides.html	Fri Apr  3 15:26:29 2009
@@ -1,8 +1,14 @@
+{% extends 'base.html' %}
+
+{% load markup %}
+
+{% block content %}
 <h2>Packages providing {{ provide }}:</h2>
 
 <dl>
 {% for package in packages %}
-	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a></dt>
-	<dd>{{ package.summary }}</dd>
+	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a> — {{ package.summary }}</dt>
+	<dd>{{ package.description|markdown }}</dd>
 {% endfor %}
 </dl>
+{% endblock %}

Modified: packages-ng/packages/templates/requires.html
==============================================================================
--- packages-ng/packages/templates/requires.html	(original)
+++ packages-ng/packages/templates/requires.html	Fri Apr  3 15:26:29 2009
@@ -1,8 +1,14 @@
+{% extends 'base.html' %}
+
+{% load markup %}
+
+{% block content %}
 <h2>Packages requiring {{ require }}:</h2>
 
 <dl>
 {% for package in packages %}
-	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a></dt>
-	<dd>{{ package.summary }}</dd>
+	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a> — {{ package.summary }}</dt>
+	<dd>{{ package.description|markdown }}</dd>
 {% endfor %}
 </dl>
+{% endblock %}

Modified: packages-ng/packages/templates/search.html
==============================================================================
--- packages-ng/packages/templates/search.html	(original)
+++ packages-ng/packages/templates/search.html	Fri Apr  3 15:26:29 2009
@@ -1,3 +1,8 @@
+{% extends 'base.html' %}
+
+{% load markup %}
+
+{% block content %}
 <h2>Search for packages:</h2>
 
 <form action="{% url packages.views.search %}" method="get">
@@ -7,9 +12,13 @@
 
 {% if packages %}
 <dl>
-{% for package in packages %}
-	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a></dt>
-	<dd>{{ package.summary }}</dd>
+{% regroup packages by name as packages_grouped %}
+{% for package in packages_grouped %}
+	<dt><a href="{% url packages.views.details name=package.grouper %}">{{ package.grouper }}</a> — {{ package.list.0.summary }}</dt>
+	<dd>
+		{{ package.list.0.description|markdown }}
+	</dd>
 {% endfor %}
 </dl>
 {% endif %}
+{% endblock %}

Modified: packages-ng/packages/templates/source.html
==============================================================================
--- packages-ng/packages/templates/source.html	(original)
+++ packages-ng/packages/templates/source.html	Fri Apr  3 15:26:29 2009
@@ -1,8 +1,14 @@
+{% extends 'base.html' %}
+
+{% load markup %}
+
+{% block content %}
 <h2>Packages built from {{ source }}:</h2>
 
 <dl>
 {% for package in packages %}
-	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a></dt>
-	<dd>{{ package.summary }}</dd>
+	<dt><a href="{% url packages.views.details name=package.name %}">{{ package }}</a> — {{ package.summary }}</dt>
+	<dd>{{ package.description|markdown }}</dd>
 {% endfor %}
 </dl>
+{% endblock %}

Modified: packages-ng/packages/views.py
==============================================================================
--- packages-ng/packages/views.py	(original)
+++ packages-ng/packages/views.py	Fri Apr  3 15:26:29 2009
@@ -30,6 +30,7 @@
 		return direct_to_template(request, 'search.html', {'form': form})
 
 def browse_group(request, group):
+	print group
 	packages = filter_packages(rpm_group = group)
 	return direct_to_template(request, 'group.html', {'packages': packages, 'group': group})
 

Modified: packages-ng/settings.py
==============================================================================
--- packages-ng/settings.py	(original)
+++ packages-ng/settings.py	Fri Apr  3 15:26:29 2009
@@ -81,6 +81,7 @@
     'django.contrib.auth',
     'django.contrib.contenttypes',
     'django.contrib.sessions',
+    'django.contrib.markup',
     'packages',
 )
 

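The template changes rely on django.contrib.markup's markdown filter, which hands the text to the bundled markdown package added above, so {{ package.description|markdown }} boils down to roughly the following (the variable and its contents are illustrative):

    import markdown

    description_text = "PLD Linux package *description* text"
    html = markdown.markdown(description_text)  # roughly what the |markdown template filter does
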
