author     matt <matt@xcolour.net>  2013-01-28 11:32:18 -0500
committer  matt <matt@xcolour.net>  2013-01-28 11:32:18 -0500
commit     1afcb52d73271bbbd78f885451aa1b0e78c09871 (patch)
tree       9145840d6036fcbc0b6647c88f679a567fa8c54d /paste/util
download   paste-git-stringio.tar.gz
Import StringIO so it can be used. (stringio)
Diffstat (limited to 'paste/util')
-rw-r--r--  paste/util/PySourceColor.py    2103
-rw-r--r--  paste/util/UserDict24.py        167
-rw-r--r--  paste/util/__init__.py            4
-rw-r--r--  paste/util/classinit.py          42
-rw-r--r--  paste/util/classinstance.py      38
-rw-r--r--  paste/util/converters.py         26
-rw-r--r--  paste/util/dateinterval.py      103
-rw-r--r--  paste/util/datetimeutil.py      361
-rw-r--r--  paste/util/doctest24.py        2665
-rw-r--r--  paste/util/filemixin.py          53
-rw-r--r--  paste/util/finddata.py           99
-rw-r--r--  paste/util/findpackage.py        26
-rw-r--r--  paste/util/import_string.py      95
-rw-r--r--  paste/util/intset.py            511
-rw-r--r--  paste/util/ip4.py               273
-rw-r--r--  paste/util/killthread.py         30
-rw-r--r--  paste/util/looper.py            152
-rw-r--r--  paste/util/mimeparse.py         160
-rw-r--r--  paste/util/multidict.py         397
-rw-r--r--  paste/util/quoting.py            98
-rw-r--r--  paste/util/scgiserver.py        171
-rw-r--r--  paste/util/string24.py          531
-rw-r--r--  paste/util/subprocess24.py     1152
-rw-r--r--  paste/util/template.py          758
-rw-r--r--  paste/util/threadedprint.py     250
-rw-r--r--  paste/util/threadinglocal.py     43
26 files changed, 10308 insertions, 0 deletions
diff --git a/paste/util/PySourceColor.py b/paste/util/PySourceColor.py
new file mode 100644
index 0000000..65406ec
--- /dev/null
+++ b/paste/util/PySourceColor.py
@@ -0,0 +1,2103 @@
+# -*- coding: Latin-1 -*-
+"""
+PySourceColor: color Python source code
+"""
+
+"""
+ PySourceColor.py
+
+----------------------------------------------------------------------------
+
+ A python source to colorized html/css/xhtml converter.
+ Hacked by M.E.Farmer Jr. 2004, 2005
+ Python license
+
+----------------------------------------------------------------------------
+
+ - HTML markup does not create w3c valid html, but it works on every
+   browser I've tried so far (IE, Mozilla/Firefox, Opera, Konqueror, wxHTML).
+ - CSS markup is w3c validated html 4.01 strict,
+   but will not render correctly on all browsers.
+ - XHTML markup is w3c validated xhtml 1.0 strict;
+   like html 4.01, it will not render correctly on all browsers.
+
+----------------------------------------------------------------------------
+
+Features:
+
+ -Three types of markup:
+ html (default)
+ css/html 4.01 strict
+ xhtml 1.0 strict
+
+ -Can tokenize and colorize:
+ 12 types of strings
+ 2 comment types
+ numbers
+ operators
+ brackets
+ math operators
+ class / name
+ def / name
+ decorator / name
+ keywords
+ arguments class/def/decorator
+ linenumbers
+ names
+ text
+
+ -Eight colorschemes built-in:
+ null
+ mono
+ lite (default)
+ dark
+ dark2
+ idle
+ viewcvs
+ pythonwin
+
+ -Header and footer
+ set to '' for builtin header / footer.
+ give path to a file containing the html
+ you want added as header or footer.
+
+    -Arbitrary text and html (see the example below)
+ html markup converts all to raw (TEXT token)
+ #@# for raw -> send raw text.
+ #$# for span -> inline html and text.
+ #%# for div -> block level html and text.
+
+ -Linenumbers
+ Supports all styles. New token is called LINENUMBER.
+ Defaults to NAME if not defined.
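+
+    Example of the arbitrary text escapes (lines as they would appear in the
+    source being colorized)::
+
+        #@#this line is passed through as raw text
+        #$#<b>an inline (span) note</b>
+        #%#<div>a block level (div) note</div>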
+
+ Style options
+
+ -ALL markups support these text styles:
+ b = bold
+ i = italic
+ u = underline
+    -CSS and XHTML have limited support for borders:
+ HTML markup functions will ignore these.
+ Optional: Border color in RGB hex
+ Defaults to the text forecolor.
+ #rrggbb = border color
+ Border size:
+ l = thick
+ m = medium
+ t = thin
+ Border type:
+ - = dashed
+ . = dotted
+ s = solid
+ d = double
+ g = groove
+ r = ridge
+ n = inset
+ o = outset
+ You can specify multiple sides,
+ they will all use the same style.
+ Optional: Default is full border.
+ v = bottom
+ < = left
+ > = right
+ ^ = top
+ NOTE: Specify the styles you want.
+ The markups will ignore unsupported styles
+ Also note not all browsers can show these options
+
+ -All tokens default to NAME if not defined
+ so the only absolutely critical ones to define are:
+ NAME, ERRORTOKEN, PAGEBACKGROUND
+
+----------------------------------------------------------------------------
+
+Example usage::
+
+ # import
+ import PySourceColor as psc
+ psc.convert('c:/Python22/PySourceColor.py', colors=psc.idle, show=1)
+
+ # from module import *
+ from PySourceColor import *
+ convert('c:/Python22/Lib', colors=lite, markup="css",
+        header='#$#<b>This is a simple heading</b><hr/>')
+
+ # How to use a custom colorscheme, and most of the 'features'
+ from PySourceColor import *
+ new = {
+ ERRORTOKEN: ('bui','#FF8080',''),
+ DECORATOR_NAME: ('s','#AACBBC',''),
+ DECORATOR: ('n','#333333',''),
+ NAME: ('t.<v','#1133AA','#DDFF22'),
+ NUMBER: ('','#236676','#FF5555'),
+ OPERATOR: ('b','#454567','#BBBB11'),
+ MATH_OPERATOR: ('','#935623','#423afb'),
+ BRACKETS: ('b','#ac34bf','#6457a5'),
+ COMMENT: ('t-#0022FF','#545366','#AABBFF'),
+ DOUBLECOMMENT: ('<l#553455','#553455','#FF00FF'),
+ CLASS_NAME: ('m^v-','#000000','#FFFFFF'),
+ DEF_NAME: ('l=<v','#897845','#000022'),
+ KEYWORD: ('.b','#345345','#FFFF22'),
+ SINGLEQUOTE: ('mn','#223344','#AADDCC'),
+ SINGLEQUOTE_R: ('','#344522',''),
+ SINGLEQUOTE_U: ('','#234234',''),
+ DOUBLEQUOTE: ('m#0022FF','#334421',''),
+ DOUBLEQUOTE_R: ('','#345345',''),
+ DOUBLEQUOTE_U: ('','#678673',''),
+ TRIPLESINGLEQUOTE: ('tv','#FFFFFF','#000000'),
+ TRIPLESINGLEQUOTE_R: ('tbu','#443256','#DDFFDA'),
+ TRIPLESINGLEQUOTE_U: ('','#423454','#DDFFDA'),
+ TRIPLEDOUBLEQUOTE: ('li#236fd3b<>','#000000','#FFFFFF'),
+ TRIPLEDOUBLEQUOTE_R: ('tub','#000000','#FFFFFF'),
+ TRIPLEDOUBLEQUOTE_U: ('-', '#CCAABB','#FFFAFF'),
+        LINENUMBER: ('ib-','#ff66aa','#7733FF'),
+ TEXT: ('','#546634',''),
+ PAGEBACKGROUND: '#FFFAAA',
+ }
+ if __name__ == '__main__':
+ import sys
+ convert(sys.argv[1], './xhtml.html', colors=new, markup='xhtml', show=1,
+ linenumbers=1)
+ convert(sys.argv[1], './html.html', colors=new, markup='html', show=1,
+ linenumbers=1)
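+
+    # Colorize a code string in memory (str2html returns the html as a string)
+    from PySourceColor import str2html, lite
+    html = str2html("print 'hello'", colors=lite, markup='html')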
+
+"""
+
+__all__ = ['ERRORTOKEN','DECORATOR_NAME', 'DECORATOR', 'ARGS', 'EXTRASPACE',
+ 'NAME', 'NUMBER', 'OPERATOR', 'COMMENT', 'MATH_OPERATOR',
+ 'DOUBLECOMMENT', 'CLASS_NAME', 'DEF_NAME', 'KEYWORD', 'BRACKETS',
+ 'SINGLEQUOTE','SINGLEQUOTE_R','SINGLEQUOTE_U','DOUBLEQUOTE',
+ 'DOUBLEQUOTE_R', 'DOUBLEQUOTE_U', 'TRIPLESINGLEQUOTE', 'TEXT',
+ 'TRIPLESINGLEQUOTE_R', 'TRIPLESINGLEQUOTE_U', 'TRIPLEDOUBLEQUOTE',
+ 'TRIPLEDOUBLEQUOTE_R', 'TRIPLEDOUBLEQUOTE_U', 'PAGEBACKGROUND',
+ 'LINENUMBER', 'CODESTART', 'CODEEND', 'PY', 'TOKEN_NAMES', 'CSSHOOK',
+ 'null', 'mono', 'lite', 'dark','dark2', 'pythonwin','idle',
+ 'viewcvs', 'Usage', 'cli', 'str2stdout', 'path2stdout', 'Parser',
+ 'str2file', 'str2html', 'str2css', 'str2markup', 'path2file',
+ 'path2html', 'convert', 'walkdir', 'defaultColors', 'showpage',
+ 'pageconvert','tagreplace', 'MARKUPDICT']
+__title__ = 'PySourceColor'
+__version__ = "2.1a"
+__date__ = '25 April 2005'
+__author__ = "M.E.Farmer Jr."
+__credits__ = '''This was originally based on a python recipe
+submitted by Jürgen Hermann to ASPN. Now based on the voices in my head.
+M.E.Farmer 2004, 2005
+Python license
+'''
+import os
+import sys
+import time
+import glob
+import getopt
+import keyword
+import token
+import tokenize
+import traceback
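+# cStringIO is the faster C implementation; the pure-Python StringIO module is
+# the fallback when it is not available.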
+try:
+    import cStringIO as StringIO
+except ImportError:
+    import StringIO
+# Do not edit
+NAME = token.NAME
+NUMBER = token.NUMBER
+COMMENT = tokenize.COMMENT
+OPERATOR = token.OP
+ERRORTOKEN = token.ERRORTOKEN
+ARGS = token.NT_OFFSET + 1
+DOUBLECOMMENT = token.NT_OFFSET + 2
+CLASS_NAME = token.NT_OFFSET + 3
+DEF_NAME = token.NT_OFFSET + 4
+KEYWORD = token.NT_OFFSET + 5
+SINGLEQUOTE = token.NT_OFFSET + 6
+SINGLEQUOTE_R = token.NT_OFFSET + 7
+SINGLEQUOTE_U = token.NT_OFFSET + 8
+DOUBLEQUOTE = token.NT_OFFSET + 9
+DOUBLEQUOTE_R = token.NT_OFFSET + 10
+DOUBLEQUOTE_U = token.NT_OFFSET + 11
+TRIPLESINGLEQUOTE = token.NT_OFFSET + 12
+TRIPLESINGLEQUOTE_R = token.NT_OFFSET + 13
+TRIPLESINGLEQUOTE_U = token.NT_OFFSET + 14
+TRIPLEDOUBLEQUOTE = token.NT_OFFSET + 15
+TRIPLEDOUBLEQUOTE_R = token.NT_OFFSET + 16
+TRIPLEDOUBLEQUOTE_U = token.NT_OFFSET + 17
+PAGEBACKGROUND = token.NT_OFFSET + 18
+DECORATOR = token.NT_OFFSET + 19
+DECORATOR_NAME = token.NT_OFFSET + 20
+BRACKETS = token.NT_OFFSET + 21
+MATH_OPERATOR = token.NT_OFFSET + 22
+LINENUMBER = token.NT_OFFSET + 23
+TEXT = token.NT_OFFSET + 24
+PY = token.NT_OFFSET + 25
+CODESTART = token.NT_OFFSET + 26
+CODEEND = token.NT_OFFSET + 27
+CSSHOOK = token.NT_OFFSET + 28
+EXTRASPACE = token.NT_OFFSET + 29
+
+# markup classname lookup
+MARKUPDICT = {
+ ERRORTOKEN: 'py_err',
+ DECORATOR_NAME: 'py_decn',
+ DECORATOR: 'py_dec',
+ ARGS: 'py_args',
+ NAME: 'py_name',
+ NUMBER: 'py_num',
+ OPERATOR: 'py_op',
+ COMMENT: 'py_com',
+ DOUBLECOMMENT: 'py_dcom',
+ CLASS_NAME: 'py_clsn',
+ DEF_NAME: 'py_defn',
+ KEYWORD: 'py_key',
+ SINGLEQUOTE: 'py_sq',
+ SINGLEQUOTE_R: 'py_sqr',
+ SINGLEQUOTE_U: 'py_squ',
+ DOUBLEQUOTE: 'py_dq',
+ DOUBLEQUOTE_R: 'py_dqr',
+ DOUBLEQUOTE_U: 'py_dqu',
+ TRIPLESINGLEQUOTE: 'py_tsq',
+ TRIPLESINGLEQUOTE_R: 'py_tsqr',
+ TRIPLESINGLEQUOTE_U: 'py_tsqu',
+ TRIPLEDOUBLEQUOTE: 'py_tdq',
+ TRIPLEDOUBLEQUOTE_R: 'py_tdqr',
+ TRIPLEDOUBLEQUOTE_U: 'py_tdqu',
+ BRACKETS: 'py_bra',
+ MATH_OPERATOR: 'py_mop',
+ LINENUMBER: 'py_lnum',
+ TEXT: 'py_text',
+ }
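+# In css/xhtml output each token's text is tagged with one of these class
+# names (looked up via _getMarkupClass below); plain html markup uses <font>
+# tags instead and does not use them.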
+# might help users that want to create custom schemes
+TOKEN_NAMES= {
+ ERRORTOKEN:'ERRORTOKEN',
+ DECORATOR_NAME:'DECORATOR_NAME',
+ DECORATOR:'DECORATOR',
+ ARGS:'ARGS',
+ NAME:'NAME',
+ NUMBER:'NUMBER',
+ OPERATOR:'OPERATOR',
+ COMMENT:'COMMENT',
+ DOUBLECOMMENT:'DOUBLECOMMENT',
+ CLASS_NAME:'CLASS_NAME',
+ DEF_NAME:'DEF_NAME',
+ KEYWORD:'KEYWORD',
+ SINGLEQUOTE:'SINGLEQUOTE',
+ SINGLEQUOTE_R:'SINGLEQUOTE_R',
+ SINGLEQUOTE_U:'SINGLEQUOTE_U',
+ DOUBLEQUOTE:'DOUBLEQUOTE',
+ DOUBLEQUOTE_R:'DOUBLEQUOTE_R',
+ DOUBLEQUOTE_U:'DOUBLEQUOTE_U',
+ TRIPLESINGLEQUOTE:'TRIPLESINGLEQUOTE',
+ TRIPLESINGLEQUOTE_R:'TRIPLESINGLEQUOTE_R',
+ TRIPLESINGLEQUOTE_U:'TRIPLESINGLEQUOTE_U',
+ TRIPLEDOUBLEQUOTE:'TRIPLEDOUBLEQUOTE',
+ TRIPLEDOUBLEQUOTE_R:'TRIPLEDOUBLEQUOTE_R',
+ TRIPLEDOUBLEQUOTE_U:'TRIPLEDOUBLEQUOTE_U',
+ BRACKETS:'BRACKETS',
+ MATH_OPERATOR:'MATH_OPERATOR',
+ LINENUMBER:'LINENUMBER',
+ TEXT:'TEXT',
+ PAGEBACKGROUND:'PAGEBACKGROUND',
+ }
+
+######################################################################
+# Edit colors and styles to taste
+# Create your own scheme, just copy one below , rename and edit.
+# Custom styles must at least define NAME, ERRORTOKEN, PAGEBACKGROUND,
+# all missing elements will default to NAME.
+# See module docstring for details on style attributes.
+######################################################################
+# Copy null and use it as a starter colorscheme.
+null = {# tokentype: ('tags border_color', 'textforecolor', 'textbackcolor')
+ ERRORTOKEN: ('','#000000',''),# Error token
+ DECORATOR_NAME: ('','#000000',''),# Decorator name
+ DECORATOR: ('','#000000',''),# @ symbol
+ ARGS: ('','#000000',''),# class,def,deco arguments
+ NAME: ('','#000000',''),# All other python text
+ NUMBER: ('','#000000',''),# 0->10
+ OPERATOR: ('','#000000',''),# ':','<=',';',',','.','==', etc
+ MATH_OPERATOR: ('','#000000',''),# '+','-','=','','**',etc
+ BRACKETS: ('','#000000',''),# '[',']','(',')','{','}'
+ COMMENT: ('','#000000',''),# Single comment
+ DOUBLECOMMENT: ('','#000000',''),## Double comment
+ CLASS_NAME: ('','#000000',''),# Class name
+ DEF_NAME: ('','#000000',''),# Def name
+ KEYWORD: ('','#000000',''),# Python keywords
+ SINGLEQUOTE: ('','#000000',''),# 'SINGLEQUOTE'
+ SINGLEQUOTE_R: ('','#000000',''),# r'SINGLEQUOTE'
+ SINGLEQUOTE_U: ('','#000000',''),# u'SINGLEQUOTE'
+ DOUBLEQUOTE: ('','#000000',''),# "DOUBLEQUOTE"
+ DOUBLEQUOTE_R: ('','#000000',''),# r"DOUBLEQUOTE"
+ DOUBLEQUOTE_U: ('','#000000',''),# u"DOUBLEQUOTE"
+ TRIPLESINGLEQUOTE: ('','#000000',''),# '''TRIPLESINGLEQUOTE'''
+ TRIPLESINGLEQUOTE_R: ('','#000000',''),# r'''TRIPLESINGLEQUOTE'''
+ TRIPLESINGLEQUOTE_U: ('','#000000',''),# u'''TRIPLESINGLEQUOTE'''
+ TRIPLEDOUBLEQUOTE: ('','#000000',''),# """TRIPLEDOUBLEQUOTE"""
+ TRIPLEDOUBLEQUOTE_R: ('','#000000',''),# r"""TRIPLEDOUBLEQUOTE"""
+ TRIPLEDOUBLEQUOTE_U: ('','#000000',''),# u"""TRIPLEDOUBLEQUOTE"""
+ TEXT: ('','#000000',''),# non python text
+ LINENUMBER: ('>ti#555555','#000000',''),# Linenumbers
+ PAGEBACKGROUND: '#FFFFFF'# set the page background
+ }
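+# Example of the smallest usable custom scheme; every token type that is not
+# defined falls back to the NAME entry:
+#
+#     minimal = {
+#         NAME:           ('', '#000000', ''),
+#         ERRORTOKEN:     ('s#FF0000', '#FF8080', ''),
+#         PAGEBACKGROUND: '#FFFFFF',
+#         }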
+
+mono = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('bu','#000000',''),
+ DECORATOR: ('b','#000000',''),
+ ARGS: ('b','#555555',''),
+ NAME: ('','#000000',''),
+ NUMBER: ('b','#000000',''),
+ OPERATOR: ('b','#000000',''),
+ MATH_OPERATOR: ('b','#000000',''),
+ BRACKETS: ('b','#000000',''),
+ COMMENT: ('i','#999999',''),
+ DOUBLECOMMENT: ('b','#999999',''),
+ CLASS_NAME: ('bu','#000000',''),
+ DEF_NAME: ('b','#000000',''),
+ KEYWORD: ('b','#000000',''),
+ SINGLEQUOTE: ('','#000000',''),
+ SINGLEQUOTE_R: ('','#000000',''),
+ SINGLEQUOTE_U: ('','#000000',''),
+ DOUBLEQUOTE: ('','#000000',''),
+ DOUBLEQUOTE_R: ('','#000000',''),
+ DOUBLEQUOTE_U: ('','#000000',''),
+ TRIPLESINGLEQUOTE: ('','#000000',''),
+ TRIPLESINGLEQUOTE_R: ('','#000000',''),
+ TRIPLESINGLEQUOTE_U: ('','#000000',''),
+ TRIPLEDOUBLEQUOTE: ('i','#000000',''),
+ TRIPLEDOUBLEQUOTE_R: ('i','#000000',''),
+ TRIPLEDOUBLEQUOTE_U: ('i','#000000',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+dark = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('b','#FFBBAA',''),
+ DECORATOR: ('b','#CC5511',''),
+ ARGS: ('b','#DDDDFF',''),
+ NAME: ('','#DDDDDD',''),
+ NUMBER: ('','#FF0000',''),
+ OPERATOR: ('b','#FAF785',''),
+ MATH_OPERATOR: ('b','#FAF785',''),
+ BRACKETS: ('b','#FAF785',''),
+ COMMENT: ('','#45FCA0',''),
+ DOUBLECOMMENT: ('i','#A7C7A9',''),
+ CLASS_NAME: ('b','#B666FD',''),
+ DEF_NAME: ('b','#EBAE5C',''),
+ KEYWORD: ('b','#8680FF',''),
+ SINGLEQUOTE: ('','#F8BAFE',''),
+ SINGLEQUOTE_R: ('','#F8BAFE',''),
+ SINGLEQUOTE_U: ('','#F8BAFE',''),
+ DOUBLEQUOTE: ('','#FF80C0',''),
+ DOUBLEQUOTE_R: ('','#FF80C0',''),
+ DOUBLEQUOTE_U: ('','#FF80C0',''),
+ TRIPLESINGLEQUOTE: ('','#FF9595',''),
+ TRIPLESINGLEQUOTE_R: ('','#FF9595',''),
+ TRIPLESINGLEQUOTE_U: ('','#FF9595',''),
+ TRIPLEDOUBLEQUOTE: ('','#B3FFFF',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#B3FFFF',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#B3FFFF',''),
+ TEXT: ('','#FFFFFF',''),
+ LINENUMBER: ('>mi#555555','#bbccbb','#333333'),
+ PAGEBACKGROUND: '#000000'
+ }
+
+dark2 = {
+ ERRORTOKEN: ('','#FF0000',''),
+ DECORATOR_NAME: ('b','#FFBBAA',''),
+ DECORATOR: ('b','#CC5511',''),
+ ARGS: ('b','#DDDDDD',''),
+ NAME: ('','#C0C0C0',''),
+ NUMBER: ('b','#00FF00',''),
+ OPERATOR: ('b','#FF090F',''),
+ MATH_OPERATOR: ('b','#EE7020',''),
+ BRACKETS: ('b','#FFB90F',''),
+ COMMENT: ('i','#D0D000','#522000'),#'#88AA88','#11111F'),
+ DOUBLECOMMENT: ('i','#D0D000','#522000'),#'#77BB77','#11111F'),
+ CLASS_NAME: ('b','#DD4080',''),
+ DEF_NAME: ('b','#FF8040',''),
+ KEYWORD: ('b','#4726d1',''),
+ SINGLEQUOTE: ('','#8080C0',''),
+ SINGLEQUOTE_R: ('','#8080C0',''),
+ SINGLEQUOTE_U: ('','#8080C0',''),
+ DOUBLEQUOTE: ('','#ADB9F1',''),
+ DOUBLEQUOTE_R: ('','#ADB9F1',''),
+ DOUBLEQUOTE_U: ('','#ADB9F1',''),
+ TRIPLESINGLEQUOTE: ('','#00C1C1',''),#A050C0
+ TRIPLESINGLEQUOTE_R: ('','#00C1C1',''),#A050C0
+ TRIPLESINGLEQUOTE_U: ('','#00C1C1',''),#A050C0
+ TRIPLEDOUBLEQUOTE: ('','#33E3E3',''),#B090E0
+ TRIPLEDOUBLEQUOTE_R: ('','#33E3E3',''),#B090E0
+ TRIPLEDOUBLEQUOTE_U: ('','#33E3E3',''),#B090E0
+ TEXT: ('','#C0C0C0',''),
+ LINENUMBER: ('>mi#555555','#bbccbb','#333333'),
+ PAGEBACKGROUND: '#000000'
+ }
+
+lite = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('b','#BB4422',''),
+ DECORATOR: ('b','#3333AF',''),
+ ARGS: ('b','#000000',''),
+ NAME: ('','#333333',''),
+ NUMBER: ('b','#DD2200',''),
+ OPERATOR: ('b','#000000',''),
+ MATH_OPERATOR: ('b','#000000',''),
+ BRACKETS: ('b','#000000',''),
+ COMMENT: ('','#007F00',''),
+ DOUBLECOMMENT: ('','#608060',''),
+ CLASS_NAME: ('b','#0000DF',''),
+ DEF_NAME: ('b','#9C7A00',''),#f09030
+ KEYWORD: ('b','#0000AF',''),
+ SINGLEQUOTE: ('','#600080',''),
+ SINGLEQUOTE_R: ('','#600080',''),
+ SINGLEQUOTE_U: ('','#600080',''),
+ DOUBLEQUOTE: ('','#A0008A',''),
+ DOUBLEQUOTE_R: ('','#A0008A',''),
+ DOUBLEQUOTE_U: ('','#A0008A',''),
+ TRIPLESINGLEQUOTE: ('','#337799',''),
+ TRIPLESINGLEQUOTE_R: ('','#337799',''),
+ TRIPLESINGLEQUOTE_U: ('','#337799',''),
+ TRIPLEDOUBLEQUOTE: ('','#1166AA',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#1166AA',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#1166AA',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+idle = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('','#900090',''),
+ DECORATOR: ('','#FF7700',''),
+ NAME: ('','#000000',''),
+ NUMBER: ('','#000000',''),
+ OPERATOR: ('','#000000',''),
+ MATH_OPERATOR: ('','#000000',''),
+ BRACKETS: ('','#000000',''),
+ COMMENT: ('','#DD0000',''),
+ DOUBLECOMMENT: ('','#DD0000',''),
+ CLASS_NAME: ('','#0000FF',''),
+ DEF_NAME: ('','#0000FF',''),
+ KEYWORD: ('','#FF7700',''),
+ SINGLEQUOTE: ('','#00AA00',''),
+ SINGLEQUOTE_R: ('','#00AA00',''),
+ SINGLEQUOTE_U: ('','#00AA00',''),
+ DOUBLEQUOTE: ('','#00AA00',''),
+ DOUBLEQUOTE_R: ('','#00AA00',''),
+ DOUBLEQUOTE_U: ('','#00AA00',''),
+ TRIPLESINGLEQUOTE: ('','#00AA00',''),
+ TRIPLESINGLEQUOTE_R: ('','#00AA00',''),
+ TRIPLESINGLEQUOTE_U: ('','#00AA00',''),
+ TRIPLEDOUBLEQUOTE: ('','#00AA00',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#00AA00',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#00AA00',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+pythonwin = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('b','#DD0080',''),
+ DECORATOR: ('b','#000080',''),
+ ARGS: ('','#000000',''),
+ NAME: ('','#303030',''),
+ NUMBER: ('','#008080',''),
+ OPERATOR: ('','#000000',''),
+ MATH_OPERATOR: ('','#000000',''),
+ BRACKETS: ('','#000000',''),
+ COMMENT: ('','#007F00',''),
+ DOUBLECOMMENT: ('','#7F7F7F',''),
+ CLASS_NAME: ('b','#0000FF',''),
+ DEF_NAME: ('b','#007F7F',''),
+ KEYWORD: ('b','#000080',''),
+ SINGLEQUOTE: ('','#808000',''),
+ SINGLEQUOTE_R: ('','#808000',''),
+ SINGLEQUOTE_U: ('','#808000',''),
+ DOUBLEQUOTE: ('','#808000',''),
+ DOUBLEQUOTE_R: ('','#808000',''),
+ DOUBLEQUOTE_U: ('','#808000',''),
+ TRIPLESINGLEQUOTE: ('','#808000',''),
+ TRIPLESINGLEQUOTE_R: ('','#808000',''),
+ TRIPLESINGLEQUOTE_U: ('','#808000',''),
+ TRIPLEDOUBLEQUOTE: ('','#808000',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#808000',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#808000',''),
+ TEXT: ('','#303030',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+viewcvs = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('','#000000',''),
+ DECORATOR: ('','#000000',''),
+ ARGS: ('','#000000',''),
+ NAME: ('','#000000',''),
+ NUMBER: ('','#000000',''),
+ OPERATOR: ('','#000000',''),
+ MATH_OPERATOR: ('','#000000',''),
+ BRACKETS: ('','#000000',''),
+ COMMENT: ('i','#b22222',''),
+ DOUBLECOMMENT: ('i','#b22222',''),
+ CLASS_NAME: ('','#000000',''),
+ DEF_NAME: ('b','#0000ff',''),
+ KEYWORD: ('b','#a020f0',''),
+ SINGLEQUOTE: ('b','#bc8f8f',''),
+ SINGLEQUOTE_R: ('b','#bc8f8f',''),
+ SINGLEQUOTE_U: ('b','#bc8f8f',''),
+ DOUBLEQUOTE: ('b','#bc8f8f',''),
+ DOUBLEQUOTE_R: ('b','#bc8f8f',''),
+ DOUBLEQUOTE_U: ('b','#bc8f8f',''),
+ TRIPLESINGLEQUOTE: ('b','#bc8f8f',''),
+ TRIPLESINGLEQUOTE_R: ('b','#bc8f8f',''),
+ TRIPLESINGLEQUOTE_U: ('b','#bc8f8f',''),
+ TRIPLEDOUBLEQUOTE: ('b','#bc8f8f',''),
+ TRIPLEDOUBLEQUOTE_R: ('b','#bc8f8f',''),
+ TRIPLEDOUBLEQUOTE_U: ('b','#bc8f8f',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+defaultColors = lite
+
+def Usage():
+ doc = """
+ -----------------------------------------------------------------------------
+ PySourceColor.py ver: %s
+ -----------------------------------------------------------------------------
+ Module summary:
+ This module is designed to colorize python source code.
+ Input--->python source
+ Output-->colorized (html, html4.01/css, xhtml1.0)
+ Standalone:
+ This module will work from the command line with options.
+ This module will work with redirected stdio.
+ Imported:
+ This module can be imported and used directly in your code.
+ -----------------------------------------------------------------------------
+ Command line options:
+ -h, --help
+ Optional-> Display this help message.
+ -t, --test
+            Optional-> Will ignore all other flags but --profile
+ test all schemes and markup combinations
+ -p, --profile
+ Optional-> Works only with --test or -t
+ runs profile.py and makes the test work in quiet mode.
+ -i, --in, --input
+ Optional-> If you give input on stdin.
+ Use any of these for the current dir (.,cwd)
+ Input can be file or dir.
+ Input from stdin use one of the following (-,stdin)
+ If stdin is used as input stdout is output unless specified.
+ -o, --out, --output
+ Optional-> output dir for the colorized source.
+ default: output dir is the input dir.
+ To output html to stdout use one of the following (-,stdout)
+ Stdout can be used without stdin if you give a file as input.
+ -c, --color
+ Optional-> null, mono, dark, dark2, lite, idle, pythonwin, viewcvs
+            default: lite
+ -s, --show
+ Optional-> Show page after creation.
+ default: no show
+ -m, --markup
+ Optional-> html, css, xhtml
+ css, xhtml also support external stylesheets (-e,--external)
+ default: HTML
+ -e, --external
+ Optional-> use with css, xhtml
+            Writes a style sheet instead of embedding it in the page
+ saves it as pystyle.css in the same directory.
+ html markup will silently ignore this flag.
+ -H, --header
+            Optional-> add a page header to the top of the output
+ -H
+ Builtin header (name,date,hrule)
+ --header
+ You must specify a filename.
+ The header file must be valid html
+ and must handle its own font colors.
+ ex. --header c:/tmp/header.txt
+ -F, --footer
+            Optional-> add a page footer to the bottom of the output
+ -F
+ Builtin footer (hrule,name,date)
+ --footer
+ You must specify a filename.
+ The footer file must be valid html
+ and must handle its own font colors.
+ ex. --footer c:/tmp/footer.txt
+ -l, --linenumbers
+ Optional-> default is no linenumbers
+ Adds line numbers to the start of each line in the code.
+ --convertpage
+ Given a webpage that has code embedded in tags it will
+ convert embedded code to colorized html.
+ (see pageconvert for details)
+ -----------------------------------------------------------------------------
+ Option usage:
+ # Test and show pages
+ python PySourceColor.py -t -s
+ # Test and only show profile results
+ python PySourceColor.py -t -p
+ # Colorize all .py,.pyw files in cwdir you can also use: (.,cwd)
+ python PySourceColor.py -i .
+ # Using long options w/ =
+ python PySourceColor.py --in=c:/myDir/my.py --color=lite --show
+ # Using short options w/out =
+ python PySourceColor.py -i c:/myDir/ -c idle -m css -e
+ # Using any mix
+ python PySourceColor.py --in . -o=c:/myDir --show
+ # Place a custom header on your files
+ python PySourceColor.py -i . -o c:/tmp -m xhtml --header c:/header.txt
+ -----------------------------------------------------------------------------
+ Stdio usage:
+ # Stdio using no options
+ python PySourceColor.py < c:/MyFile.py > c:/tmp/MyFile.html
+ # Using stdin alone automatically uses stdout for output: (stdin,-)
+ python PySourceColor.py -i- < c:/MyFile.py > c:/tmp/myfile.html
+ # Stdout can also be written to directly from a file instead of stdin
+ python PySourceColor.py -i c:/MyFile.py -m css -o- > c:/tmp/myfile.html
+ # Stdin can be used as input , but output can still be specified
+ python PySourceColor.py -i- -o c:/pydoc.py.html -s < c:/Python22/my.py
+ _____________________________________________________________________________
+ """
+ print doc % (__version__)
+ sys.exit(1)
+
+###################################################### Command line interface
+
+def cli():
+ """Handle command line args and redirections"""
+ try:
+ # try to get command line args
+ opts, args = getopt.getopt(sys.argv[1:],
+ "hseqtplHFi:o:c:m:h:f:",["help", "show", "quiet",
+ "test", "external", "linenumbers", "convertpage", "profile",
+ "input=", "output=", "color=", "markup=","header=", "footer="])
+ except getopt.GetoptError:
+ # on error print help information and exit:
+ Usage()
+ # init some names
+ input = None
+ output = None
+ colorscheme = None
+ markup = 'html'
+ header = None
+ footer = None
+ linenumbers = 0
+ show = 0
+ quiet = 0
+ test = 0
+ profile = 0
+ convertpage = 0
+ form = None
+ # if we have args then process them
+ for o, a in opts:
+ if o in ["-h", "--help"]:
+ Usage()
+ sys.exit()
+ if o in ["-o", "--output", "--out"]:
+ output = a
+ if o in ["-i", "--input", "--in"]:
+ input = a
+ if input in [".", "cwd"]:
+ input = os.getcwd()
+ if o in ["-s", "--show"]:
+ show = 1
+ if o in ["-q", "--quiet"]:
+ quiet = 1
+ if o in ["-t", "--test"]:
+ test = 1
+ if o in ["--convertpage"]:
+ convertpage = 1
+ if o in ["-p", "--profile"]:
+ profile = 1
+ if o in ["-e", "--external"]:
+ form = 'external'
+ if o in ["-m", "--markup"]:
+ markup = str(a)
+ if o in ["-l", "--linenumbers"]:
+ linenumbers = 1
+ if o in ["--header"]:
+ header = str(a)
+ elif o == "-H":
+ header = ''
+ if o in ["--footer"]:
+ footer = str(a)
+ elif o == "-F":
+ footer = ''
+ if o in ["-c", "--color"]:
+ try:
+ colorscheme = globals().get(a.lower())
+ except:
+ traceback.print_exc()
+ Usage()
+ if test:
+ if profile:
+ import profile
+ profile.run('_test(show=%s, quiet=%s)'%(show,quiet))
+ else:
+ # Parse this script in every possible colorscheme and markup
+ _test(show,quiet)
+ elif input in [None, "-", "stdin"] or output in ["-", "stdout"]:
+ # determine if we are going to use stdio
+ if input not in [None, "-", "stdin"]:
+ if os.path.isfile(input) :
+ path2stdout(input, colors=colorscheme, markup=markup,
+ linenumbers=linenumbers, header=header,
+ footer=footer, form=form)
+ else:
+                raise PathError, 'File does not exist!'
+ else:
+ try:
+ if sys.stdin.isatty():
+ raise InputError, 'Please check input!'
+ else:
+ if output in [None,"-","stdout"]:
+ str2stdout(sys.stdin.read(), colors=colorscheme,
+ markup=markup, header=header,
+ footer=footer, linenumbers=linenumbers,
+ form=form)
+ else:
+ str2file(sys.stdin.read(), outfile=output, show=show,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers, form=form)
+ except:
+ traceback.print_exc()
+ Usage()
+ else:
+ if os.path.exists(input):
+ if convertpage:
+ # if there was at least an input given we can proceed
+ pageconvert(input, out=output, colors=colorscheme,
+ show=show, markup=markup,linenumbers=linenumbers)
+ else:
+ # if there was at least an input given we can proceed
+ convert(source=input, outdir=output, colors=colorscheme,
+ show=show, markup=markup, quiet=quiet, header=header,
+ footer=footer, linenumbers=linenumbers, form=form)
+ else:
+            raise PathError, 'File does not exist!'
+ Usage()
+
+######################################################### Simple markup tests
+
+def _test(show=0, quiet=0):
+ """Test the parser and most of the functions.
+
+    There are 19 tests total (eight colorschemes in three different markups,
+    and a str2file test). Most functions are tested by this.
+ """
+ fi = sys.argv[0]
+ if not fi.endswith('.exe'):# Do not test if frozen as an archive
+        # this is a collection of tests, most things are covered.
+ path2file(fi, '/tmp/null.html', null, show=show, quiet=quiet)
+ path2file(fi, '/tmp/null_css.html', null, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/mono.html', mono, show=show, quiet=quiet)
+ path2file(fi, '/tmp/mono_css.html', mono, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/lite.html', lite, show=show, quiet=quiet)
+ path2file(fi, '/tmp/lite_css.html', lite, show=show,
+ markup='css', quiet=quiet, header='', footer='',
+ linenumbers=1)
+ path2file(fi, '/tmp/lite_xhtml.html', lite, show=show,
+ markup='xhtml', quiet=quiet)
+ path2file(fi, '/tmp/dark.html', dark, show=show, quiet=quiet)
+ path2file(fi, '/tmp/dark_css.html', dark, show=show,
+ markup='css', quiet=quiet, linenumbers=1)
+ path2file(fi, '/tmp/dark2.html', dark2, show=show, quiet=quiet)
+ path2file(fi, '/tmp/dark2_css.html', dark2, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/dark2_xhtml.html', dark2, show=show,
+ markup='xhtml', quiet=quiet, header='', footer='',
+ linenumbers=1, form='external')
+ path2file(fi, '/tmp/idle.html', idle, show=show, quiet=quiet)
+ path2file(fi, '/tmp/idle_css.html', idle, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/viewcvs.html', viewcvs, show=show,
+ quiet=quiet, linenumbers=1)
+ path2file(fi, '/tmp/viewcvs_css.html', viewcvs, show=show,
+ markup='css', linenumbers=1, quiet=quiet)
+ path2file(fi, '/tmp/pythonwin.html', pythonwin, show=show,
+ quiet=quiet)
+ path2file(fi, '/tmp/pythonwin_css.html', pythonwin, show=show,
+ markup='css', quiet=quiet)
+ teststr=r'''"""This is a test of decorators and other things"""
+# This should be line 421...
+@whatever(arg,arg2)
+@A @B(arghh) @C
+def LlamaSaysNi(arg='Ni!',arg2="RALPH"):
+ """This docstring is deeply disturbed by all the llama references"""
+ print '%s The Wonder Llama says %s'% (arg2,arg)
+# So I was like duh!, and he was like ya know?!,
+# and so we were both like huh...wtf!? RTFM!! LOL!!;)
+@staticmethod## Double comments are KewL.
+def LlamasRLumpy():
+ """This docstring is too sexy to be here.
+ """
+ u"""
+=============================
+A Møøse once bit my sister...
+=============================
+ """
+ ## Relax, this won't hurt a bit, just a simple, painless procedure,
+ ## hold still while I get the anesthetizing hammer.
+ m = {'three':'1','won':'2','too':'3'}
+ o = r'fishy\fishy\fishy/fish\oh/where/is\my/little\..'
+ python = uR"""
+ No realli! She was Karving her initials øn the møøse with the sharpened end
+ of an interspace tøøthbrush given her by Svenge - her brother-in-law -an Oslo
+ dentist and star of many Norwegian møvies: "The Høt Hands of an Oslo
+ Dentist", "Fillings of Passion", "The Huge Mølars of Horst Nordfink"..."""
+ RU"""142 MEXICAN WHOOPING LLAMAS"""#<-Can you fit 142 llamas in a red box?
+ n = u' HERMSGERVØRDENBRØTBØRDA ' + """ YUTTE """
+ t = """SAMALLNIATNUOMNAIRODAUCE"""+"DENIARTYLLAICEPS04"
+ ## We apologise for the fault in the
+ ## comments. Those responsible have been
+ ## sacked.
+ y = '14 NORTH CHILEAN GUANACOS \
+(CLOSELY RELATED TO THE LLAMA)'
+ rules = [0,1,2,3,4,5]
+ print y'''
+ htmlPath = os.path.abspath('/tmp/strtest_lines.html')
+ str2file(teststr, htmlPath, colors=dark, markup='xhtml',
+ linenumbers=420, show=show)
+ _printinfo(" wrote %s" % htmlPath, quiet)
+ htmlPath = os.path.abspath('/tmp/strtest_nolines.html')
+ str2file(teststr, htmlPath, colors=dark, markup='xhtml',
+ show=show)
+ _printinfo(" wrote %s" % htmlPath, quiet)
+ else:
+ Usage()
+ return
+
+# emacs wants this: '
+
+####################################################### User functions
+
+def str2stdout(sourcestring, colors=None, title='', markup='html',
+ header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts a code(string) to colorized HTML. Writes to stdout.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ Parser(sourcestring, colors=colors, title=title, markup=markup,
+ header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+
+def path2stdout(sourcepath, title='', colors=None, markup='html',
+ header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts code(file) to colorized HTML. Writes to stdout.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ sourcestring = open(sourcepath).read()
+ Parser(sourcestring, colors=colors, title=sourcepath,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+
+def str2html(sourcestring, colors=None, title='',
+ markup='html', header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts a code(string) to colorized HTML. Returns an HTML string.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ stringIO = StringIO.StringIO()
+ Parser(sourcestring, colors=colors, title=title, out=stringIO,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+ stringIO.seek(0)
+ return stringIO.read()
+
+def str2css(sourcestring, colors=None, title='',
+ markup='css', header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts a code string to colorized CSS/HTML. Returns CSS/HTML string
+
+ If form != None then this will return (stylesheet_str, code_str)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ if markup.lower() not in ['css' ,'xhtml']:
+ markup = 'css'
+ stringIO = StringIO.StringIO()
+ parse = Parser(sourcestring, colors=colors, title=title,
+ out=stringIO, markup=markup,
+ header=header, footer=footer,
+ linenumbers=linenumbers)
+ parse.format(form)
+ stringIO.seek(0)
+ if form != None:
+ return parse._sendCSSStyle(external=1), stringIO.read()
+ else:
+ return None, stringIO.read()
+
+def str2markup(sourcestring, colors=None, title = '',
+ markup='xhtml', header=None, footer=None,
+ linenumbers=0, form=None):
+ """ Convert code strings into ([stylesheet or None], colorized string) """
+ if markup.lower() == 'html':
+ return None, str2html(sourcestring, colors=colors, title=title,
+ header=header, footer=footer, markup=markup,
+ linenumbers=linenumbers, form=form)
+ else:
+ return str2css(sourcestring, colors=colors, title=title,
+ header=header, footer=footer, markup=markup,
+ linenumbers=linenumbers, form=form)
+
+def str2file(sourcestring, outfile, colors=None, title='',
+ markup='html', header=None, footer=None,
+ linenumbers=0, show=0, dosheet=1, form=None):
+ """Converts a code string to a file.
+
+ makes no attempt at correcting bad pathnames
+ """
+    css, html = str2markup(sourcestring, colors=colors, title=title,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers, form=form)
+ # write html
+ f = open(outfile,'wt')
+ f.writelines(html)
+ f.close()
+ #write css
+ if css != None and dosheet:
+ dir = os.path.dirname(outfile)
+ outcss = os.path.join(dir,'pystyle.css')
+ f = open(outcss,'wt')
+ f.writelines(css)
+ f.close()
+ if show:
+ showpage(outfile)
+
+def path2html(sourcepath, colors=None, markup='html',
+ header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts code(file) to colorized HTML. Returns an HTML string.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ stringIO = StringIO.StringIO()
+ sourcestring = open(sourcepath).read()
+ Parser(sourcestring, colors, title=sourcepath, out=stringIO,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+ stringIO.seek(0)
+ return stringIO.read()
+
+def convert(source, outdir=None, colors=None,
+ show=0, markup='html', quiet=0,
+ header=None, footer=None, linenumbers=0, form=None):
+ """Takes a file or dir as input and places the html in the outdir.
+
+ If outdir is none it defaults to the input dir
+ """
+ count=0
+ # If it is a filename then path2file
+ if not os.path.isdir(source):
+ if os.path.isfile(source):
+ count+=1
+ path2file(source, outdir, colors, show, markup,
+ quiet, form, header, footer, linenumbers, count)
+ else:
+ raise PathError, 'File does not exist!'
+ # If we pass in a dir we need to walkdir for files.
+ # Then we need to colorize them with path2file
+ else:
+ fileList = walkdir(source)
+ if fileList != None:
+ # make sure outdir is a dir
+ if outdir != None:
+ if os.path.splitext(outdir)[1] != '':
+ outdir = os.path.split(outdir)[0]
+ for item in fileList:
+ count+=1
+ path2file(item, outdir, colors, show, markup,
+ quiet, form, header, footer, linenumbers, count)
+ _printinfo('Completed colorizing %s files.'%str(count), quiet)
+ else:
+ _printinfo("No files to convert in dir.", quiet)
+
+def path2file(sourcePath, out=None, colors=None, show=0,
+ markup='html', quiet=0, form=None,
+ header=None, footer=None, linenumbers=0, count=1):
+ """ Converts python source to html file"""
+ # If no outdir is given we use the sourcePath
+ if out == None:#this is a guess
+ htmlPath = sourcePath + '.html'
+ else:
+ # If we do give an out_dir, and it does
+ # not exist , it will be created.
+ if os.path.splitext(out)[1] == '':
+ if not os.path.isdir(out):
+ os.makedirs(out)
+ sourceName = os.path.basename(sourcePath)
+ htmlPath = os.path.join(out,sourceName)+'.html'
+ # If we do give an out_name, and its dir does
+ # not exist , it will be created.
+ else:
+ outdir = os.path.split(out)[0]
+ if not os.path.isdir(outdir):
+ os.makedirs(outdir)
+ htmlPath = out
+ htmlPath = os.path.abspath(htmlPath)
+ # Open the text and do the parsing.
+ source = open(sourcePath).read()
+ parse = Parser(source, colors, sourcePath, open(htmlPath, 'wt'),
+ markup, header, footer, linenumbers)
+ parse.format(form)
+ _printinfo(" wrote %s" % htmlPath, quiet)
+ # html markup will ignore the external flag, but
+ # we need to stop the blank file from being written.
+ if form == 'external' and count == 1 and markup != 'html':
+ cssSheet = parse._sendCSSStyle(external=1)
+ cssPath = os.path.join(os.path.dirname(htmlPath),'pystyle.css')
+ css = open(cssPath, 'wt')
+ css.write(cssSheet)
+ css.close()
+ _printinfo(" wrote %s" % cssPath, quiet)
+ if show:
+ # load HTML page into the default web browser.
+ showpage(htmlPath)
+ return htmlPath
+
+def tagreplace(sourcestr, colors=lite, markup='xhtml',
+ linenumbers=0, dosheet=1, tagstart='<PY>'.lower(),
+ tagend='</PY>'.lower(), stylesheet='pystyle.css'):
+ """This is a helper function for pageconvert. Returns css, page.
+ """
+ if markup.lower() != 'html':
+ link = '<link rel="stylesheet" href="%s" type="text/css"/></head>'
+ css = link%stylesheet
+ if sourcestr.find(css) == -1:
+ sourcestr = sourcestr.replace('</head>', css, 1)
+ starttags = sourcestr.count(tagstart)
+ endtags = sourcestr.count(tagend)
+ if starttags:
+ if starttags == endtags:
+ for _ in range(starttags):
+ datastart = sourcestr.find(tagstart)
+ dataend = sourcestr.find(tagend)
+ data = sourcestr[datastart+len(tagstart):dataend]
+ data = unescape(data)
+ css , data = str2markup(data, colors=colors,
+ linenumbers=linenumbers, markup=markup, form='embed')
+ start = sourcestr[:datastart]
+ end = sourcestr[dataend+len(tagend):]
+ sourcestr = ''.join([start,data,end])
+ else:
+            raise InputError, 'Tag mismatch!\nCheck %s,%s tags' % (tagstart, tagend)
+ if not dosheet:
+ css = None
+ return css, sourcestr
+
+def pageconvert(path, out=None, colors=lite, markup='xhtml', linenumbers=0,
+ dosheet=1, tagstart='<PY>'.lower(), tagend='</PY>'.lower(),
+ stylesheet='pystyle', show=1, returnstr=0):
+    """Colorize Python source that is written in a webpage, enclosed in tags.
+    """
+ if out == None:
+ out = os.path.dirname(path)
+ infile = open(path, 'r').read()
+ css,page = tagreplace(sourcestr=infile,colors=colors,
+ markup=markup, linenumbers=linenumbers, dosheet=dosheet,
+ tagstart=tagstart, tagend=tagend, stylesheet=stylesheet)
+ if not returnstr:
+ newpath = os.path.abspath(os.path.join(
+ out,'tmp', os.path.basename(path)))
+ if not os.path.exists(newpath):
+ try:
+ os.makedirs(os.path.dirname(newpath))
+ except:
+ pass#traceback.print_exc()
+ #Usage()
+ y = open(newpath, 'w')
+ y.write(page)
+ y.close()
+ if css:
+ csspath = os.path.abspath(os.path.join(
+ out,'tmp','%s.css'%stylesheet))
+ x = open(csspath,'w')
+ x.write(css)
+ x.close()
+ if show:
+ try:
+ os.startfile(newpath)
+ except:
+ traceback.print_exc()
+ return newpath
+ else:
+ return css, page
+
+##################################################################### helpers
+
+def walkdir(dir):
+ """Return a list of .py and .pyw files from a given directory.
+
+    This function could be written as a generator in Python 2.3, or a genexp
+    in Python 2.4, but that would leave 2.2 and 2.1 out....
+    (An illustrative generator version is sketched after this function.)
+ """
+ # Get a list of files that match *.py*
+ GLOB_PATTERN = os.path.join(dir, "*.[p][y]*")
+ pathlist = glob.glob(GLOB_PATTERN)
+ # Now filter out all but py and pyw
+ filterlist = [x for x in pathlist
+ if x.endswith('.py')
+ or x.endswith('.pyw')]
+ if filterlist != []:
+ # if we have a list send it
+ return filterlist
+ else:
+ return None
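+
+# Illustrative only (not used elsewhere in this module): the generator version
+# of walkdir mentioned in its docstring, for Python 2.3 and later.
+def _walkdir_gen(dir):
+    """Yield .py and .pyw paths from a directory, one at a time."""
+    for path in glob.glob(os.path.join(dir, "*.[p][y]*")):
+        if path.endswith('.py') or path.endswith('.pyw'):
+            yield path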
+
+def showpage(path):
+ """Helper function to open webpages"""
+ try:
+ import webbrowser
+ webbrowser.open_new(os.path.abspath(path))
+ except:
+ traceback.print_exc()
+
+def _printinfo(message, quiet):
+ """Helper to print messages"""
+ if not quiet:
+ print message
+
+def escape(text):
+ """escape text for html. similar to cgi.escape"""
+ text = text.replace("&", "&amp;")
+ text = text.replace("<", "&lt;")
+ text = text.replace(">", "&gt;")
+ return text
+
+def unescape(text):
+    """unescape escaped text"""
+ text = text.replace("&quot;", '"')
+ text = text.replace("&gt;", ">")
+ text = text.replace("&lt;", "<")
+ text = text.replace("&amp;", "&")
+ return text
+
+########################################################### Custom Exceptions
+
+class PySourceColorError(Exception):
+ # Base for custom errors
+ def __init__(self, msg=''):
+ self._msg = msg
+ Exception.__init__(self, msg)
+ def __repr__(self):
+ return self._msg
+ __str__ = __repr__
+
+class PathError(PySourceColorError):
+ def __init__(self, msg):
+ PySourceColorError.__init__(self,
+ 'Path error! : %s'% msg)
+
+class InputError(PySourceColorError):
+ def __init__(self, msg):
+ PySourceColorError.__init__(self,
+ 'Input error! : %s'% msg)
+
+########################################################## Python code parser
+
+class Parser(object):
+
+ """MoinMoin python parser heavily chopped :)"""
+
+ def __init__(self, raw, colors=None, title='', out=sys.stdout,
+ markup='html', header=None, footer=None, linenumbers=0):
+ """Store the source text & set some flags"""
+ if colors == None:
+ colors = defaultColors
+ self.raw = raw.expandtabs().rstrip()
+ self.title = os.path.basename(title)
+ self.out = out
+ self.line = ''
+ self.lasttext = ''
+ self.argFlag = 0
+ self.classFlag = 0
+ self.defFlag = 0
+ self.decoratorFlag = 0
+ self.external = 0
+ self.markup = markup.upper()
+ self.colors = colors
+ self.header = header
+ self.footer = footer
+ self.doArgs = 1 # overrides the new tokens
+ self.doNames = 1 # overrides the new tokens
+ self.doMathOps = 1 # overrides the new tokens
+ self.doBrackets = 1 # overrides the new tokens
+ self.doURL = 1 # override url conversion
+ self.LINENUMHOLDER = "___line___".upper()
+ self.LINESTART = "___start___".upper()
+ self.skip = 0
+        # add space to the left side of code for padding. Override in color dict.
+ self.extraspace = self.colors.get(EXTRASPACE, '')
+        # Linenumbers less than zero also have numberlinks
+ self.dolinenums = self.linenum = abs(linenumbers)
+ if linenumbers < 0:
+ self.numberlinks = 1
+ else:
+ self.numberlinks = 0
+
+ def format(self, form=None):
+ """Parse and send the colorized source"""
+ if form in ('snip','code'):
+ self.addEnds = 0
+ elif form == 'embed':
+ self.addEnds = 0
+ self.external = 1
+ else:
+ if form == 'external':
+ self.external = 1
+ self.addEnds = 1
+
+ # Store line offsets in self.lines
+ self.lines = [0, 0]
+ pos = 0
+
+ # Add linenumbers
+ if self.dolinenums:
+ start=self.LINENUMHOLDER+' '+self.extraspace
+ else:
+ start=''+self.extraspace
+ newlines = []
+ lines = self.raw.splitlines(0)
+ for l in lines:
+ # span and div escape for customizing and embedding raw text
+ if (l.startswith('#$#')
+ or l.startswith('#%#')
+ or l.startswith('#@#')):
+ newlines.append(l)
+ else:
+ # kludge for line spans in css,xhtml
+ if self.markup in ['XHTML','CSS']:
+ newlines.append(self.LINESTART+' '+start+l)
+ else:
+ newlines.append(start+l)
+ self.raw = "\n".join(newlines)+'\n'# plus an extra newline at the end
+
+ # Gather lines
+ while 1:
+ pos = self.raw.find('\n', pos) + 1
+ if not pos: break
+ self.lines.append(pos)
+ self.lines.append(len(self.raw))
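+        # self.lines[row] is now the offset in self.raw where physical line
+        # `row` starts (rows are 1-based; the final entry marks the end).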
+
+ # Wrap text in a filelike object
+ self.pos = 0
+ text = StringIO.StringIO(self.raw)
+
+ # Markup start
+ if self.addEnds:
+ self._doPageStart()
+ else:
+ self._doSnippetStart()
+
+ ## Tokenize calls the __call__
+ ## function for each token till done.
+ # Parse the source and write out the results.
+ try:
+ tokenize.tokenize(text.readline, self)
+ except tokenize.TokenError, ex:
+ msg = ex[0]
+ line = ex[1][0]
+ self.out.write("<h3>ERROR: %s</h3>%s\n"%
+ (msg, self.raw[self.lines[line]:]))
+ #traceback.print_exc()
+
+ # Markup end
+ if self.addEnds:
+ self._doPageEnd()
+ else:
+ self._doSnippetEnd()
+
+ def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
+ """Token handler. Order is important do not rearrange."""
+ self.line = line
+ # Calculate new positions
+ oldpos = self.pos
+ newpos = self.lines[srow] + scol
+ self.pos = newpos + len(toktext)
+ # Handle newlines
+ if toktype in (token.NEWLINE, tokenize.NL):
+ self.decoratorFlag = self.argFlag = 0
+ # kludge for line spans in css,xhtml
+ if self.markup in ['XHTML','CSS']:
+ self.out.write('</span>')
+ self.out.write('\n')
+ return
+
+ # Send the original whitespace, and tokenize backslashes if present.
+ # Tokenizer.py just sends continued line backslashes with whitespace.
+ # This is a hack to tokenize continued line slashes as operators.
+ # Should continued line backslashes be treated as operators
+ # or some other token?
+
+ if newpos > oldpos:
+ if self.raw[oldpos:newpos].isspace():
+ # consume a single space after linestarts and linenumbers
+                # had to have them so tokenizer could separate them.
+ # multiline strings are handled by do_Text functions
+ if self.lasttext != self.LINESTART \
+ and self.lasttext != self.LINENUMHOLDER:
+ self.out.write(self.raw[oldpos:newpos])
+ else:
+ self.out.write(self.raw[oldpos+1:newpos])
+ else:
+ slash = self.raw[oldpos:newpos].find('\\')+oldpos
+ self.out.write(self.raw[oldpos:slash])
+ getattr(self, '_send%sText'%(self.markup))(OPERATOR, '\\')
+ self.linenum+=1
+ # kludge for line spans in css,xhtml
+ if self.markup in ['XHTML','CSS']:
+ self.out.write('</span>')
+ self.out.write(self.raw[slash+1:newpos])
+
+ # Skip indenting tokens
+ if toktype in (token.INDENT, token.DEDENT):
+ self.pos = newpos
+ return
+
+ # Look for operators
+ if token.LPAR <= toktype and toktype <= token.OP:
+ # Trap decorators py2.4 >
+ if toktext == '@':
+ toktype = DECORATOR
+ # Set a flag if this was the decorator start so
+ # the decorator name and arguments can be identified
+ self.decoratorFlag = self.argFlag = 1
+ else:
+ if self.doArgs:
+ # Find the start for arguments
+ if toktext == '(' and self.argFlag:
+ self.argFlag = 2
+ # Find the end for arguments
+ elif toktext == ':':
+ self.argFlag = 0
+                ## Separate the different operator types
+ # Brackets
+ if self.doBrackets and toktext in ['[',']','(',')','{','}']:
+ toktype = BRACKETS
+ # Math operators
+ elif self.doMathOps and toktext in ['*=','**=','-=','+=','|=',
+ '%=','>>=','<<=','=','^=',
+ '/=', '+','-','**','*','/','%']:
+ toktype = MATH_OPERATOR
+ # Operator
+ else:
+ toktype = OPERATOR
+ # example how flags should work.
+ # def fun(arg=argvalue,arg2=argvalue2):
+ # 0 1 2 A 1 N 2 A 1 N 0
+ if toktext == "=" and self.argFlag == 2:
+ self.argFlag = 1
+ elif toktext == "," and self.argFlag == 1:
+ self.argFlag = 2
+ # Look for keywords
+ elif toktype == NAME and keyword.iskeyword(toktext):
+ toktype = KEYWORD
+ # Set a flag if this was the class / def start so
+ # the class / def name and arguments can be identified
+ if toktext in ['class', 'def']:
+ if toktext =='class' and \
+ not line[:line.find('class')].endswith('.'):
+ self.classFlag = self.argFlag = 1
+ elif toktext == 'def' and \
+ not line[:line.find('def')].endswith('.'):
+ self.defFlag = self.argFlag = 1
+ else:
+ # must have used a keyword as a name i.e. self.class
+ toktype = ERRORTOKEN
+
+ # Look for class, def, decorator name
+ elif (self.classFlag or self.defFlag or self.decoratorFlag) \
+ and self.doNames:
+ if self.classFlag:
+ self.classFlag = 0
+ toktype = CLASS_NAME
+ elif self.defFlag:
+ self.defFlag = 0
+ toktype = DEF_NAME
+ elif self.decoratorFlag:
+ self.decoratorFlag = 0
+ toktype = DECORATOR_NAME
+
+ # Look for strings
+ # Order of evaluation is important do not change.
+ elif toktype == token.STRING:
+ text = toktext.lower()
+ # TRIPLE DOUBLE QUOTE's
+ if (text[:3] == '"""'):
+ toktype = TRIPLEDOUBLEQUOTE
+ elif (text[:4] == 'r"""'):
+ toktype = TRIPLEDOUBLEQUOTE_R
+ elif (text[:4] == 'u"""' or
+ text[:5] == 'ur"""'):
+ toktype = TRIPLEDOUBLEQUOTE_U
+ # DOUBLE QUOTE's
+ elif (text[:1] == '"'):
+ toktype = DOUBLEQUOTE
+ elif (text[:2] == 'r"'):
+ toktype = DOUBLEQUOTE_R
+ elif (text[:2] == 'u"' or
+ text[:3] == 'ur"'):
+ toktype = DOUBLEQUOTE_U
+ # TRIPLE SINGLE QUOTE's
+ elif (text[:3] == "'''"):
+ toktype = TRIPLESINGLEQUOTE
+ elif (text[:4] == "r'''"):
+ toktype = TRIPLESINGLEQUOTE_R
+ elif (text[:4] == "u'''" or
+ text[:5] == "ur'''"):
+ toktype = TRIPLESINGLEQUOTE_U
+ # SINGLE QUOTE's
+ elif (text[:1] == "'"):
+ toktype = SINGLEQUOTE
+ elif (text[:2] == "r'"):
+ toktype = SINGLEQUOTE_R
+ elif (text[:2] == "u'" or
+ text[:3] == "ur'"):
+ toktype = SINGLEQUOTE_U
+
+ # test for invalid string declaration
+ if self.lasttext.lower() == 'ru':
+ toktype = ERRORTOKEN
+
+ # Look for comments
+ elif toktype == COMMENT:
+ if toktext[:2] == "##":
+ toktype = DOUBLECOMMENT
+ elif toktext[:3] == '#$#':
+ toktype = TEXT
+ self.textFlag = 'SPAN'
+ toktext = toktext[3:]
+ elif toktext[:3] == '#%#':
+ toktype = TEXT
+ self.textFlag = 'DIV'
+ toktext = toktext[3:]
+ elif toktext[:3] == '#@#':
+ toktype = TEXT
+ self.textFlag = 'RAW'
+ toktext = toktext[3:]
+ if self.doURL:
+ # this is a 'fake helper function'
+ # url(URI,Alias_name) or url(URI)
+ url_pos = toktext.find('url(')
+ if url_pos != -1:
+ before = toktext[:url_pos]
+ url = toktext[url_pos+4:]
+ splitpoint = url.find(',')
+ endpoint = url.find(')')
+ after = url[endpoint+1:]
+ url = url[:endpoint]
+ if splitpoint != -1:
+ urlparts = url.split(',',1)
+ toktext = '%s<a href="%s">%s</a>%s'%(
+ before,urlparts[0],urlparts[1].lstrip(),after)
+ else:
+ toktext = '%s<a href="%s">%s</a>%s'%(before,url,url,after)
+
+        # Separate errors from decorators
+ elif toktype == ERRORTOKEN:
+ # Bug fix for < py2.4
+ # space between decorators
+ if self.argFlag and toktext.isspace():
+ #toktype = NAME
+ self.out.write(toktext)
+ return
+ # Bug fix for py2.2 linenumbers with decorators
+ elif toktext.isspace():
+ # What if we have a decorator after a >>> or ...
+ #p = line.find('@')
+ #if p >= 0 and not line[:p].isspace():
+ #self.out.write(toktext)
+ #return
+ if self.skip:
+ self.skip=0
+ return
+ else:
+ self.out.write(toktext)
+ return
+ # trap decorators < py2.4
+ elif toktext == '@':
+ toktype = DECORATOR
+ # Set a flag if this was the decorator start so
+ # the decorator name and arguments can be identified
+ self.decoratorFlag = self.argFlag = 1
+
+        # Separate args from names
+ elif (self.argFlag == 2 and
+ toktype == NAME and
+ toktext != 'None' and
+ self.doArgs):
+ toktype = ARGS
+
+ # Look for line numbers
+ # The conversion code for them is in the send_text functions.
+ if toktext in [self.LINENUMHOLDER,self.LINESTART]:
+ toktype = LINENUMBER
+ # if we don't have linenumbers set flag
+ # to skip the trailing space from linestart
+ if toktext == self.LINESTART and not self.dolinenums \
+ or toktext == self.LINENUMHOLDER:
+ self.skip=1
+
+
+ # Skip blank token that made it thru
+ ## bugfix for the last empty tag.
+ if toktext == '':
+ return
+
+ # Last token text history
+ self.lasttext = toktext
+
+ # escape all but the urls in the comments
+ if toktype in (DOUBLECOMMENT, COMMENT):
+ if toktext.find('<a href=') == -1:
+ toktext = escape(toktext)
+ else:
+ pass
+ elif toktype == TEXT:
+ pass
+ else:
+ toktext = escape(toktext)
+
+ # Send text for any markup
+ getattr(self, '_send%sText'%(self.markup))(toktype, toktext)
+ return
+
+ ################################################################# Helpers
+
+ def _doSnippetStart(self):
+ if self.markup == 'HTML':
+ # Start of html snippet
+ self.out.write('<pre>\n')
+ else:
+ # Start of css/xhtml snippet
+ self.out.write(self.colors.get(CODESTART,'<pre class="py">\n'))
+
+ def _doSnippetEnd(self):
+ # End of html snippet
+ self.out.write(self.colors.get(CODEEND,'</pre>\n'))
+
+ ######################################################## markup selectors
+
+ def _getFile(self, filepath):
+ try:
+ _file = open(filepath,'r')
+ content = _file.read()
+ _file.close()
+ except:
+ traceback.print_exc()
+ content = ''
+ return content
+
+ def _doPageStart(self):
+ getattr(self, '_do%sStart'%(self.markup))()
+
+ def _doPageHeader(self):
+ if self.header != None:
+            if self.header.find('#$#') != -1 or \
+               self.header.find('#@#') != -1 or \
+               self.header.find('#%#') != -1:
+ self.out.write(self.header[3:])
+ else:
+ if self.header != '':
+ self.header = self._getFile(self.header)
+ getattr(self, '_do%sHeader'%(self.markup))()
+
+ def _doPageFooter(self):
+ if self.footer != None:
+ if self.footer.find('#$#') != -1 or \
+ self.footer.find('#@#') != -1 or \
+ self.footer.find('#%#') != -1:
+ self.out.write(self.footer[3:])
+ else:
+ if self.footer != '':
+ self.footer = self._getFile(self.footer)
+ getattr(self, '_do%sFooter'%(self.markup))()
+
+ def _doPageEnd(self):
+ getattr(self, '_do%sEnd'%(self.markup))()
+
+ ################################################### color/style retrieval
+ ## Some of these are not used anymore but are kept for documentation
+
+ def _getLineNumber(self):
+ num = self.linenum
+ self.linenum+=1
+ return str(num).rjust(5)+" "
+
+ def _getTags(self, key):
+ # style tags
+ return self.colors.get(key, self.colors[NAME])[0]
+
+ def _getForeColor(self, key):
+ # get text foreground color, if not set to black
+ color = self.colors.get(key, self.colors[NAME])[1]
+ if color[:1] != '#':
+ color = '#000000'
+ return color
+
+ def _getBackColor(self, key):
+ # get text background color
+ return self.colors.get(key, self.colors[NAME])[2]
+
+ def _getPageColor(self):
+ # get page background color
+ return self.colors.get(PAGEBACKGROUND, '#FFFFFF')
+
+ def _getStyle(self, key):
+ # get the token style from the color dictionary
+ return self.colors.get(key, self.colors[NAME])
+
+ def _getMarkupClass(self, key):
+ # get the markup class name from the markup dictionary
+ return MARKUPDICT.get(key, MARKUPDICT[NAME])
+
+ def _getDocumentCreatedBy(self):
+ return '<!--This document created by %s ver.%s on: %s-->\n'%(
+ __title__,__version__,time.ctime())
+
+ ################################################### HTML markup functions
+
+ def _doHTMLStart(self):
+ # Start of html page
+ self.out.write('<!DOCTYPE html PUBLIC \
+"-//W3C//DTD HTML 4.01//EN">\n')
+ self.out.write('<html><head><title>%s</title>\n'%(self.title))
+ self.out.write(self._getDocumentCreatedBy())
+ self.out.write('<meta http-equiv="Content-Type" \
+content="text/html;charset=iso-8859-1">\n')
+ # Get background
+ self.out.write('</head><body bgcolor="%s">\n'%self._getPageColor())
+ self._doPageHeader()
+ self.out.write('<pre>')
+
+ def _getHTMLStyles(self, toktype, toktext):
+ # Get styles
+ tags, color = self.colors.get(toktype, self.colors[NAME])[:2]#
+ tagstart=[]
+ tagend=[]
+ # check for styles and set them if needed.
+ if 'b' in tags:#Bold
+ tagstart.append('<b>')
+ tagend.append('</b>')
+ if 'i' in tags:#Italics
+ tagstart.append('<i>')
+ tagend.append('</i>')
+ if 'u' in tags:#Underline
+ tagstart.append('<u>')
+ tagend.append('</u>')
+ # HTML tags should be paired like so : <b><i><u>Doh!</u></i></b>
+ tagend.reverse()
+ starttags="".join(tagstart)
+ endtags="".join(tagend)
+ return starttags,endtags,color
+
+ def _sendHTMLText(self, toktype, toktext):
+ numberlinks = self.numberlinks
+
+ # If it is an error, set a red box around the bad tokens
+ # older browsers should ignore it
+ if toktype == ERRORTOKEN:
+ style = ' style="border: solid 1.5pt #FF0000;"'
+ else:
+ style = ''
+ # Get styles
+ starttag, endtag, color = self._getHTMLStyles(toktype, toktext)
+ # This is a hack to 'fix' multi-line strings.
+ # Multi-line strings are treated as only one token
+ # even though they can be several physical lines.
+ # That makes it hard to spot the start of a line,
+ # because at this level all we know about are tokens.
+
+ if toktext.count(self.LINENUMHOLDER):
+ # rip apart the string and separate it by line.
+ # count lines and change all linenum token to line numbers.
+ # embedded all the new font tags inside the current one.
+ # Do this by ending the tag first then writing our new tags,
+ # then starting another font tag exactly like the first one.
+ if toktype == LINENUMBER:
+ splittext = toktext.split(self.LINENUMHOLDER)
+ else:
+ splittext = toktext.split(self.LINENUMHOLDER+' ')
+ store = []
+ store.append(splittext.pop(0))
+ lstarttag, lendtag, lcolor = self._getHTMLStyles(LINENUMBER, toktext)
+ count = len(splittext)
+ for item in splittext:
+ num = self._getLineNumber()
+ if numberlinks:
+ numstrip = num.strip()
+ content = '<a name="%s" href="#%s">%s</a>' \
+ %(numstrip,numstrip,num)
+ else:
+ content = num
+ if count <= 1:
+ endtag,starttag = '',''
+ linenumber = ''.join([endtag,'<font color=', lcolor, '>',
+ lstarttag, content, lendtag, '</font>' ,starttag])
+ store.append(linenumber+item)
+ toktext = ''.join(store)
+ # send text
+ ## Output optimization
+ # skip font tag if black text, but styles will still be sent. (b,u,i)
+ if color !='#000000':
+ startfont = '<font color="%s"%s>'%(color, style)
+ endfont = '</font>'
+ else:
+ startfont, endfont = ('','')
+ if toktype != LINENUMBER:
+ self.out.write(''.join([startfont,starttag,
+ toktext,endtag,endfont]))
+ else:
+ self.out.write(toktext)
+ return
+
+ def _doHTMLHeader(self):
+ # Optional
+ if self.header != '':
+ self.out.write('%s\n'%self.header)
+ else:
+ color = self._getForeColor(NAME)
+ self.out.write('<b><font color="%s"># %s \
+ <br># %s</font></b><hr>\n'%
+ (color, self.title, time.ctime()))
+
+ def _doHTMLFooter(self):
+ # Optional
+ if self.footer != '':
+ self.out.write('%s\n'%self.footer)
+ else:
+ color = self._getForeColor(NAME)
+ self.out.write('<b><font color="%s"> \
+ <hr># %s<br># %s</font></b>\n'%
+ (color, self.title, time.ctime()))
+
+ def _doHTMLEnd(self):
+ # End of html page
+ self.out.write('</pre>\n')
+ # Write a little info at the bottom
+ self._doPageFooter()
+ self.out.write('</body></html>\n')
+
+ #################################################### CSS markup functions
+
+ def _getCSSStyle(self, key):
+ # Get the tags and colors from the dictionary
+ tags, forecolor, backcolor = self._getStyle(key)
+ style=[]
+ border = None
+ bordercolor = None
+ tags = tags.lower()
+ if tags:
+ # get the border color if specified
+ # the border color will be appended to
+ # the list after we define a border
+ if '#' in tags:# border color
+ start = tags.find('#')
+ end = start + 7
+ bordercolor = tags[start:end]
+                # strip the colour spec (replace returns a new string)
+                tags = tags.replace(bordercolor, '', 1)
+ # text styles
+ if 'b' in tags:# Bold
+ style.append('font-weight:bold;')
+ else:
+ style.append('font-weight:normal;')
+ if 'i' in tags:# Italic
+ style.append('font-style:italic;')
+ if 'u' in tags:# Underline
+ style.append('text-decoration:underline;')
+ # border size
+ if 'l' in tags:# thick border
+ size='thick'
+ elif 'm' in tags:# medium border
+ size='medium'
+ elif 't' in tags:# thin border
+ size='thin'
+ else:# default
+ size='medium'
+ # border styles
+ if 'n' in tags:# inset border
+ border='inset'
+ elif 'o' in tags:# outset border
+ border='outset'
+ elif 'r' in tags:# ridge border
+ border='ridge'
+ elif 'g' in tags:# groove border
+ border='groove'
+ elif '=' in tags:# double border
+ border='double'
+ elif '.' in tags:# dotted border
+ border='dotted'
+ elif '-' in tags:# dashed border
+ border='dashed'
+ elif 's' in tags:# solid border
+ border='solid'
+ # border type check
+            separate_sides=0
+            for side in ['<','>','^','v']:
+                if side in tags:
+                    separate_sides+=1
+            # border box or separate sides
+            if separate_sides==0 and border:
+ style.append('border: %s %s;'%(border,size))
+ else:
+ if border == None:
+ border = 'solid'
+ if 'v' in tags:# bottom border
+ style.append('border-bottom:%s %s;'%(border,size))
+ if '<' in tags:# left border
+ style.append('border-left:%s %s;'%(border,size))
+ if '>' in tags:# right border
+ style.append('border-right:%s %s;'%(border,size))
+ if '^' in tags:# top border
+ style.append('border-top:%s %s;'%(border,size))
+ else:
+ style.append('font-weight:normal;')# css inherited style fix
+ # we have to define our borders before we set colors
+ if bordercolor:
+ style.append('border-color:%s;'%bordercolor)
+ # text forecolor
+ style.append('color:%s;'% forecolor)
+ # text backcolor
+ if backcolor:
+ style.append('background-color:%s;'%backcolor)
+ return (self._getMarkupClass(key),' '.join(style))
+
+ def _sendCSSStyle(self, external=0):
+ """ create external and internal style sheets"""
+ styles = []
+ external += self.external
+ if not external:
+ styles.append('<style type="text/css">\n<!--\n')
+ # Get page background color and write styles ignore any we don't know
+ styles.append('body { background:%s; }\n'%self._getPageColor())
+ # write out the various css styles
+ for key in MARKUPDICT:
+ styles.append('.%s { %s }\n'%self._getCSSStyle(key))
+ # If you want to style the pre tag you must modify the color dict.
+ # Example:
+ # lite[PY] = .py {border: solid thin #000000;background:#555555}\n'''
+ styles.append(self.colors.get(PY, '.py { }\n'))
+ # Extra css can be added here
+ # add CSSHOOK to the color dict if you need it.
+ # Example:
+ #lite[CSSHOOK] = """.mytag { border: solid thin #000000; } \n
+ # .myothertag { font-weight:bold; )\n"""
+ styles.append(self.colors.get(CSSHOOK,''))
+ if not self.external:
+ styles.append('--></style>\n')
+ return ''.join(styles)
+
+ def _doCSSStart(self):
+ # Start of css/html 4.01 page
+ self.out.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">\n')
+ self.out.write('<html><head><title>%s</title>\n'%(self.title))
+ self.out.write(self._getDocumentCreatedBy())
+ self.out.write('<meta http-equiv="Content-Type" \
+content="text/html;charset=iso-8859-1">\n')
+ self._doCSSStyleSheet()
+ self.out.write('</head>\n<body>\n')
+ # Write a little info at the top.
+ self._doPageHeader()
+ self.out.write(self.colors.get(CODESTART,'<pre class="py">\n'))
+ return
+
+ def _doCSSStyleSheet(self):
+ if not self.external:
+ # write an embedded style sheet
+ self.out.write(self._sendCSSStyle())
+ else:
+ # write a link to an external style sheet
+ self.out.write('<link rel="stylesheet" \
+href="pystyle.css" type="text/css">')
+ return
+
+ def _sendCSSText(self, toktype, toktext):
+ # This is a hack to 'fix' multi-line strings.
+ # Multi-line strings are treated as only one token
+ # even though they can be several physical lines.
+ # That makes it hard to spot the start of a line,
+ # because at this level all we know about are tokens.
+ markupclass = MARKUPDICT.get(toktype, MARKUPDICT[NAME])
+ # if it is a LINENUMBER type then we can skip the rest
+ if toktext == self.LINESTART and toktype == LINENUMBER:
+ self.out.write('<span class="py_line">')
+ return
+ if toktext.count(self.LINENUMHOLDER):
+ # rip apart the string and separate it by line
+ # count lines and change all linenum token to line numbers
+ # also convert linestart and lineend tokens
+ # <linestart> <lnumstart> lnum <lnumend> text <lineend>
+ #################################################
+ newmarkup = MARKUPDICT.get(LINENUMBER, MARKUPDICT[NAME])
+ lstartspan = '<span class="%s">'%(newmarkup)
+ if toktype == LINENUMBER:
+ splittext = toktext.split(self.LINENUMHOLDER)
+ else:
+ splittext = toktext.split(self.LINENUMHOLDER+' ')
+ store = []
+ # we have already seen the first linenumber token
+ # so we can skip the first one
+ store.append(splittext.pop(0))
+ for item in splittext:
+ num = self._getLineNumber()
+ if self.numberlinks:
+ numstrip = num.strip()
+ content= '<a name="%s" href="#%s">%s</a>' \
+ %(numstrip,numstrip,num)
+ else:
+ content = num
+ linenumber= ''.join([lstartspan,content,'</span>'])
+ store.append(linenumber+item)
+ toktext = ''.join(store)
+ if toktext.count(self.LINESTART):
+ # wraps the textline in a line span
+ # this adds a lot of kludges, is it really worth it?
+ store = []
+ parts = toktext.split(self.LINESTART+' ')
+ # handle the first part differently
+            # the whole token gets wrapped in a span later on
+ first = parts.pop(0)
+ # place spans before the newline
+ pos = first.rfind('\n')
+ if pos != -1:
+ first=first[:pos]+'</span></span>'+first[pos:]
+ store.append(first)
+ #process the rest of the string
+ for item in parts:
+ #handle line numbers if present
+ if self.dolinenums:
+ item = item.replace('</span>',
+ '</span><span class="%s">'%(markupclass))
+ else:
+ item = '<span class="%s">%s'%(markupclass,item)
+ # add endings for line and string tokens
+ pos = item.rfind('\n')
+ if pos != -1:
+ item=item[:pos]+'</span></span>\n'
+ store.append(item)
+ # add start tags for lines
+ toktext = '<span class="py_line">'.join(store)
+ # Send text
+ if toktype != LINENUMBER:
+ if toktype == TEXT and self.textFlag == 'DIV':
+ startspan = '<div class="%s">'%(markupclass)
+ endspan = '</div>'
+ elif toktype == TEXT and self.textFlag == 'RAW':
+ startspan,endspan = ('','')
+ else:
+ startspan = '<span class="%s">'%(markupclass)
+ endspan = '</span>'
+ self.out.write(''.join([startspan, toktext, endspan]))
+ else:
+ self.out.write(toktext)
+ return
+
+ def _doCSSHeader(self):
+ if self.header != '':
+ self.out.write('%s\n'%self.header)
+ else:
+ name = MARKUPDICT.get(NAME)
+ self.out.write('<div class="%s"># %s <br> \
+# %s</div><hr>\n'%(name, self.title, time.ctime()))
+
+ def _doCSSFooter(self):
+ # Optional
+ if self.footer != '':
+ self.out.write('%s\n'%self.footer)
+ else:
+ self.out.write('<hr><div class="%s"># %s <br> \
+# %s</div>\n'%(MARKUPDICT.get(NAME),self.title, time.ctime()))
+
+ def _doCSSEnd(self):
+ # End of css/html page
+ self.out.write(self.colors.get(CODEEND,'</pre>\n'))
+ # Write a little info at the bottom
+ self._doPageFooter()
+ self.out.write('</body></html>\n')
+ return
+
+ ################################################## XHTML markup functions
+
+ def _doXHTMLStart(self):
+ # XHTML is really just XML + HTML 4.01.
+ # We only need to change the page headers,
+ # and a few tags to get valid XHTML.
+ # Start of xhtml page
+ self.out.write('<?xml version="1.0"?>\n \
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\n \
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n \
+<html xmlns="http://www.w3.org/1999/xhtml">\n')
+ self.out.write('<head><title>%s</title>\n'%(self.title))
+ self.out.write(self._getDocumentCreatedBy())
+ self.out.write('<meta http-equiv="Content-Type" \
+content="text/html;charset=iso-8859-1"/>\n')
+ self._doXHTMLStyleSheet()
+ self.out.write('</head>\n<body>\n')
+ # Write a little info at the top.
+ self._doPageHeader()
+ self.out.write(self.colors.get(CODESTART,'<pre class="py">\n'))
+ return
+
+ def _doXHTMLStyleSheet(self):
+ if not self.external:
+ # write an embedded style sheet
+ self.out.write(self._sendCSSStyle())
+ else:
+ # write a link to an external style sheet
+ self.out.write('<link rel="stylesheet" \
+href="pystyle.css" type="text/css"/>\n')
+ return
+
+ def _sendXHTMLText(self, toktype, toktext):
+ self._sendCSSText(toktype, toktext)
+
+ def _doXHTMLHeader(self):
+ # Optional
+ if self.header:
+ self.out.write('%s\n'%self.header)
+ else:
+ name = MARKUPDICT.get(NAME)
+ self.out.write('<div class="%s"># %s <br/> \
+# %s</div><hr/>\n '%(
+ name, self.title, time.ctime()))
+
+ def _doXHTMLFooter(self):
+ # Optional
+ if self.footer:
+ self.out.write('%s\n'%self.footer)
+ else:
+ self.out.write('<hr/><div class="%s"># %s <br/> \
+# %s</div>\n'%(MARKUPDICT.get(NAME), self.title, time.ctime()))
+
+ def _doXHTMLEnd(self):
+ self._doCSSEnd()
+
+#############################################################################
+
+if __name__ == '__main__':
+ cli()
+
+#############################################################################
+# PySourceColor.py
+# 2004, 2005 M.E.Farmer Jr.
+# Python license
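The colour-dictionary entries unpacked by _getCSSStyle above are (style tags, foreground, background) triples, and the tag string is a small mini-language: 'b'/'i'/'u' toggle bold, italic and underline, 't'/'m'/'l' pick a thin/medium/thick border, letters such as 's', '=', '.' or '-' pick the border style, '<', '>', '^' and 'v' restrict it to single sides, and an optional '#RRGGBB' sets the border colour. Below is a minimal sketch of that mapping for the common cases; the css_from_style helper and the sample entry are illustrative only, not part of the module.

def css_from_style(tags, forecolor, backcolor=None):
    """Very rough restatement of _getCSSStyle for the common cases."""
    tags = tags.lower()
    bordercolor = None
    if '#' in tags:                        # optional '#RRGGBB' border colour
        start = tags.find('#')
        bordercolor = tags[start:start + 7]
        tags = tags.replace(bordercolor, '', 1)
    props = []
    if 'b' in tags:
        props.append('font-weight:bold;')
    else:
        props.append('font-weight:normal;')
    if 'i' in tags:
        props.append('font-style:italic;')
    if 'u' in tags:
        props.append('text-decoration:underline;')
    if 's' in tags:                        # solid border around the whole box
        size = 'medium'
        if 't' in tags:
            size = 'thin'
        elif 'l' in tags:
            size = 'thick'
        props.append('border: solid %s;' % size)
    if bordercolor:
        props.append('border-color:%s;' % bordercolor)
    props.append('color:%s;' % forecolor)
    if backcolor:
        props.append('background-color:%s;' % backcolor)
    return ' '.join(props)

# Hypothetical entry: bold text, thin solid red border, green on pale grey.
print(css_from_style('bt s#FF0000', '#008800', '#EEEEEE'))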
diff --git a/paste/util/UserDict24.py b/paste/util/UserDict24.py
new file mode 100644
index 0000000..e5b64f5
--- /dev/null
+++ b/paste/util/UserDict24.py
@@ -0,0 +1,167 @@
+"""A more or less complete user-defined wrapper around dictionary objects."""
+
+class UserDict:
+ def __init__(self, dict=None, **kwargs):
+ self.data = {}
+ if dict is not None:
+ if not hasattr(dict,'keys'):
+ dict = type({})(dict) # make mapping from a sequence
+ self.update(dict)
+ if len(kwargs):
+ self.update(kwargs)
+ def __repr__(self): return repr(self.data)
+ def __cmp__(self, dict):
+ if isinstance(dict, UserDict):
+ return cmp(self.data, dict.data)
+ else:
+ return cmp(self.data, dict)
+ def __len__(self): return len(self.data)
+ def __getitem__(self, key): return self.data[key]
+ def __setitem__(self, key, item): self.data[key] = item
+ def __delitem__(self, key): del self.data[key]
+ def clear(self): self.data.clear()
+ def copy(self):
+ if self.__class__ is UserDict:
+ return UserDict(self.data)
+ import copy
+ data = self.data
+ try:
+ self.data = {}
+ c = copy.copy(self)
+ finally:
+ self.data = data
+ c.update(self)
+ return c
+ def keys(self): return self.data.keys()
+ def items(self): return self.data.items()
+ def iteritems(self): return self.data.iteritems()
+ def iterkeys(self): return self.data.iterkeys()
+ def itervalues(self): return self.data.itervalues()
+ def values(self): return self.data.values()
+ def has_key(self, key): return self.data.has_key(key)
+ def update(self, dict):
+ if isinstance(dict, UserDict):
+ self.data.update(dict.data)
+ elif isinstance(dict, type(self.data)):
+ self.data.update(dict)
+ else:
+ for k, v in dict.items():
+ self[k] = v
+ def get(self, key, failobj=None):
+ if not self.has_key(key):
+ return failobj
+ return self[key]
+ def setdefault(self, key, failobj=None):
+ if not self.has_key(key):
+ self[key] = failobj
+ return self[key]
+ def pop(self, key, *args):
+ return self.data.pop(key, *args)
+ def popitem(self):
+ return self.data.popitem()
+ def __contains__(self, key):
+ return key in self.data
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+ fromkeys = classmethod(fromkeys)
+
+class IterableUserDict(UserDict):
+ def __iter__(self):
+ return iter(self.data)
+
+class DictMixin:
+ # Mixin defining all dictionary methods for classes that already have
+ # a minimum dictionary interface including getitem, setitem, delitem,
+ # and keys. Without knowledge of the subclass constructor, the mixin
+ # does not define __init__() or copy(). In addition to the four base
+ # methods, progressively more efficiency comes with defining
+ # __contains__(), __iter__(), and iteritems().
+
+ # second level definitions support higher levels
+ def __iter__(self):
+ for k in self.keys():
+ yield k
+ def has_key(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+ def __contains__(self, key):
+ return self.has_key(key)
+
+ # third level takes advantage of second level definitions
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+ def iterkeys(self):
+ return self.__iter__()
+
+ # fourth level uses definitions from lower levels
+ def itervalues(self):
+ for _, v in self.iteritems():
+ yield v
+ def values(self):
+ return [v for _, v in self.iteritems()]
+ def items(self):
+ return list(self.iteritems())
+ def clear(self):
+ for key in self.keys():
+ del self[key]
+ def setdefault(self, key, default):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError, "pop expected at most 2 arguments, got "\
+ + repr(1 + len(args))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+ def popitem(self):
+ try:
+ k, v = self.iteritems().next()
+ except StopIteration:
+ raise KeyError, 'container is empty'
+ del self[k]
+ return (k, v)
+ def update(self, other):
+ # Make progressively weaker assumptions about "other"
+ if hasattr(other, 'iteritems'): # iteritems saves memory and lookups
+ for k, v in other.iteritems():
+ self[k] = v
+ elif hasattr(other, '__iter__'): # iter saves memory
+ for k in other:
+ self[k] = other[k]
+ else:
+ for k in other.keys():
+ self[k] = other[k]
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+ def __cmp__(self, other):
+ if other is None:
+ return 1
+ if isinstance(other, DictMixin):
+ other = dict(other.iteritems())
+ return cmp(dict(self.iteritems()), other)
+ def __len__(self):
+ return len(self.keys())
+
+ def __nonzero__(self):
+ return bool(self.iteritems())
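UserDict24.py appears to be a copy of the Python 2.4 standard-library UserDict module, vendored so Paste can rely on it on older interpreters. As the comments above say, DictMixin needs only __getitem__, __setitem__, __delitem__ and keys() from a subclass and supplies the rest of the mapping protocol. A minimal sketch (Python 2; the PairStore class is made up for illustration):

from paste.util.UserDict24 import DictMixin

class PairStore(DictMixin):
    """Toy mapping backed by a list of (key, value) pairs."""
    def __init__(self):
        self._pairs = []
    def __getitem__(self, key):
        for k, v in self._pairs:
            if k == key:
                return v
        raise KeyError(key)
    def __setitem__(self, key, value):
        try:
            del self[key]              # replace any existing entry
        except KeyError:
            pass
        self._pairs.append((key, value))
    def __delitem__(self, key):
        for i, (k, v) in enumerate(self._pairs):
            if k == key:
                del self._pairs[i]
                return
        raise KeyError(key)
    def keys(self):
        return [k for k, v in self._pairs]

store = PairStore()
store.update({'a': 1})                 # update() comes from DictMixin
store['b'] = 2
print(store.get('c', 0))               # 0, get() comes from DictMixin
print(sorted(store.items()))           # [('a', 1), ('b', 2)]
print('a' in store)                    # True, __contains__ via has_key()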
diff --git a/paste/util/__init__.py b/paste/util/__init__.py
new file mode 100644
index 0000000..ea4ff1e
--- /dev/null
+++ b/paste/util/__init__.py
@@ -0,0 +1,4 @@
+"""
+Package for miscellaneous routines that do not depend on other parts
+of Paste
+"""
diff --git a/paste/util/classinit.py b/paste/util/classinit.py
new file mode 100644
index 0000000..e4e6b28
--- /dev/null
+++ b/paste/util/classinit.py
@@ -0,0 +1,42 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+class ClassInitMeta(type):
+
+ def __new__(meta, class_name, bases, new_attrs):
+ cls = type.__new__(meta, class_name, bases, new_attrs)
+ if (new_attrs.has_key('__classinit__')
+ and not isinstance(cls.__classinit__, staticmethod)):
+ setattr(cls, '__classinit__',
+ staticmethod(cls.__classinit__.im_func))
+ if hasattr(cls, '__classinit__'):
+ cls.__classinit__(cls, new_attrs)
+ return cls
+
+def build_properties(cls, new_attrs):
+ """
+ Given a class and a new set of attributes (as passed in by
+ __classinit__), create or modify properties based on functions
+ with special names ending in __get, __set, and __del.
+ """
+ for name, value in new_attrs.items():
+ if (name.endswith('__get') or name.endswith('__set')
+ or name.endswith('__del')):
+ base = name[:-5]
+ if hasattr(cls, base):
+ old_prop = getattr(cls, base)
+ if not isinstance(old_prop, property):
+ raise ValueError(
+ "Attribute %s is a %s, not a property; function %s is named like a property"
+ % (base, type(old_prop), name))
+ attrs = {'fget': old_prop.fget,
+ 'fset': old_prop.fset,
+ 'fdel': old_prop.fdel,
+ 'doc': old_prop.__doc__}
+ else:
+ attrs = {}
+ attrs['f' + name[-3:]] = value
+ if name.endswith('__get') and value.__doc__:
+ attrs['doc'] = value.__doc__
+ new_prop = property(**attrs)
+ setattr(cls, base, new_prop)
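build_properties is intended to be called from a __classinit__ hook installed via ClassInitMeta: methods named foo__get, foo__set and foo__del are folded into a foo property when the class object is created. A minimal sketch using Python 2 metaclass syntax (the Document class is made up for illustration):

from paste.util.classinit import ClassInitMeta, build_properties

class Document(object):
    __metaclass__ = ClassInitMeta

    def __classinit__(cls, new_attrs):
        # runs once, when the class object itself is created
        build_properties(cls, new_attrs)

    def __init__(self, title):
        self._title = title

    def title__get(self):
        """The document title."""
        return self._title

    def title__set(self, value):
        self._title = value.strip()

doc = Document('draft')
doc.title = '  Paste  '
print(doc.title)        # Paste, set through the generated property

Subclasses built with the same metaclass run __classinit__ again with their own attributes, so each subclass can add or override properties of its own.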
diff --git a/paste/util/classinstance.py b/paste/util/classinstance.py
new file mode 100644
index 0000000..ac2be3e
--- /dev/null
+++ b/paste/util/classinstance.py
@@ -0,0 +1,38 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+class classinstancemethod(object):
+ """
+ Acts like a class method when called from a class, like an
+ instance method when called by an instance. The method should
+ take two arguments, 'self' and 'cls'; one of these will be None
+ depending on how the method was called.
+ """
+
+ def __init__(self, func):
+ self.func = func
+ self.__doc__ = func.__doc__
+
+ def __get__(self, obj, type=None):
+ return _methodwrapper(self.func, obj=obj, type=type)
+
+class _methodwrapper(object):
+
+ def __init__(self, func, obj, type):
+ self.func = func
+ self.obj = obj
+ self.type = type
+
+ def __call__(self, *args, **kw):
+ assert not kw.has_key('self') and not kw.has_key('cls'), (
+ "You cannot use 'self' or 'cls' arguments to a "
+ "classinstancemethod")
+ return self.func(*((self.obj, self.type) + args), **kw)
+
+ def __repr__(self):
+ if self.obj is None:
+ return ('<bound class method %s.%s>'
+ % (self.type.__name__, self.func.func_name))
+ else:
+ return ('<bound method %s.%s of %r>'
+ % (self.type.__name__, self.func.func_name, self.obj))
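classinstancemethod hands the wrapped function both the instance (None for class-level calls) and the class, so a single method can serve both call styles. A small sketch (the Settings class and its attributes are made up for illustration):

from paste.util.classinstance import classinstancemethod

class Settings(object):
    defaults = {'debug': False}

    def __init__(self, **overrides):
        self.overrides = overrides

    def get(self, cls, name):
        # 'self' is None when the method is called on the class itself
        if self is not None and name in self.overrides:
            return self.overrides[name]
        return cls.defaults.get(name)
    get = classinstancemethod(get)

print(Settings.get('debug'))               # False, class-level call
print(Settings(debug=True).get('debug'))   # True, instance-level call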
diff --git a/paste/util/converters.py b/paste/util/converters.py
new file mode 100644
index 0000000..f0ad349
--- /dev/null
+++ b/paste/util/converters.py
@@ -0,0 +1,26 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+def asbool(obj):
+ if isinstance(obj, (str, unicode)):
+ obj = obj.strip().lower()
+ if obj in ['true', 'yes', 'on', 'y', 't', '1']:
+ return True
+ elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
+ return False
+ else:
+ raise ValueError(
+ "String is not true/false: %r" % obj)
+ return bool(obj)
+
+def aslist(obj, sep=None, strip=True):
+ if isinstance(obj, (str, unicode)):
+ lst = obj.split(sep)
+ if strip:
+ lst = [v.strip() for v in lst]
+ return lst
+ elif isinstance(obj, (list, tuple)):
+ return obj
+ elif obj is None:
+ return []
+ else:
+ return [obj]
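asbool and aslist normalize configuration-style values that usually arrive as strings. A few illustrative calls (Python 2); note that asbool raises ValueError for strings it does not recognize:

from paste.util.converters import asbool, aslist

print(asbool('yes'))            # True ('true', 'on', 'y', 't', '1' also work)
print(asbool('0'))              # False
print(asbool(None))             # False, falls through to bool(obj)
print(aslist('a, b, c', ','))   # ['a', 'b', 'c'], split on ',' and stripped
print(aslist(None))             # []
print(aslist(42))               # [42], non-string scalars are wrapped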
diff --git a/paste/util/dateinterval.py b/paste/util/dateinterval.py
new file mode 100644
index 0000000..2195ab2
--- /dev/null
+++ b/paste/util/dateinterval.py
@@ -0,0 +1,103 @@
+"""
+DateInterval.py
+
+Convert interval strings (in the form of 1w2d, etc.) to seconds, and
+back again. Month and year conversions are approximate: a month is
+treated as 30 days and a year as 365 days, so leap years are ignored.
+
+Accepts (y)ear, (b)month, (w)eek, (d)ay, (h)our, (m)inute, (s)econd.
+
+Exports only the interval_encode and interval_decode functions.
+"""
+
+import re
+
+__all__ = ['interval_decode', 'interval_encode']
+
+second = 1
+minute = second*60
+hour = minute*60
+day = hour*24
+week = day*7
+month = day*30
+year = day*365
+timeValues = {
+ 'y': year,
+ 'b': month,
+ 'w': week,
+ 'd': day,
+ 'h': hour,
+ 'm': minute,
+ 's': second,
+ }
+timeOrdered = timeValues.items()
+timeOrdered.sort(lambda a, b: -cmp(a[1], b[1]))
+
+def interval_encode(seconds, include_sign=False):
+ """Encodes a number of seconds (representing a time interval)
+    into a form like 2d1h3s (largest units first).
+
+ >>> interval_encode(10)
+ '10s'
+ >>> interval_encode(493939)
+ '5d17h12m19s'
+ """
+ s = ''
+ orig = seconds
+ seconds = abs(seconds)
+ for char, amount in timeOrdered:
+ if seconds >= amount:
+ i, seconds = divmod(seconds, amount)
+ s += '%i%s' % (i, char)
+ if orig < 0:
+ s = '-' + s
+ elif not orig:
+ return '0'
+ elif include_sign:
+ s = '+' + s
+ return s
+
+_timeRE = re.compile(r'[0-9]+[a-zA-Z]')
+def interval_decode(s):
+    """Decodes a number in the format 1h4d3m (1 hour, 4 days, 3 minutes)
+ into a number of seconds
+
+ >>> interval_decode('40s')
+ 40
+ >>> interval_decode('10000s')
+ 10000
+ >>> interval_decode('3d1w45s')
+ 864045
+ """
+ time = 0
+ sign = 1
+ s = s.strip()
+ if s.startswith('-'):
+ s = s[1:]
+ sign = -1
+ elif s.startswith('+'):
+ s = s[1:]
+ for match in allMatches(s, _timeRE):
+ char = match.group(0)[-1].lower()
+ if not timeValues.has_key(char):
+ # @@: should signal error
+ continue
+ time += int(match.group(0)[:-1]) * timeValues[char]
+ return time
+
+# @@-sgd 2002-12-23 - this function does not belong in this module, find a better place.
+def allMatches(source, regex):
+ """Return a list of matches for regex in source
+ """
+ pos = 0
+ end = len(source)
+ rv = []
+ match = regex.search(source, pos)
+ while match:
+ rv.append(match)
+ match = regex.search(source, match.end() )
+ return rv
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
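A quick round trip through the two exported functions, using the approximations defined above (a month is 30 days, a year 365 days); the values are illustrative only:

from paste.util.dateinterval import interval_decode, interval_encode

print(interval_decode('1w2d'))   # 777600 seconds (9 * 86400)
print(interval_encode(777600))   # '1w2d'
print(interval_decode('1b'))     # 2592000 seconds (30 days)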
diff --git a/paste/util/datetimeutil.py b/paste/util/datetimeutil.py
new file mode 100644
index 0000000..b1a7f38
--- /dev/null
+++ b/paste/util/datetimeutil.py
@@ -0,0 +1,361 @@
+# (c) 2005 Clark C. Evans and contributors
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# Some of this code was funded by: http://prometheusresearch.com
+"""
+Date, Time, and Timespan Parsing Utilities
+
+This module contains parsing support for "human friendly" ``datetime``
+values. The explicit goal of these routines is to provide multi-format
+date/time support not unlike that found in Microsoft Excel. Most
+approaches keep the input very "strict" to prevent errors -- this
+approach is much more liberal, since we assume the user interface
+parrots the normalized value back to the user, who therefore has
+immediate feedback if the data was not typed in correctly.
+
+ ``parse_date`` and ``normalize_date``
+
+    These functions take a value like '9 jan 2007' and return either a
+    ``date`` object or an ISO 8601 formatted date value such as
+    '2007-01-09'. There is an option to produce Oracle database style
+    output as well, ``09 JAN 2007``, but this is not the default.
+
+    This module always treats '/' delimiters as using US date order
+    (since the author's clients are US based), hence '1/9/2007' is
+    January 9th. Since this module treats '-' as following European
+    order, this supports both modes of data entry; together with
+    immediately parroting the result back to the screen, the author
+    has found this approach to work well in practice.
+
+ ``parse_time`` and ``normalize_time``
+
+    These functions take a value like '1 pm' and return either a
+    ``time`` object or an ISO 8601 formatted 24h clock time such as
+    '13:00'. There is an option to produce US style time values,
+    '1:00 PM', however this is not the default.
+
+ ``parse_datetime`` and ``normalize_datetime``
+
+    These functions take a value like '9 jan 2007 at 1 pm' and return
+    either a ``datetime`` object or an ISO 8601 formatted value
+    (without the T) such as '2007-01-09 13:00'. There is an option to
+    produce Oracle / US style output, '09 JAN 2007 @ 1:00 PM', however
+    this is not the default.
+
+ ``parse_timedelta`` and ``normalize_timedelta``
+
+    These functions take a value like '1h 15m' and return either a
+    ``timedelta`` object or a 2-decimal fixed-point numerical value in
+    hours, such as '1.25'. The rationale is to support meeting or
+    time-billing lengths, not to be an accurate representation in
+    milliseconds. As such, not all valid ``timedelta`` values will
+    have a normalized representation.
+
+"""
+from datetime import timedelta, time, date
+from time import localtime
+import string
+
+__all__ = ['parse_timedelta', 'normalize_timedelta',
+ 'parse_time', 'normalize_time',
+ 'parse_date', 'normalize_date']
+
+def _number(val):
+ try:
+ return string.atoi(val)
+ except:
+ return None
+
+#
+# timedelta
+#
+def parse_timedelta(val):
+ """
+ returns a ``timedelta`` object, or None
+ """
+ if not val:
+ return None
+ val = string.lower(val)
+ if "." in val:
+ val = float(val)
+ return timedelta(hours=int(val), minutes=60*(val % 1.0))
+ fHour = ("h" in val or ":" in val)
+ fMin = ("m" in val or ":" in val)
+ fFraction = "." in val
+ for noise in "minu:teshour()":
+ val = string.replace(val, noise, ' ')
+ val = string.strip(val)
+ val = string.split(val)
+ hr = 0.0
+ mi = 0
+ val.reverse()
+ if fHour:
+ hr = int(val.pop())
+ if fMin:
+ mi = int(val.pop())
+ if len(val) > 0 and not hr:
+ hr = int(val.pop())
+ return timedelta(hours=hr, minutes=mi)
+
+def normalize_timedelta(val):
+ """
+ produces a normalized string value of the timedelta
+
+    This function returns a normalized time span value consisting of
+    the number of hours in fractional form. For example, '1h 15min' is
+    formatted as '1.25'.
+ """
+ if type(val) == str:
+ val = parse_timedelta(val)
+ if not val:
+ return ''
+ hr = val.seconds/3600
+ mn = (val.seconds % 3600)/60
+ return "%d.%02d" % (hr, mn * 100/60)
+
+#
+# time
+#
+def parse_time(val):
+ if not val:
+ return None
+ hr = mi = 0
+ val = string.lower(val)
+ amflag = (-1 != string.find(val, 'a')) # set if AM is found
+ pmflag = (-1 != string.find(val, 'p')) # set if PM is found
+ for noise in ":amp.":
+ val = string.replace(val, noise, ' ')
+ val = string.split(val)
+ if len(val) > 1:
+ hr = int(val[0])
+ mi = int(val[1])
+ else:
+ val = val[0]
+ if len(val) < 1:
+ pass
+ elif 'now' == val:
+ tm = localtime()
+ hr = tm[3]
+ mi = tm[4]
+ elif 'noon' == val:
+ hr = 12
+ elif len(val) < 3:
+ hr = int(val)
+ if not amflag and not pmflag and hr < 7:
+ hr += 12
+ elif len(val) < 5:
+ hr = int(val[:-2])
+ mi = int(val[-2:])
+ else:
+ hr = int(val[:1])
+ if amflag and hr >= 12:
+ hr = hr - 12
+ if pmflag and hr < 12:
+ hr = hr + 12
+ return time(hr, mi)
+
+def normalize_time(value, ampm):
+ if not value:
+ return ''
+ if type(value) == str:
+ value = parse_time(value)
+ if not ampm:
+ return "%02d:%02d" % (value.hour, value.minute)
+ hr = value.hour
+ am = "AM"
+ if hr < 1 or hr > 23:
+ hr = 12
+ elif hr >= 12:
+ am = "PM"
+ if hr > 12:
+ hr = hr - 12
+ return "%02d:%02d %s" % (hr, value.minute, am)
+
+#
+# Date Processing
+#
+
+_one_day = timedelta(days=1)
+
+_str2num = {'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6,
+ 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12 }
+
+def _month(val):
+ for (key, mon) in _str2num.items():
+ if key in val:
+ return mon
+ raise TypeError("unknown month '%s'" % val)
+
+_days_in_month = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
+ 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
+ }
+_num2str = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
+ 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec',
+ }
+_wkdy = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
+
+def parse_date(val):
+ if not(val):
+ return None
+ val = string.lower(val)
+ now = None
+
+ # optimized check for YYYY-MM-DD
+ strict = val.split("-")
+ if len(strict) == 3:
+ (y, m, d) = strict
+ if "+" in d:
+ d = d.split("+")[0]
+ if " " in d:
+ d = d.split(" ")[0]
+ try:
+ now = date(int(y), int(m), int(d))
+ val = "xxx" + val[10:]
+ except ValueError:
+ pass
+
+ # allow for 'now', 'mon', 'tue', etc.
+ if not now:
+ chk = val[:3]
+ if chk in ('now','tod'):
+ now = date.today()
+ elif chk in _wkdy:
+ now = date.today()
+ idx = list(_wkdy).index(chk) + 1
+ while now.isoweekday() != idx:
+ now += _one_day
+
+ # allow dates to be modified via + or - /w number of days, so
+ # that now+3 is three days from now
+ if now:
+ tail = val[3:].strip()
+ tail = tail.replace("+"," +").replace("-"," -")
+ for item in tail.split():
+ try:
+ days = int(item)
+ except ValueError:
+ pass
+ else:
+ now += timedelta(days=days)
+ return now
+
+ # ok, standard parsing
+ yr = mo = dy = None
+ for noise in ('/', '-', ',', '*'):
+ val = string.replace(val, noise, ' ')
+ for noise in _wkdy:
+ val = string.replace(val, noise, ' ')
+ out = []
+ last = False
+ ldig = False
+ for ch in val:
+ if ch.isdigit():
+ if last and not ldig:
+ out.append(' ')
+ last = ldig = True
+ else:
+ if ldig:
+ out.append(' ')
+ ldig = False
+ last = True
+ out.append(ch)
+ val = string.split("".join(out))
+ if 3 == len(val):
+ a = _number(val[0])
+ b = _number(val[1])
+ c = _number(val[2])
+ if len(val[0]) == 4:
+ yr = a
+ if b: # 1999 6 23
+ mo = b
+ dy = c
+ else: # 1999 Jun 23
+ mo = _month(val[1])
+ dy = c
+ elif a > 0:
+ yr = c
+ if len(val[2]) < 4:
+ raise TypeError("four digit year required")
+ if b: # 6 23 1999
+ dy = b
+ mo = a
+ else: # 23 Jun 1999
+ dy = a
+ mo = _month(val[1])
+ else: # Jun 23, 2000
+ dy = b
+ yr = c
+ if len(val[2]) < 4:
+ raise TypeError("four digit year required")
+ mo = _month(val[0])
+ elif 2 == len(val):
+ a = _number(val[0])
+ b = _number(val[1])
+ if a > 999:
+ yr = a
+ dy = 1
+ if b > 0: # 1999 6
+ mo = b
+ else: # 1999 Jun
+ mo = _month(val[1])
+ elif a > 0:
+ if b > 999: # 6 1999
+ mo = a
+ yr = b
+ dy = 1
+ elif b > 0: # 6 23
+ mo = a
+ dy = b
+ else: # 23 Jun
+ dy = a
+ mo = _month(val[1])
+ else:
+ if b > 999: # Jun 2001
+ yr = b
+ dy = 1
+ else: # Jun 23
+ dy = b
+ mo = _month(val[0])
+ elif 1 == len(val):
+ val = val[0]
+ if not val.isdigit():
+ mo = _month(val)
+ if mo is not None:
+ dy = 1
+ else:
+ v = _number(val)
+ val = str(v)
+ if 8 == len(val): # 20010623
+ yr = _number(val[:4])
+ mo = _number(val[4:6])
+ dy = _number(val[6:])
+ elif len(val) in (3,4):
+ if v > 1300: # 2004
+ yr = v
+ mo = 1
+ dy = 1
+ else: # 1202
+ mo = _number(val[:-2])
+ dy = _number(val[-2:])
+ elif v < 32:
+ dy = v
+ else:
+ raise TypeError("four digit year required")
+ tm = localtime()
+ if mo is None:
+ mo = tm[1]
+ if dy is None:
+ dy = tm[2]
+ if yr is None:
+ yr = tm[0]
+ return date(yr, mo, dy)
+
+def normalize_date(val, iso8601=True):
+ if not val:
+ return ''
+ if type(val) == str:
+ val = parse_date(val)
+ if iso8601:
+ return "%4d-%02d-%02d" % (val.year, val.month, val.day)
+ return "%02d %s %4d" % (val.day, _num2str[val.month], val.year)
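A few calls matching the behaviour described in the module docstring (Python 2; normalize_time requires an explicit ampm flag):

from paste.util.datetimeutil import (parse_date, normalize_date,
                                     parse_time, normalize_time,
                                     normalize_timedelta)

print(parse_date('9 jan 2007'))           # 2007-01-09 (a datetime.date)
print(normalize_date('1/9/2007'))         # '2007-01-09', '/' means US order
print(parse_time('1 pm'))                 # 13:00:00 (a datetime.time)
print(normalize_time('1 pm', ampm=True))  # '01:00 PM'
print(normalize_timedelta('1h 15m'))      # '1.25' (hours, fractional)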
diff --git a/paste/util/doctest24.py b/paste/util/doctest24.py
new file mode 100644
index 0000000..28849ed
--- /dev/null
+++ b/paste/util/doctest24.py
@@ -0,0 +1,2665 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+ # 0, Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ # 1. Utility Functions
+ 'is_private',
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Tester
+ 'Tester',
+ # 8. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 9. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re, types
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+ __name__, 0)
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+ flag = 1 << len(OPTIONFLAGS_BY_NAME)
+ OPTIONFLAGS_BY_NAME[name] = flag
+ return flag
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+ DONT_ACCEPT_BLANKLINE |
+ NORMALIZE_WHITESPACE |
+ ELLIPSIS |
+ IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF |
+ REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Tester Class -- for backwards compatibility
+# 8. Unittest Support
+# 9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+ """prefix, base -> true iff name prefix + "." + base is "private".
+
+ Prefix may be an empty string, and base does not contain a period.
+ Prefix is ignored (although functions you write conforming to this
+ protocol may make use of it).
+ Return true iff base begins with an (at least one) underscore, but
+ does not both begin and end with (at least) two underscores.
+
+ >>> is_private("a.b", "my_func")
+ False
+ >>> is_private("____", "_my_func")
+ True
+ >>> is_private("someclass", "__init__")
+ False
+ >>> is_private("sometypo", "__init_")
+ True
+ >>> is_private("x.y.z", "_")
+ True
+ >>> is_private("_x.y.z", "__")
+ False
+ >>> is_private("", "") # senseless but consistent
+ False
+ """
+ warnings.warn("is_private is deprecated; it wasn't useful; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning, stacklevel=2)
+ return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
+
+def _normalize_module(module, depth=2):
+ """
+ Return the module specified by `module`. In particular:
+ - If `module` is a module, then return module.
+ - If `module` is a string, then import and return the
+ module with that name.
+ - If `module` is None, then return the calling module.
+ The calling module is assumed to be the module of
+ the stack frame at the given depth in the call stack.
+ """
+ if inspect.ismodule(module):
+ return module
+ elif isinstance(module, (str, unicode)):
+ return __import__(module, globals(), locals(), ["*"])
+ elif module is None:
+ return sys.modules[sys._getframe(depth).f_globals['__name__']]
+ else:
+ raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning every
+ non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ # Prevent softspace from screwing up the next test case, in
+ # case they used print with a trailing comma in an example.
+ if hasattr(self, "softspace"):
+ del self.softspace
+ return result
+
+ def truncate(self, size=None):
+ StringIO.truncate(self, size)
+ if hasattr(self, "softspace"):
+ del self.softspace
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+ """
+ Essentially the only subtle case:
+ >>> _ellipsis_match('aa...aa', 'aaa')
+ False
+ """
+ if ELLIPSIS_MARKER not in want:
+ return want == got
+
+ # Find "the real" strings.
+ ws = want.split(ELLIPSIS_MARKER)
+ assert len(ws) >= 2
+
+ # Deal with exact matches possibly needed at one or both ends.
+ startpos, endpos = 0, len(got)
+ w = ws[0]
+ if w: # starts with exact match
+ if got.startswith(w):
+ startpos = len(w)
+ del ws[0]
+ else:
+ return False
+ w = ws[-1]
+ if w: # ends with exact match
+ if got.endswith(w):
+ endpos -= len(w)
+ del ws[-1]
+ else:
+ return False
+
+ if startpos > endpos:
+ # Exact end matches required more characters than we have, as in
+ # _ellipsis_match('aa...aa', 'aaa')
+ return False
+
+ # For the rest, we only need to find the leftmost non-overlapping
+ # match for each piece. If there's no overall match that way alone,
+ # there's no overall match period.
+ for w in ws:
+ # w may be '' at times, if there are consecutive ellipses, or
+ # due to an ellipsis at the start or end of `want`. That's OK.
+ # Search for an empty string succeeds, and doesn't change startpos.
+ startpos = got.find(w, startpos, endpos)
+ if startpos < 0:
+ return False
+ startpos += len(w)
+
+ return True
+
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+ if not inspect.ismodule(module):
+ raise TypeError, 'Expected a module: %r' % module
+ if path.startswith('/'):
+ raise ValueError, 'Module-relative files may not have absolute paths'
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module " +
+ module + " (it has no __file__)")
+
+ # Combine the base directory and the path.
+ return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+ """
+ A single doctest example, consisting of source code and expected
+ output. `Example` defines the following attributes:
+
+ - source: A single Python statement, always ending with a newline.
+ The constructor adds a newline if needed.
+
+ - want: The expected output from running the source code (either
+ from stdout, or a traceback in case of exception). `want` ends
+ with a newline unless it's empty, in which case it's an empty
+ string. The constructor adds a newline if needed.
+
+ - exc_msg: The exception message generated by the example, if
+ the example is expected to generate an exception; or `None` if
+ it is not expected to generate an exception. This exception
+ message is compared against the return value of
+ `traceback.format_exception_only()`. `exc_msg` ends with a
+ newline unless it's `None`. The constructor adds a newline
+ if needed.
+
+ - lineno: The line number within the DocTest string containing
+ this Example where the Example begins. This line number is
+ zero-based, with respect to the beginning of the DocTest.
+
+ - indent: The example's indentation in the DocTest string.
+      I.e., the number of space characters that precede the
+ example's first prompt.
+
+ - options: A dictionary mapping from option flags to True or
+ False, which is used to override default options for this
+ example. Any option flags not contained in this dictionary
+ are left at their default value (as specified by the
+ DocTestRunner's optionflags). By default, no options are set.
+ """
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Normalize inputs.
+ if not source.endswith('\n'):
+ source += '\n'
+ if want and not want.endswith('\n'):
+ want += '\n'
+ if exc_msg is not None and not exc_msg.endswith('\n'):
+ exc_msg += '\n'
+ # Store properties.
+ self.source = source
+ self.want = want
+ self.lineno = lineno
+ self.indent = indent
+ if options is None: options = {}
+ self.options = options
+ self.exc_msg = exc_msg
+
+class DocTest:
+ """
+ A collection of doctest examples that should be run in a single
+ namespace. Each `DocTest` defines the following attributes:
+
+ - examples: the list of examples.
+
+ - globs: The namespace (aka globals) that the examples should
+ be run in.
+
+ - name: A name identifying the DocTest (typically, the name of
+ the object whose docstring this DocTest was extracted from).
+
+ - filename: The name of the file that this DocTest was extracted
+ from, or `None` if the filename is unknown.
+
+ - lineno: The line number within filename where this DocTest
+ begins, or `None` if the line number is unavailable. This
+ line number is zero-based, with respect to the beginning of
+ the file.
+
+ - docstring: The string that the examples were extracted from,
+ or `None` if the string is unavailable.
+ """
+ def __init__(self, examples, globs, name, filename, lineno, docstring):
+ """
+ Create a new DocTest containing the given examples. The
+ DocTest's globals are initialized with a copy of `globs`.
+ """
+ assert not isinstance(examples, basestring), \
+ "DocTest no longer accepts str; use DocTestParser instead"
+ self.examples = examples
+ self.docstring = docstring
+ self.globs = globs.copy()
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def __repr__(self):
+ if len(self.examples) == 0:
+ examples = 'no examples'
+ elif len(self.examples) == 1:
+ examples = '1 example'
+ else:
+ examples = '%d examples' % len(self.examples)
+ return ('<DocTest %s from %s:%s (%s)>' %
+ (self.name, self.filename, self.lineno, examples))
+
+
+ # This lets us sort tests by name:
+ def __cmp__(self, other):
+ if not isinstance(other, DocTest):
+ return -1
+ return cmp((self.name, self.filename, self.lineno, id(self)),
+ (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name='<string>'):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+ interesting appears on the same line as opening triple-quote,
+ and so the first interesting line is called \"line 1\" then.
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
+
+ def _parse_example(self, m, name, lineno):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+ _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ for i, line in enumerate(lines):
+ if len(line) >= indent+4 and line[indent+3] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+3], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+ """
+ A class used to extract the DocTests that are relevant to a given
+ object, from its docstring and the docstrings of its contained
+ objects. Doctests can currently be extracted from the following
+ object types: modules, functions, classes, methods, staticmethods,
+ classmethods, and properties.
+ """
+
+ def __init__(self, verbose=False, parser=DocTestParser(),
+ recurse=True, _namefilter=None, exclude_empty=True):
+ """
+ Create a new doctest finder.
+
+ The optional argument `parser` specifies a class or
+ function that should be used to create new DocTest objects (or
+ objects that implement the same interface as DocTest). The
+ signature for this factory function should match the signature
+ of the DocTest constructor.
+
+ If the optional argument `recurse` is false, then `find` will
+ only examine the given object, and not any contained objects.
+
+ If the optional argument `exclude_empty` is false, then `find`
+ will include tests for objects with empty docstrings.
+ """
+ self._parser = parser
+ self._verbose = verbose
+ self._recurse = recurse
+ self._exclude_empty = exclude_empty
+ # _namefilter is undocumented, and exists only for temporary backward-
+ # compatibility support of testmod's deprecated isprivate mess.
+ self._namefilter = _namefilter
+
+ def find(self, obj, name=None, module=None, globs=None,
+ extraglobs=None):
+ """
+ Return a list of the DocTests that are defined by the given
+ object's docstring, or by any of its contained objects'
+ docstrings.
+
+ The optional parameter `module` is the module that contains
+ the given object. If the module is not specified or is None, then
+ the test finder will attempt to automatically determine the
+ correct module. The object's module is used:
+
+ - As a default namespace, if `globs` is not specified.
+ - To prevent the DocTestFinder from extracting DocTests
+ from objects that are imported from other modules.
+ - To find the name of the file containing the object.
+ - To help find the line number of the object within its
+ file.
+
+ Contained objects whose module does not match `module` are ignored.
+
+ If `module` is False, no attempt to find the module will be made.
+ This is obscure, of use mostly in tests: if `module` is False, or
+ is None but cannot be found automatically, then all objects are
+ considered to belong to the (non-existent) module, so all contained
+ objects will (recursively) be searched for doctests.
+
+        The globals for each DocTest are formed by combining `globs`
+ and `extraglobs` (bindings in `extraglobs` override bindings
+ in `globs`). A new copy of the globals dictionary is created
+ for each DocTest. If `globs` is not specified, then it
+ defaults to the module's `__dict__`, if specified, or {}
+ otherwise. If `extraglobs` is not specified, then it defaults
+ to {}.
+
+ """
+ # If name was not specified, then extract it from the object.
+ if name is None:
+ name = getattr(obj, '__name__', None)
+ if name is None:
+ raise ValueError("DocTestFinder.find: name must be given "
+ "when obj.__name__ doesn't exist: %r" %
+ (type(obj),))
+
+ # Find the module that contains the given object (if obj is
+ # a module, then module=obj.). Note: this may fail, in which
+ # case module will be None.
+ if module is False:
+ module = None
+ elif module is None:
+ module = inspect.getmodule(obj)
+
+ # Read the module's source code. This is used by
+ # DocTestFinder._find_lineno to find the line number for a
+ # given object's docstring.
+ try:
+ file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+ source_lines = linecache.getlines(file)
+ if not source_lines:
+ source_lines = None
+ except TypeError:
+ source_lines = None
+
+ # Initialize globals, and merge in extraglobs.
+ if globs is None:
+ if module is None:
+ globs = {}
+ else:
+ globs = module.__dict__.copy()
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+ tests = []
+ self._find(tests, obj, name, module, source_lines, globs, {})
+ return tests
+
+ def _filter(self, obj, prefix, base):
+ """
+ Return true if the given object should not be examined.
+ """
+ return (self._namefilter is not None and
+ self._namefilter(prefix, base))
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.func_globals
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+            return True # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+ if self._verbose:
+ print 'Finding tests in %s' % name
+
+ # If we've already processed this object, then ignore it.
+ if id(obj) in seen:
+ return
+ seen[id(obj)] = 1
+
+ # Find a test for this object, and add it to the list of tests.
+ test = self._get_test(obj, name, module, globs, source_lines)
+ if test is not None:
+ tests.append(test)
+
+ # Look for tests in a module's contained objects.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ valname = '%s.%s' % (name, valname)
+ # Recurse to functions & classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val)) and
+ self._from_module(module, val)):
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a module's __test__ dictionary.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in getattr(obj, '__test__', {}).items():
+ if not isinstance(valname, basestring):
+ raise ValueError("DocTestFinder.find: __test__ keys "
+ "must be strings: %r" %
+ (type(valname),))
+ if not (inspect.isfunction(val) or inspect.isclass(val) or
+ inspect.ismethod(val) or inspect.ismodule(val) or
+ isinstance(val, basestring)):
+ raise ValueError("DocTestFinder.find: __test__ values "
+ "must be strings, functions, methods, "
+ "classes, or modules: %r" %
+ (type(val),))
+ valname = '%s.__test__.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if inspect.isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).im_func
+
+ # Recurse to methods, properties, and nested classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val) or
+ isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ def _get_test(self, obj, name, module, globs, source_lines):
+ """
+ Return a DocTest for the given object, if it defines a docstring;
+ otherwise, return None.
+ """
+ # Extract the object's docstring. If it doesn't have one,
+ # then return None (no test for this object).
+ if isinstance(obj, basestring):
+ docstring = obj
+ else:
+ try:
+ if obj.__doc__ is None:
+ docstring = ''
+ else:
+ docstring = obj.__doc__
+ if not isinstance(docstring, basestring):
+ docstring = str(docstring)
+ except (TypeError, AttributeError):
+ docstring = ''
+
+ # Find the docstring's location in the file.
+ lineno = self._find_lineno(obj, source_lines)
+
+ # Don't bother if the docstring is empty.
+ if self._exclude_empty and not docstring:
+ return None
+
+ # Return a DocTest for this object.
+ if module is None:
+ filename = None
+ else:
+ filename = getattr(module, '__file__', module.__name__)
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ return self._parser.get_doctest(docstring, globs, name,
+ filename, lineno)
+
+ def _find_lineno(self, obj, source_lines):
+ """
+ Return a line number of the given object's docstring. Note:
+ this method assumes that the object has a docstring.
+ """
+ lineno = None
+
+ # Find the line number for modules.
+ if inspect.ismodule(obj):
+ lineno = 0
+
+ # Find the line number for classes.
+ # Note: this could be fooled if a class is defined multiple
+ # times in a single file.
+ if inspect.isclass(obj):
+ if source_lines is None:
+ return None
+ pat = re.compile(r'^\s*class\s*%s\b' %
+ getattr(obj, '__name__', '-'))
+ for i, line in enumerate(source_lines):
+ if pat.match(line):
+ lineno = i
+ break
+
+ # Find the line number for functions & methods.
+ if inspect.ismethod(obj): obj = obj.im_func
+ if inspect.isfunction(obj): obj = obj.func_code
+ if inspect.istraceback(obj): obj = obj.tb_frame
+ if inspect.isframe(obj): obj = obj.f_code
+ if inspect.iscode(obj):
+ lineno = getattr(obj, 'co_firstlineno', None)-1
+
+ # Find the line number where the docstring starts. Assume
+ # that it's the first line that begins with a quote mark.
+ # Note: this could be fooled by a multiline function
+ # signature, where a continuation line begins with a quote
+ # mark.
+ if lineno is not None:
+ if source_lines is None:
+ return lineno+1
+ pat = re.compile('(^|.*:)\s*\w*("|\')')
+ for lineno in range(lineno, len(source_lines)):
+ if pat.match(source_lines[lineno]):
+ return lineno
+
+ # We couldn't find the line number.
+ return None
+
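+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# It shows how DocTestFinder is typically driven; `module` stands for any
+# already-imported module object.
+def _sketch_find_doctests(module):
+    """Return the DocTests found in `module`, printing a short inventory."""
+    finder = DocTestFinder(verbose=False, recurse=True)
+    tests = finder.find(module)
+    for test in tests:
+        # Each DocTest records a dotted name and the examples it contains.
+        print '%s: %d example(s)' % (test.name, len(test.examples))
+    return tests
+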
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+ """
+ A class used to run DocTest test cases, and accumulate statistics.
+ The `run` method is used to process a single DocTest case. It
+ returns a tuple `(f, t)`, where `t` is the number of test cases
+ tried, and `f` is the number of test cases that failed.
+
+ >>> tests = DocTestFinder().find(_TestClass)
+ >>> runner = DocTestRunner(verbose=False)
+ >>> for test in tests:
+ ... print runner.run(test)
+ (0, 2)
+ (0, 1)
+ (0, 2)
+ (0, 2)
+
+ The `summarize` method prints a summary of all the test cases that
+ have been run by the runner, and returns an aggregated `(f, t)`
+ tuple:
+
+ >>> runner.summarize(verbose=1)
+ 4 items passed all tests:
+ 2 tests in _TestClass
+ 2 tests in _TestClass.__init__
+ 2 tests in _TestClass.get
+ 1 tests in _TestClass.square
+ 7 tests in 4 items.
+ 7 passed and 0 failed.
+ Test passed.
+ (0, 7)
+
+    The aggregated numbers of tried and failed examples are also
+    available via the `tries` and `failures` attributes:
+
+ >>> runner.tries
+ 7
+ >>> runner.failures
+ 0
+
+ The comparison between expected outputs and actual outputs is done
+ by an `OutputChecker`. This comparison may be customized with a
+ number of option flags; see the documentation for `testmod` for
+ more information. If the option flags are insufficient, then the
+ comparison may also be customized by passing a subclass of
+ `OutputChecker` to the constructor.
+
+ The test runner's display output can be controlled in two ways.
+    First, an output function (`out`) can be passed to
+    `DocTestRunner.run`; this function will be called with strings that
+ should be displayed. It defaults to `sys.stdout.write`. If
+ capturing the output is not sufficient, then the display output
+ can be also customized by subclassing DocTestRunner, and
+ overriding the methods `report_start`, `report_success`,
+ `report_unexpected_exception`, and `report_failure`.
+ """
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=None, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ if verbose is None:
+ verbose = '-v' in sys.argv
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, compileflags, out):
+ """
+ Run the examples in `test`. Write the outcome of each example
+ with one of the `DocTestRunner.report_*` methods, using the
+ writer function `out`. `compileflags` is the set of compiler
+ flags that should be used to execute examples. Return a tuple
+ `(f, t)`, where `t` is the number of examples tried, and `f`
+ is the number of examples that failed. The examples are run
+ in the namespace `test.globs`.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Use a special filename for compile(), so we can retrieve
+ # the source code during interactive debugging (see
+ # __patched_linecache_getlines).
+ filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+ # Run the example in the given context (globs), and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ try:
+ # Don't blink! This is where the user's code gets run.
+ exec compile(example.source, filename, "single",
+ compileflags, 1) in test.globs
+ self.debugger.set_continue() # ==== Example Finished ====
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+ self.debugger.set_continue() # ==== Example Finished ====
+
+ got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_info = sys.exc_info()
+ exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exc_info)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ m1 = re.match(r'[^:]*:', example.exc_msg)
+ m2 = re.match(r'[^:]*:', exc_msg)
+ if m1 and m2 and check(m1.group(0), m2.group(0),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exc_info)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return failures, tries
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
+ __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+ r'(?P<name>[\w\.]+)'
+ r'\[(?P<examplenum>\d+)\]>$')
+ def __patched_linecache_getlines(self, filename, module_globals=None):
+ m = self.__LINECACHE_FILENAME_RE.match(filename)
+ if m and m.group('name') == self.test.name:
+ example = self.test.examples[int(m.group('examplenum'))]
+ return example.source.splitlines(True)
+ else:
+ return self.save_linecache_getlines(filename)#?, module_globals)
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ """
+ Run the examples in `test`, and display the results using the
+ writer function `out`.
+
+ The examples are run in the namespace `test.globs`. If
+ `clear_globs` is true (the default), then this namespace will
+ be cleared after the test runs, to help with garbage
+ collection. If you would like to examine the namespace after
+ the test completes, then use `clear_globs=False`.
+
+ `compileflags` gives the set of flags that should be used by
+ the Python compiler when running the examples. If not
+ specified, then it will default to the set of future-import
+ flags that apply to `globs`.
+
+ The output of each example is checked using
+ `DocTestRunner.check_output`, and the results are formatted by
+ the `DocTestRunner.report_*` methods.
+ """
+ self.test = test
+
+ if compileflags is None:
+ compileflags = _extract_future_flags(test.globs)
+
+ save_stdout = sys.stdout
+ if out is None:
+ out = save_stdout.write
+ sys.stdout = self._fakeout
+
+ # Patch pdb.set_trace to restore sys.stdout during interactive
+ # debugging (so it's not still redirected to self._fakeout).
+ # Note that the interactive output will go to *our*
+ # save_stdout, even if that's not the real sys.stdout; this
+ # allows us to write test cases for the set_trace behavior.
+ save_set_trace = pdb.set_trace
+ self.debugger = _OutputRedirectingPdb(save_stdout)
+ self.debugger.reset()
+ pdb.set_trace = self.debugger.set_trace
+
+ # Patch linecache.getlines, so we can see the example's source
+ # when we're inside the debugger.
+ self.save_linecache_getlines = linecache.getlines
+ linecache.getlines = self.__patched_linecache_getlines
+
+ try:
+ return self.__run(test, compileflags, out)
+ finally:
+ sys.stdout = save_stdout
+ pdb.set_trace = save_set_trace
+ linecache.getlines = self.save_linecache_getlines
+ if clear_globs:
+ test.globs.clear()
+
+ #/////////////////////////////////////////////////////////////////
+ # Summarization
+ #/////////////////////////////////////////////////////////////////
+ def summarize(self, verbose=None):
+ """
+ Print a summary of all the test cases that have been run by
+ this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+ the total number of failed examples, and `t` is the total
+ number of tried examples.
+
+ The optional `verbose` argument controls how detailed the
+ summary is. If the verbosity is not specified, then the
+ DocTestRunner's verbosity is used.
+ """
+ if verbose is None:
+ verbose = self._verbose
+ notests = []
+ passed = []
+ failed = []
+ totalt = totalf = 0
+ for x in self._name2ft.items():
+ name, (f, t) = x
+ assert f <= t
+ totalt += t
+ totalf += f
+ if t == 0:
+ notests.append(name)
+ elif f == 0:
+ passed.append( (name, t) )
+ else:
+ failed.append(x)
+ if verbose:
+ if notests:
+ print len(notests), "items had no tests:"
+ notests.sort()
+ for thing in notests:
+ print " ", thing
+ if passed:
+ print len(passed), "items passed all tests:"
+ passed.sort()
+ for thing, count in passed:
+ print " %3d tests in %s" % (count, thing)
+ if failed:
+ print self.DIVIDER
+ print len(failed), "items had failures:"
+ failed.sort()
+ for thing, (f, t) in failed:
+ print " %3d of %3d in %s" % (f, t, thing)
+ if verbose:
+ print totalt, "tests in", len(self._name2ft), "items."
+ print totalt - totalf, "passed and", totalf, "failed."
+ if totalf:
+ print "***Test Failed***", totalf, "failures."
+ elif verbose:
+ print "Test passed."
+ return totalf, totalt
+
+ #/////////////////////////////////////////////////////////////////
+ # Backward compatibility cruft to maintain doctest.master.
+ #/////////////////////////////////////////////////////////////////
+ def merge(self, other):
+ d = self._name2ft
+ for name, (f, t) in other._name2ft.items():
+ if name in d:
+ print "*** DocTestRunner.merge: '" + name + "' in both" \
+ " testers; summing outcomes."
+ f2, t2 = d[name]
+ f = f + f2
+ t = t + t2
+ d[name] = f, t
+
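+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# It shows the usual DocTestRunner flow: find the tests, run each one, and
+# summarize.  `module` stands for any already-imported module object.
+def _sketch_run_doctests(module, verbose=False):
+    """Run every doctest in `module` and return (failures, tries)."""
+    runner = DocTestRunner(verbose=verbose, optionflags=ELLIPSIS)
+    for test in DocTestFinder().find(module):
+        # Capture per-example report output instead of writing to stdout.
+        report = []
+        runner.run(test, out=report.append)
+    return runner.summarize(verbose=verbose)
+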
+class OutputChecker:
+ """
+    A class used to check whether the actual output from a doctest
+ example matches the expected output. `OutputChecker` defines two
+ methods: `check_output`, which compares a given pair of outputs,
+ and returns true if they match; and `output_difference`, which
+ returns a string describing the differences between two outputs.
+ """
+ def check_output(self, want, got, optionflags):
+ """
+ Return True iff the actual output from an example (`got`)
+ matches the expected output (`want`). These strings are
+ always considered to match if they are identical; but
+ depending on what option flags the test runner is using,
+ several non-exact match types are also possible. See the
+ documentation for `TestRunner` for more information about
+ option flags.
+ """
+ # Handle the common case first, for efficiency:
+ # if they're string-identical, always return true.
+ if got == want:
+ return True
+
+ # The values True and False replaced 1 and 0 as the return
+ # value for boolean comparisons in Python 2.3.
+ if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+ if (got,want) == ("True\n", "1\n"):
+ return True
+ if (got,want) == ("False\n", "0\n"):
+ return True
+
+ # <BLANKLINE> can be used as a special sequence to signify a
+ # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ # Replace <BLANKLINE> in want with a blank line.
+ want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+ '', want)
+ # If a line in got contains only spaces, then remove the
+ # spaces.
+ got = re.sub('(?m)^\s*?$', '', got)
+ if got == want:
+ return True
+
+ # This flag causes doctest to ignore any differences in the
+ # contents of whitespace strings. Note that this can be used
+ # in conjunction with the ELLIPSIS flag.
+ if optionflags & NORMALIZE_WHITESPACE:
+ got = ' '.join(got.split())
+ want = ' '.join(want.split())
+ if got == want:
+ return True
+
+ # The ELLIPSIS flag says to let the sequence "..." in `want`
+ # match any substring in `got`.
+ if optionflags & ELLIPSIS:
+ if _ellipsis_match(want, got):
+ return True
+
+ # We didn't find any match; return false.
+ return False
+
+ # Should we do a fancy diff?
+ def _do_a_fancy_diff(self, want, got, optionflags):
+ # Not unless they asked for a fancy diff.
+ if not optionflags & (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF):
+ return False
+
+ # If expected output uses ellipsis, a meaningful fancy diff is
+ # too hard ... or maybe not. In two real-life failures Tim saw,
+ # a diff was a major help anyway, so this is commented out.
+ # [todo] _ellipsis_match() knows which pieces do and don't match,
+ # and could be the basis for a kick-ass diff in this case.
+ ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+ ## return False
+
+ # ndiff does intraline difference marking, so can be useful even
+ # for 1-line differences.
+ if optionflags & REPORT_NDIFF:
+ return True
+
+ # The other diff types need at least a few lines to be helpful.
+ return want.count('\n') > 2 and got.count('\n') > 2
+
+ def output_difference(self, example, got, optionflags):
+ """
+ Return a string describing the differences between the
+ expected output for a given example (`example`) and the actual
+ output (`got`). `optionflags` is the set of option flags used
+ to compare `want` and `got`.
+ """
+ want = example.want
+ # If <BLANKLINE>s are being used, then replace blank lines
+ # with <BLANKLINE> in the actual output string.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+ # Check if we should use diff.
+ if self._do_a_fancy_diff(want, got, optionflags):
+ # Split want & got into lines.
+ want_lines = want.splitlines(True) # True == keep line ends
+ got_lines = got.splitlines(True)
+ # Use difflib to find their differences.
+ if optionflags & REPORT_UDIFF:
+ diff = difflib.unified_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'unified diff with -expected +actual'
+ elif optionflags & REPORT_CDIFF:
+ diff = difflib.context_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'context diff with expected followed by actual'
+ elif optionflags & REPORT_NDIFF:
+ engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+ diff = list(engine.compare(want_lines, got_lines))
+ kind = 'ndiff with -expected +actual'
+ else:
+ assert 0, 'Bad diff option'
+ # Remove trailing whitespace on diff output.
+ diff = [line.rstrip() + '\n' for line in diff]
+ return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+ # If we're not using diff, then simply list the expected
+ # output followed by the actual output.
+ if want and got:
+ return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+ elif want:
+ return 'Expected:\n%sGot nothing\n' % _indent(want)
+ elif got:
+ return 'Expected nothing\nGot:\n%s' % _indent(got)
+ else:
+ return 'Expected nothing\nGot nothing\n'
+
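+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# It demonstrates how OutputChecker.check_output() reacts to option flags:
+# an exact mismatch can still be accepted under ELLIPSIS or
+# NORMALIZE_WHITESPACE.
+def _sketch_check_output():
+    """Exercise OutputChecker.check_output under a few option flags."""
+    checker = OutputChecker()
+    want, got = '[0, 1, ..., 9]\n', '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n'
+    # Without ELLIPSIS the literal '...' is not treated specially: no match.
+    assert not checker.check_output(want, got, 0)
+    # With ELLIPSIS, '...' in the expected output matches any substring.
+    assert checker.check_output(want, got, ELLIPSIS)
+    # With NORMALIZE_WHITESPACE, runs of whitespace compare equal.
+    assert checker.check_output('1  2   3\n', '1 2 3\n', NORMALIZE_WHITESPACE)
+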
+class DocTestFailure(Exception):
+ """A DocTest example has failed in debugging mode.
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+ - got: the actual output
+ """
+ def __init__(self, test, example, got):
+ self.test = test
+ self.example = example
+ self.got = got
+
+ def __str__(self):
+ return str(self.test)
+
+class UnexpectedException(Exception):
+ """A DocTest example has encountered an unexpected exception
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+ - exc_info: the exception info
+ """
+ def __init__(self, test, example, exc_info):
+ self.test = test
+ self.example = example
+ self.exc_info = exc_info
+
+ def __str__(self):
+ return str(self.test)
+
+class DebugRunner(DocTestRunner):
+ r"""Run doc tests but raise an exception as soon as there is a failure.
+
+ If an unexpected exception occurs, an UnexpectedException is raised.
+ It contains the test, the example, and the original exception:
+
+ >>> runner = DebugRunner(verbose=False)
+ >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+ ... {}, 'foo', 'foo.py', 0)
+ >>> try:
+ ... runner.run(test)
+ ... except UnexpectedException, failure:
+ ... pass
+
+ >>> failure.test is test
+ True
+
+ >>> failure.example.want
+ '42\n'
+
+ >>> exc_info = failure.exc_info
+ >>> raise exc_info[0], exc_info[1], exc_info[2]
+ Traceback (most recent call last):
+ ...
+ KeyError
+
+ We wrap the original exception to give the calling application
+ access to the test and example information.
+
+ If the output doesn't match, then a DocTestFailure is raised:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 1
+ ... >>> x
+ ... 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> try:
+ ... runner.run(test)
+ ... except DocTestFailure, failure:
+ ... pass
+
+ DocTestFailure objects provide access to the test:
+
+ >>> failure.test is test
+ True
+
+ As well as to the example:
+
+ >>> failure.example.want
+ '2\n'
+
+ and the actual output:
+
+ >>> failure.got
+ '1\n'
+
+ If a failure or error occurs, the globals are left intact:
+
+ >>> del test.globs['__builtins__']
+ >>> test.globs
+ {'x': 1}
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 2
+ ... >>> raise KeyError
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> runner.run(test)
+ Traceback (most recent call last):
+ ...
+ UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
+
+ >>> del test.globs['__builtins__']
+ >>> test.globs
+ {'x': 2}
+
+ But the globals are cleared if there is no error:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> runner.run(test)
+ (0, 1)
+
+ >>> test.globs
+ {}
+
+ """
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ r = DocTestRunner.run(self, test, compileflags, out, False)
+ if clear_globs:
+ test.globs.clear()
+ return r
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ raise UnexpectedException(test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__. Unless isprivate is specified, private names
+ are not skipped.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See doctest.__doc__ for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Deprecated in Python 2.4:
+ Optional keyword arg "isprivate" specifies a function used to
+    determine whether a name is private.  The default function is to
+    treat all functions as public.  Optionally, "isprivate" can be
+ set to doctest.is_private to skip over functions marked as private
+ using the underscore naming convention; see its docs for details.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if isprivate is not None:
+ warnings.warn("the isprivate argument is deprecated; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning)
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
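+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# The conventional way a module runs its own doctests through testmod(),
+# combining option flags as needed:
+#
+#     if __name__ == '__main__':
+#         testmod(verbose=False,
+#                 optionflags=ELLIPSIS | REPORT_ONLY_FIRST_FAILURE)
+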
+def testfile(filename, module_relative=True, name=None, package=None,
+ globs=None, verbose=None, report=True, optionflags=0,
+ extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+ """
+ Test examples in the given file. Return (#failures, #tests).
+
+ Optional keyword arg "module_relative" specifies how filenames
+ should be interpreted:
+
+ - If "module_relative" is True (the default), then "filename"
+ specifies a module-relative path. By default, this path is
+ relative to the calling module's directory; but if the
+ "package" argument is specified, then it is relative to that
+ package. To ensure os-independence, "filename" should use
+ "/" characters to separate path segments, and should not
+ be an absolute path (i.e., it may not begin with "/").
+
+ - If "module_relative" is False, then "filename" specifies an
+ os-specific path. The path may be absolute or relative (to
+ the current working directory).
+
+ Optional keyword arg "name" gives the name of the test; by default
+ use the file's basename.
+
+ Optional keyword argument "package" is a Python package or the
+ name of a Python package whose directory should be used as the
+ base directory for a module relative filename. If no package is
+ specified, then the calling module's directory is used as the base
+ directory for module relative filenames. It is an error to
+ specify "package" if "module_relative" is False.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use {}. A copy of this dict
+ is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. Possible values (see the docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Optional keyword arg "parser" specifies a DocTestParser (or
+ subclass) that should be used to extract tests from the files.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path
+ if module_relative:
+ package = _normalize_module(package)
+ filename = _module_relative_path(package, filename)
+
+ # If no name was given, then use the file's name.
+ if name is None:
+ name = os.path.basename(filename)
+
+ # Assemble the globals.
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ # Read the file, convert it to a test, and run it.
+ s = open(filename).read()
+ test = parser.get_doctest(s, globs, name, filename, 0)
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
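+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# Typical use of testfile() on a module-relative text file of examples;
+# the file name 'test_examples.txt' is hypothetical.
+#
+#     failures, tries = testfile('test_examples.txt', verbose=False,
+#                                optionflags=NORMALIZE_WHITESPACE)
+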
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+ compileflags=None, optionflags=0):
+ """
+ Test examples in the given object's docstring (`f`), using `globs`
+ as globals. Optional argument `name` is used in failure messages.
+ If the optional argument `verbose` is true, then generate output
+ even if there are no failures.
+
+ `compileflags` gives the set of flags that should be used by the
+ Python compiler when running the examples. If not specified, then
+ it will default to the set of future-import flags that apply to
+ `globs`.
+
+ Optional keyword arg `optionflags` specifies options for the
+ testing and output. See the documentation for `testmod` for more
+ information.
+ """
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(verbose=verbose, recurse=False)
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+ for test in finder.find(f, name, globs=globs):
+ runner.run(test, compileflags=compileflags)
+
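+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# run_docstring_examples() checks a single object rather than a whole
+# module; `some_function` stands for any object carrying doctest examples.
+#
+#     run_docstring_examples(some_function,
+#                            globs={'some_function': some_function},
+#                            name='some_function', verbose=True)
+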
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility. It's not
+# actually used in any way.
+
+class Tester:
+ def __init__(self, mod=None, globs=None, verbose=None,
+ isprivate=None, optionflags=0):
+
+ warnings.warn("class Tester is deprecated; "
+ "use class doctest.DocTestRunner instead",
+ DeprecationWarning, stacklevel=2)
+ if mod is None and globs is None:
+ raise TypeError("Tester.__init__: must specify mod or globs")
+ if mod is not None and not inspect.ismodule(mod):
+ raise TypeError("Tester.__init__: mod must be a module; %r" %
+ (mod,))
+ if globs is None:
+ globs = mod.__dict__
+ self.globs = globs
+
+ self.verbose = verbose
+ self.isprivate = isprivate
+ self.optionflags = optionflags
+ self.testfinder = DocTestFinder(_namefilter=isprivate)
+ self.testrunner = DocTestRunner(verbose=verbose,
+ optionflags=optionflags)
+
+ def runstring(self, s, name):
+ test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+ if self.verbose:
+ print "Running string", name
+ (f,t) = self.testrunner.run(test)
+ if self.verbose:
+ print f, "of", t, "examples failed in string", name
+ return (f,t)
+
+ def rundoc(self, object, name=None, module=None):
+ f = t = 0
+ tests = self.testfinder.find(object, name, module=module,
+ globs=self.globs)
+ for test in tests:
+ (f2, t2) = self.testrunner.run(test)
+ (f,t) = (f+f2, t+t2)
+ return (f,t)
+
+ def rundict(self, d, name, module=None):
+ import new
+ m = new.module(name)
+ m.__dict__.update(d)
+ if module is None:
+ module = False
+ return self.rundoc(m, name, module)
+
+ def run__test__(self, d, name):
+ import new
+ m = new.module(name)
+ m.__test__ = d
+ return self.rundoc(m, name)
+
+ def summarize(self, verbose=None):
+ return self.testrunner.summarize(verbose)
+
+ def merge(self, other):
+ self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+ """Sets the unittest option flags.
+
+ The old flag is returned so that a runner could restore the old
+ value if it wished to:
+
+ >>> old = _unittest_reportflags
+ >>> set_unittest_reportflags(REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE) == old
+ True
+
+ >>> import doctest
+ >>> doctest._unittest_reportflags == (REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE)
+ True
+
+ Only reporting flags can be set:
+
+ >>> set_unittest_reportflags(ELLIPSIS)
+ Traceback (most recent call last):
+ ...
+ ValueError: ('Only reporting flags allowed', 8)
+
+ >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE)
+ True
+ """
+ global _unittest_reportflags
+
+ if (flags & REPORTING_FLAGS) != flags:
+ raise ValueError("Only reporting flags allowed", flags)
+ old = _unittest_reportflags
+ _unittest_reportflags = flags
+ return old
+
+
+class DocTestCase(unittest.TestCase):
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None):
+
+ unittest.TestCase.__init__(self)
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ def setUp(self):
+ test = self._dt_test
+
+ if self._dt_setUp is not None:
+ self._dt_setUp(test)
+
+ def tearDown(self):
+ test = self._dt_test
+
+ if self._dt_tearDown is not None:
+ self._dt_tearDown(test)
+
+ test.globs.clear()
+
+ def runTest(self):
+ test = self._dt_test
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ runner = DocTestRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False)
+
+ try:
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(
+ test, out=new.write, clear_globs=False)
+ finally:
+ sys.stdout = old
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def format_failure(self, err):
+ test = self._dt_test
+ if test.lineno is None:
+ lineno = 'unknown line number'
+ else:
+ lineno = '%s' % test.lineno
+ lname = '.'.join(test.name.split('.')[-1:])
+ return ('Failed doctest test for %s\n'
+ ' File "%s", line %s, in %s\n\n%s'
+ % (test.name, test.filename, lineno, lname, err)
+ )
+
+ def debug(self):
+ r"""Run the test case without results and without catching exceptions
+
+ The unit test framework includes a debug method on test cases
+ and test suites to support post-mortem debugging. The test code
+ is run in such a way that errors are not caught. This way a
+ caller can catch the errors and initiate post-mortem debugging.
+
+ The DocTestCase provides a debug method that raises
+        UnexpectedException errors if there is an unexpected
+ exception:
+
+ >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+ ... {}, 'foo', 'foo.py', 0)
+ >>> case = DocTestCase(test)
+ >>> try:
+ ... case.debug()
+ ... except UnexpectedException, failure:
+ ... pass
+
+ The UnexpectedException contains the test, the example, and
+ the original exception:
+
+ >>> failure.test is test
+ True
+
+ >>> failure.example.want
+ '42\n'
+
+ >>> exc_info = failure.exc_info
+ >>> raise exc_info[0], exc_info[1], exc_info[2]
+ Traceback (most recent call last):
+ ...
+ KeyError
+
+ If the output doesn't match, then a DocTestFailure is raised:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 1
+ ... >>> x
+ ... 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+ >>> case = DocTestCase(test)
+
+ >>> try:
+ ... case.debug()
+ ... except DocTestFailure, failure:
+ ... pass
+
+ DocTestFailure objects provide access to the test:
+
+ >>> failure.test is test
+ True
+
+ As well as to the example:
+
+ >>> failure.example.want
+ '2\n'
+
+ and the actual output:
+
+ >>> failure.got
+ '1\n'
+
+ """
+
+ self.setUp()
+ runner = DebugRunner(optionflags=self._dt_optionflags,
+ checker=self._dt_checker, verbose=False)
+ runner.run(self._dt_test)
+ self.tearDown()
+
+ def id(self):
+ return self._dt_test.name
+
+ def __repr__(self):
+ name = self._dt_test.name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+ """
+ Convert doctest tests for a module to a unittest test suite.
+
+ This converts each documentation string in a module that
+ contains doctest tests to a unittest test case. If any of the
+ tests in a doc string fail, then the test case fails. An exception
+ is raised showing the name of the file containing the test and a
+ (sometimes approximate) line number.
+
+ The `module` argument provides the module to be tested. The argument
+ can be either a module or a module name.
+
+ If no argument is given, the calling module is used.
+
+ A number of options may be provided as keyword arguments:
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+ """
+
+ if test_finder is None:
+ test_finder = DocTestFinder()
+
+ module = _normalize_module(module)
+ tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+ if globs is None:
+ globs = module.__dict__
+ if not tests:
+ # Why do we want to do this? Because it reveals a bug that might
+ # otherwise be hidden.
+ raise ValueError(module, "has no tests")
+
+ tests.sort()
+ suite = unittest.TestSuite()
+ for test in tests:
+ if len(test.examples) == 0:
+ continue
+ if not test.filename:
+ filename = module.__file__
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ test.filename = filename
+ suite.addTest(DocTestCase(test, **options))
+
+ return suite
+
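+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# A DocTestSuite plugs straight into the unittest machinery; the dotted
+# module name below is hypothetical.
+#
+#     suite = DocTestSuite('mypackage.mymodule', optionflags=REPORT_NDIFF)
+#     unittest.TextTestRunner(verbosity=2).run(suite)
+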
+class DocFileCase(DocTestCase):
+
+ def id(self):
+ return '_'.join(self._dt_test.name.split('.'))
+
+ def __repr__(self):
+ return self._dt_test.filename
+ __str__ = __repr__
+
+ def format_failure(self, err):
+ return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
+ % (self._dt_test.name, self._dt_test.filename, err)
+ )
+
+def DocFileTest(path, module_relative=True, package=None,
+ globs=None, parser=DocTestParser(), **options):
+ if globs is None:
+ globs = {}
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path.
+ if module_relative:
+ package = _normalize_module(package)
+ path = _module_relative_path(package, path)
+
+ # Find the file and read it.
+ name = os.path.basename(path)
+ doc = open(path).read()
+
+ # Convert it to a test, and wrap it in a DocFileCase.
+ test = parser.get_doctest(doc, globs, name, path, 0)
+ return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+ """
+ suite = unittest.TestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
+
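+# Editor's illustrative sketch -- not part of the upstream doctest module.
+# DocFileSuite() collects several text files of examples into one suite;
+# the file names below are hypothetical and module-relative by default.
+#
+#     suite = DocFileSuite('docs/usage.txt', 'docs/recipes.txt',
+#                          optionflags=ELLIPSIS)
+#     unittest.TextTestRunner().run(suite)
+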
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ r"""Extract script from text with examples.
+
+ Converts text with examples to a Python script. Example input is
+    converted to regular code.  Example output and all other text
+ are converted to comments:
+
+ >>> text = '''
+ ... Here are examples of simple math.
+ ...
+ ... Python has super accurate integer addition
+ ...
+ ... >>> 2 + 2
+ ... 5
+ ...
+ ... And very friendly error messages:
+ ...
+ ... >>> 1/0
+ ... To Infinity
+ ... And
+ ... Beyond
+ ...
+ ... You can use logic if you want:
+ ...
+ ... >>> if 0:
+ ... ... blah
+ ... ... blah
+ ... ...
+ ...
+ ... Ho hum
+ ... '''
+
+ >>> print script_from_examples(text)
+ # Here are examples of simple math.
+ #
+ # Python has super accurate integer addition
+ #
+ 2 + 2
+ # Expected:
+ ## 5
+ #
+ # And very friendly error messages:
+ #
+ 1/0
+ # Expected:
+ ## To Infinity
+ ## And
+ ## Beyond
+ #
+ # You can use logic if you want:
+ #
+ if 0:
+ blah
+ blah
+ #
+ # Ho hum
+ """
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ return '\n'.join(output)
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+ """Debug a single doctest docstring, in argument `src`'"""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
+ # docs say, a file so created cannot be opened by name a second time
+ # on modern Windows boxes, and execfile() needs to open it.
+ srcfilename = tempfile.mktemp(".py", "doctestdebug")
+ f = open(srcfilename, 'w')
+ f.write(src)
+ f.close()
+
+ try:
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ execfile(srcfilename, globs, globs)
+ except:
+ print sys.exc_info()[1]
+ pdb.post_mortem(sys.exc_info()[2])
+ else:
+ # Note that %r is vital here. '%s' instead can, e.g., cause
+ # backslashes to get treated as metacharacters on Windows.
+ pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+ finally:
+ os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
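+# Usage sketch for the helpers above (the module and test names below are
+# placeholders for whatever you are actually debugging):
+#
+#     testsrc = testsource('mypackage.mymodule', 'mypackage.mymodule.some_function')
+#     debug('mypackage.mymodule', 'mypackage.mymodule.some_function', pm=True)
+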
+######################################################################
+## 10. Example Usage
+######################################################################
+class _TestClass:
+ """
+ A pointless class, for sanity-checking of docstring testing.
+
+ Methods:
+ square()
+ get()
+
+ >>> _TestClass(13).get() + _TestClass(-12).get()
+ 1
+ >>> hex(_TestClass(13).square().get())
+ '0xa9'
+ """
+
+ def __init__(self, val):
+ """val -> _TestClass object with associated value val.
+
+ >>> t = _TestClass(123)
+ >>> print t.get()
+ 123
+ """
+
+ self.val = val
+
+ def square(self):
+ """square() -> square TestClass's associated value
+
+ >>> _TestClass(13).square().get()
+ 169
+ """
+
+ self.val = self.val ** 2
+ return self
+
+ def get(self):
+ """get() -> return TestClass's associated value.
+
+ >>> x = _TestClass(-42)
+ >>> print x.get()
+ -42
+ """
+
+ return self.val
+
+__test__ = {"_TestClass": _TestClass,
+ "string": r"""
+ Example of a string object, searched as-is.
+ >>> x = 1; y = 2
+ >>> x + y, x * y
+ (3, 2)
+ """,
+
+ "bool-int equivalence": r"""
+ In 2.2, boolean expressions displayed
+ 0 or 1. By default, we still accept
+ them. This can be disabled by passing
+ DONT_ACCEPT_TRUE_FOR_1 to the new
+ optionflags argument.
+ >>> 4 == 4
+ 1
+ >>> 4 == 4
+ True
+ >>> 4 > 4
+ 0
+ >>> 4 > 4
+ False
+ """,
+
+ "blank lines": r"""
+ Blank lines can be marked with <BLANKLINE>:
+ >>> print 'foo\n\nbar\n'
+ foo
+ <BLANKLINE>
+ bar
+ <BLANKLINE>
+ """,
+
+ "ellipsis": r"""
+ If the ellipsis flag is used, then '...' can be used to
+ elide substrings in the desired output:
+ >>> print range(1000) #doctest: +ELLIPSIS
+ [0, 1, 2, ..., 999]
+ """,
+
+ "whitespace normalization": r"""
+ If the whitespace normalization flag is used, then
+ differences in whitespace are ignored.
+ >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29]
+ """,
+ }
+
+def _test():
+ r = unittest.TextTestRunner()
+ r.run(DocTestSuite())
+
+if __name__ == "__main__":
+ _test()
diff --git a/paste/util/filemixin.py b/paste/util/filemixin.py
new file mode 100644
index 0000000..10a9e7c
--- /dev/null
+++ b/paste/util/filemixin.py
@@ -0,0 +1,53 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+class FileMixin(object):
+
+ """
+    Provides auxiliary methods for objects that simulate files.
+    Subclasses must implement write(), and read() if they are input
+    files; they should also implement close().
+
+ Other methods you may wish to override:
+ * flush()
+ * seek(offset[, whence])
+ * tell()
+ * truncate([size])
+
+ Attributes you may wish to provide:
+ * closed
+ * encoding (you should also respect that in write())
+ * mode
+ * newlines (hard to support)
+ * softspace
+ """
+
+ def flush(self):
+ pass
+
+ def next(self):
+ return self.readline()
+
+ def readline(self, size=None):
+ # @@: This is a lame implementation; but a buffer would probably
+ # be necessary for a better implementation
+ output = []
+ while 1:
+ next = self.read(1)
+ if not next:
+ return ''.join(output)
+ output.append(next)
+ if size and size > 0 and len(output) >= size:
+ return ''.join(output)
+ if next == '\n':
+ # @@: also \r?
+ return ''.join(output)
+
+ def xreadlines(self):
+ return self
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+
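+# A minimal sketch of how FileMixin is meant to be used; the ListWriter class
+# below is hypothetical and only illustrates the contract described above:
+#
+#     class ListWriter(FileMixin):
+#         def __init__(self):
+#             self.chunks = []
+#         def write(self, data):
+#             self.chunks.append(data)
+#         def close(self):
+#             pass
+#
+#     f = ListWriter()
+#     f.writelines(['a\n', 'b\n'])   # provided by FileMixin
+#     f.flush()                      # no-op default from FileMixin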
diff --git a/paste/util/finddata.py b/paste/util/finddata.py
new file mode 100644
index 0000000..af29c04
--- /dev/null
+++ b/paste/util/finddata.py
@@ -0,0 +1,99 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# Note: you may want to copy this into your setup.py file verbatim, as
+# you can't import this from another package, when you don't know if
+# that package is installed yet.
+
+import os
+import sys
+from fnmatch import fnmatchcase
+from distutils.util import convert_path
+
+# Provided as an attribute, so you can append to these instead
+# of replicating them:
+standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
+standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
+ './dist', 'EGG-INFO', '*.egg-info')
+
+def find_package_data(
+ where='.', package='',
+ exclude=standard_exclude,
+ exclude_directories=standard_exclude_directories,
+ only_in_packages=True,
+ show_ignored=False):
+ """
+ Return a dictionary suitable for use in ``package_data``
+ in a distutils ``setup.py`` file.
+
+ The dictionary looks like::
+
+ {'package': [files]}
+
+ Where ``files`` is a list of all the files in that package that
+ don't match anything in ``exclude``.
+
+ If ``only_in_packages`` is true, then top-level directories that
+ are not packages won't be included (but directories under packages
+ will).
+
+ Directories matching any pattern in ``exclude_directories`` will
+ be ignored; by default directories with leading ``.``, ``CVS``,
+ and ``_darcs`` will be ignored.
+
+ If ``show_ignored`` is true, then all the files that aren't
+ included in package data are shown on stderr (for debugging
+ purposes).
+
+ Note patterns use wildcards, or can be exact paths (including
+ leading ``./``), and all searching is case-insensitive.
+ """
+
+ out = {}
+ stack = [(convert_path(where), '', package, only_in_packages)]
+ while stack:
+ where, prefix, package, only_in_packages = stack.pop(0)
+ for name in os.listdir(where):
+ fn = os.path.join(where, name)
+ if os.path.isdir(fn):
+ bad_name = False
+ for pattern in exclude_directories:
+ if (fnmatchcase(name, pattern)
+ or fn.lower() == pattern.lower()):
+ bad_name = True
+ if show_ignored:
+ print >> sys.stderr, (
+ "Directory %s ignored by pattern %s"
+ % (fn, pattern))
+ break
+ if bad_name:
+ continue
+ if (os.path.isfile(os.path.join(fn, '__init__.py'))
+ and not prefix):
+ if not package:
+ new_package = name
+ else:
+ new_package = package + '.' + name
+ stack.append((fn, '', new_package, False))
+ else:
+ stack.append((fn, prefix + name + '/', package, only_in_packages))
+ elif package or not only_in_packages:
+ # is a file
+ bad_name = False
+ for pattern in exclude:
+ if (fnmatchcase(name, pattern)
+ or fn.lower() == pattern.lower()):
+ bad_name = True
+ if show_ignored:
+ print >> sys.stderr, (
+ "File %s ignored by pattern %s"
+ % (fn, pattern))
+ break
+ if bad_name:
+ continue
+ out.setdefault(package, []).append(prefix+name)
+ return out
+
+if __name__ == '__main__':
+ import pprint
+ pprint.pprint(
+ find_package_data(show_ignored=True))
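+
+# Sketch of feeding the result to a setup() call; the project and package
+# names are placeholders:
+#
+#     from setuptools import setup
+#     setup(
+#         name='MyProject',
+#         packages=['mypackage'],
+#         package_data=find_package_data(),  # scans '.' for non-code files
+#     )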
diff --git a/paste/util/findpackage.py b/paste/util/findpackage.py
new file mode 100644
index 0000000..68b5e8b
--- /dev/null
+++ b/paste/util/findpackage.py
@@ -0,0 +1,26 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+import sys
+import os
+
+def find_package(dir):
+ """
+ Given a directory, finds the equivalent package name. If it
+ is directly in sys.path, returns ''.
+ """
+ dir = os.path.abspath(dir)
+ orig_dir = dir
+ path = map(os.path.abspath, sys.path)
+ packages = []
+ last_dir = None
+ while 1:
+ if dir in path:
+ return '.'.join(packages)
+ packages.insert(0, os.path.basename(dir))
+ dir = os.path.dirname(dir)
+ if last_dir == dir:
+ raise ValueError(
+ "%s is not under any path found in sys.path" % orig_dir)
+ last_dir = dir
+
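+# Usage sketch; the path is illustrative and assumes site-packages is on
+# sys.path:
+#
+#     >>> find_package('/usr/lib/python2.4/site-packages/paste/util')
+#     'paste.util'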
diff --git a/paste/util/import_string.py b/paste/util/import_string.py
new file mode 100644
index 0000000..3feb4dd
--- /dev/null
+++ b/paste/util/import_string.py
@@ -0,0 +1,95 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+'imports' a string -- converts a string to a Python object, importing
+any necessary modules and evaluating the expression. Everything
+before the : in an import expression is the module path; everything
+after is an expression to be evaluated in the namespace of that
+module.
+
+Alternately, if no : is present, then import the modules and get the
+attributes as necessary. Arbitrary expressions are not allowed in
+that case.
+"""
+
+def eval_import(s):
+ """
+ Import a module, or import an object from a module.
+
+ A module name like ``foo.bar:baz()`` can be used, where
+ ``foo.bar`` is the module, and ``baz()`` is an expression
+ evaluated in the context of that module. Note this is not safe on
+ arbitrary strings because of the eval.
+ """
+ if ':' not in s:
+ return simple_import(s)
+ module_name, expr = s.split(':', 1)
+ module = import_module(module_name)
+ obj = eval(expr, module.__dict__)
+ return obj
+
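+# Usage sketch (results shown for a POSIX system):
+#
+#     >>> eval_import('os.path:join("a", "b")')
+#     'a/b'
+#     >>> eval_import('os.path.sep')
+#     '/'
+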
+def simple_import(s):
+ """
+ Import a module, or import an object from a module.
+
+ A name like ``foo.bar.baz`` can be a module ``foo.bar.baz`` or a
+ module ``foo.bar`` with an object ``baz`` in it, or a module
+ ``foo`` with an object ``bar`` with an attribute ``baz``.
+ """
+ parts = s.split('.')
+ module = import_module(parts[0])
+ name = parts[0]
+ parts = parts[1:]
+ last_import_error = None
+ while parts:
+ name += '.' + parts[0]
+ try:
+ module = import_module(name)
+ parts = parts[1:]
+ except ImportError, e:
+ last_import_error = e
+ break
+ obj = module
+ while parts:
+ try:
+ obj = getattr(module, parts[0])
+ except AttributeError:
+ raise ImportError(
+ "Cannot find %s in module %r (stopped importing modules with error %s)" % (parts[0], module, last_import_error))
+ parts = parts[1:]
+ return obj
+
+def import_module(s):
+ """
+ Import a module.
+ """
+ mod = __import__(s)
+ parts = s.split('.')
+ for part in parts[1:]:
+ mod = getattr(mod, part)
+ return mod
+
+def try_import_module(module_name):
+ """
+ Imports a module, but catches import errors. Only catches errors
+ when that module doesn't exist; if that module itself has an
+ import error it will still get raised. Returns None if the module
+ doesn't exist.
+ """
+ try:
+ return import_module(module_name)
+ except ImportError, e:
+ if not getattr(e, 'args', None):
+ raise
+ desc = e.args[0]
+ if not desc.startswith('No module named '):
+ raise
+ desc = desc[len('No module named '):]
+ # If you import foo.bar.baz, the bad import could be any
+ # of foo.bar.baz, bar.baz, or baz; we'll test them all:
+ parts = module_name.split('.')
+ for i in range(len(parts)):
+ if desc == '.'.join(parts[i:]):
+ return None
+ raise
diff --git a/paste/util/intset.py b/paste/util/intset.py
new file mode 100644
index 0000000..3873c75
--- /dev/null
+++ b/paste/util/intset.py
@@ -0,0 +1,511 @@
+# -*- coding: iso-8859-15 -*-
+"""Immutable integer set type.
+
+Integer set class.
+
+Copyright (C) 2006, Heiko Wundram.
+Released under the MIT license.
+"""
+
+# Version information
+# -------------------
+
+__author__ = "Heiko Wundram <me@modelnine.org>"
+__version__ = "0.2"
+__revision__ = "6"
+__date__ = "2006-01-20"
+
+
+# Utility classes
+# ---------------
+
+class _Infinity(object):
+ """Internal type used to represent infinity values."""
+
+ __slots__ = ["_neg"]
+
+ def __init__(self,neg):
+ self._neg = neg
+
+ def __lt__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return ( self._neg and
+ not ( isinstance(value,_Infinity) and value._neg ) )
+
+ def __le__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return self._neg
+
+ def __gt__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return not ( self._neg or
+ ( isinstance(value,_Infinity) and not value._neg ) )
+
+ def __ge__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return not self._neg
+
+ def __eq__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return isinstance(value,_Infinity) and self._neg == value._neg
+
+ def __ne__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return not isinstance(value,_Infinity) or self._neg <> value._neg
+
+ def __repr__(self):
+ return "None"
+
+
+# Constants
+# ---------
+
+_MININF = _Infinity(True)
+_MAXINF = _Infinity(False)
+
+
+# Integer set class
+# -----------------
+
+class IntSet(object):
+ """Integer set class with efficient storage in a RLE format of ranges.
+ Supports minus and plus infinity in the range."""
+
+ __slots__ = ["_ranges","_min","_max","_hash"]
+
+ def __init__(self,*args,**kwargs):
+ """Initialize an integer set. The constructor accepts an unlimited
+ number of arguments that may either be tuples in the form of
+ (start,stop) where either start or stop may be a number or None to
+ represent maximum/minimum in that direction. The range specified by
+ (start,stop) is always inclusive (differing from the builtin range
+ operator).
+
+ Keyword arguments that can be passed to an integer set are min and
+ max, which specify the minimum and maximum number in the set,
+ respectively. You can also pass None here to represent minus or plus
+ infinity, which is also the default.
+ """
+
+ # Special case copy constructor.
+ if len(args) == 1 and isinstance(args[0],IntSet):
+ if kwargs:
+ raise ValueError("No keyword arguments for copy constructor.")
+ self._min = args[0]._min
+ self._max = args[0]._max
+ self._ranges = args[0]._ranges
+ self._hash = args[0]._hash
+ return
+
+ # Initialize set.
+ self._ranges = []
+
+ # Process keyword arguments.
+ self._min = kwargs.pop("min",_MININF)
+ self._max = kwargs.pop("max",_MAXINF)
+ if self._min is None:
+ self._min = _MININF
+ if self._max is None:
+ self._max = _MAXINF
+
+ # Check keyword arguments.
+ if kwargs:
+ raise ValueError("Invalid keyword argument.")
+ if not ( isinstance(self._min,(int,long)) or self._min is _MININF ):
+ raise TypeError("Invalid type of min argument.")
+ if not ( isinstance(self._max,(int,long)) or self._max is _MAXINF ):
+ raise TypeError("Invalid type of max argument.")
+ if ( self._min is not _MININF and self._max is not _MAXINF and
+ self._min > self._max ):
+ raise ValueError("Minimum is not smaller than maximum.")
+ if isinstance(self._max,(int,long)):
+ self._max += 1
+
+ # Process arguments.
+ for arg in args:
+ if isinstance(arg,(int,long)):
+ start, stop = arg, arg+1
+ elif isinstance(arg,tuple):
+ if len(arg) <> 2:
+ raise ValueError("Invalid tuple, must be (start,stop).")
+
+ # Process argument.
+ start, stop = arg
+ if start is None:
+ start = self._min
+ if stop is None:
+ stop = self._max
+
+ # Check arguments.
+ if not ( isinstance(start,(int,long)) or start is _MININF ):
+ raise TypeError("Invalid type of tuple start.")
+ if not ( isinstance(stop,(int,long)) or stop is _MAXINF ):
+ raise TypeError("Invalid type of tuple stop.")
+ if ( start is not _MININF and stop is not _MAXINF and
+ start > stop ):
+ continue
+ if isinstance(stop,(int,long)):
+ stop += 1
+ else:
+ raise TypeError("Invalid argument.")
+
+ if start > self._max:
+ continue
+ elif start < self._min:
+ start = self._min
+ if stop < self._min:
+ continue
+ elif stop > self._max:
+ stop = self._max
+ self._ranges.append((start,stop))
+
+ # Normalize set.
+ self._normalize()
+
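+    # A short sketch of the accepted argument forms (values are illustrative):
+    #
+    #     >>> IntSet((10,20), 30)        # 10..20 inclusive, plus 30
+    #     IntSet((10,20),30)
+    #     >>> 15 in IntSet((10,20), 30)
+    #     True
+    #     >>> IntSet((None, 0))          # everything up to and including 0
+    #     IntSet((None,0))
+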
+ # Utility functions for set operations
+ # ------------------------------------
+
+ def _iterranges(self,r1,r2,minval=_MININF,maxval=_MAXINF):
+ curval = minval
+ curstates = {"r1":False,"r2":False}
+ imax, jmax = 2*len(r1), 2*len(r2)
+ i, j = 0, 0
+ while i < imax or j < jmax:
+ if i < imax and ( ( j < jmax and
+ r1[i>>1][i&1] < r2[j>>1][j&1] ) or
+ j == jmax ):
+ cur_r, newname, newstate = r1[i>>1][i&1], "r1", not (i&1)
+ i += 1
+ else:
+ cur_r, newname, newstate = r2[j>>1][j&1], "r2", not (j&1)
+ j += 1
+ if curval < cur_r:
+ if cur_r > maxval:
+ break
+ yield curstates, (curval,cur_r)
+ curval = cur_r
+ curstates[newname] = newstate
+ if curval < maxval:
+ yield curstates, (curval,maxval)
+
+ def _normalize(self):
+ self._ranges.sort()
+ i = 1
+ while i < len(self._ranges):
+ if self._ranges[i][0] < self._ranges[i-1][1]:
+ self._ranges[i-1] = (self._ranges[i-1][0],
+ max(self._ranges[i-1][1],
+ self._ranges[i][1]))
+ del self._ranges[i]
+ else:
+ i += 1
+ self._ranges = tuple(self._ranges)
+ self._hash = hash(self._ranges)
+
+ def __coerce__(self,other):
+ if isinstance(other,IntSet):
+ return self, other
+ elif isinstance(other,(int,long,tuple)):
+ try:
+ return self, self.__class__(other)
+ except TypeError:
+ # Catch a type error, in that case the structure specified by
+ # other is something we can't coerce, return NotImplemented.
+ # ValueErrors are not caught, they signal that the data was
+ # invalid for the constructor. This is appropriate to signal
+ # as a ValueError to the caller.
+ return NotImplemented
+ elif isinstance(other,list):
+ try:
+ return self, self.__class__(*other)
+ except TypeError:
+ # See above.
+ return NotImplemented
+ return NotImplemented
+
+ # Set function definitions
+ # ------------------------
+
+ def _make_function(name,type,doc,pall,pany=None):
+ """Makes a function to match two ranges. Accepts two types: either
+ 'set', which defines a function which returns a set with all ranges
+ matching pall (pany is ignored), or 'bool', which returns True if pall
+ matches for all ranges and pany matches for any one range. doc is the
+        docstring to give this function. pany may be None to ignore the any
+ match.
+
+ The predicates get a dict with two keys, 'r1', 'r2', which denote
+ whether the current range is present in range1 (self) and/or range2
+ (other) or none of the two, respectively."""
+
+ if type == "set":
+ def f(self,other):
+ coerced = self.__coerce__(other)
+ if coerced is NotImplemented:
+ return NotImplemented
+ other = coerced[1]
+ newset = self.__class__.__new__(self.__class__)
+ newset._min = min(self._min,other._min)
+ newset._max = max(self._max,other._max)
+ newset._ranges = []
+ for states, (start,stop) in \
+ self._iterranges(self._ranges,other._ranges,
+ newset._min,newset._max):
+ if pall(states):
+ if newset._ranges and newset._ranges[-1][1] == start:
+ newset._ranges[-1] = (newset._ranges[-1][0],stop)
+ else:
+ newset._ranges.append((start,stop))
+ newset._ranges = tuple(newset._ranges)
+                newset._hash = hash(newset._ranges)
+ return newset
+ elif type == "bool":
+ def f(self,other):
+ coerced = self.__coerce__(other)
+ if coerced is NotImplemented:
+ return NotImplemented
+ other = coerced[1]
+ _min = min(self._min,other._min)
+ _max = max(self._max,other._max)
+ found = not pany
+ for states, (start,stop) in \
+ self._iterranges(self._ranges,other._ranges,_min,_max):
+ if not pall(states):
+ return False
+ found = found or pany(states)
+ return found
+ else:
+ raise ValueError("Invalid type of function to create.")
+ try:
+ f.func_name = name
+ except TypeError:
+ pass
+ f.func_doc = doc
+ return f
+
+ # Intersection.
+ __and__ = _make_function("__and__","set",
+ "Intersection of two sets as a new set.",
+ lambda s: s["r1"] and s["r2"])
+ __rand__ = _make_function("__rand__","set",
+ "Intersection of two sets as a new set.",
+ lambda s: s["r1"] and s["r2"])
+ intersection = _make_function("intersection","set",
+ "Intersection of two sets as a new set.",
+ lambda s: s["r1"] and s["r2"])
+
+ # Union.
+ __or__ = _make_function("__or__","set",
+ "Union of two sets as a new set.",
+ lambda s: s["r1"] or s["r2"])
+ __ror__ = _make_function("__ror__","set",
+ "Union of two sets as a new set.",
+ lambda s: s["r1"] or s["r2"])
+ union = _make_function("union","set",
+ "Union of two sets as a new set.",
+ lambda s: s["r1"] or s["r2"])
+
+ # Difference.
+ __sub__ = _make_function("__sub__","set",
+ "Difference of two sets as a new set.",
+ lambda s: s["r1"] and not s["r2"])
+ __rsub__ = _make_function("__rsub__","set",
+ "Difference of two sets as a new set.",
+ lambda s: s["r2"] and not s["r1"])
+ difference = _make_function("difference","set",
+ "Difference of two sets as a new set.",
+ lambda s: s["r1"] and not s["r2"])
+
+ # Symmetric difference.
+ __xor__ = _make_function("__xor__","set",
+ "Symmetric difference of two sets as a new set.",
+ lambda s: s["r1"] ^ s["r2"])
+ __rxor__ = _make_function("__rxor__","set",
+ "Symmetric difference of two sets as a new set.",
+ lambda s: s["r1"] ^ s["r2"])
+ symmetric_difference = _make_function("symmetric_difference","set",
+ "Symmetric difference of two sets as a new set.",
+ lambda s: s["r1"] ^ s["r2"])
+
+ # Containership testing.
+ __contains__ = _make_function("__contains__","bool",
+ "Returns true if self is superset of other.",
+ lambda s: s["r1"] or not s["r2"])
+ issubset = _make_function("issubset","bool",
+ "Returns true if self is subset of other.",
+ lambda s: s["r2"] or not s["r1"])
+ istruesubset = _make_function("istruesubset","bool",
+ "Returns true if self is true subset of other.",
+ lambda s: s["r2"] or not s["r1"],
+ lambda s: s["r2"] and not s["r1"])
+ issuperset = _make_function("issuperset","bool",
+ "Returns true if self is superset of other.",
+ lambda s: s["r1"] or not s["r2"])
+ istruesuperset = _make_function("istruesuperset","bool",
+ "Returns true if self is true superset of other.",
+ lambda s: s["r1"] or not s["r2"],
+ lambda s: s["r1"] and not s["r2"])
+ overlaps = _make_function("overlaps","bool",
+ "Returns true if self overlaps with other.",
+ lambda s: True,
+ lambda s: s["r1"] and s["r2"])
+
+ # Comparison.
+ __eq__ = _make_function("__eq__","bool",
+ "Returns true if self is equal to other.",
+ lambda s: not ( s["r1"] ^ s["r2"] ))
+ __ne__ = _make_function("__ne__","bool",
+ "Returns true if self is different to other.",
+ lambda s: True,
+ lambda s: s["r1"] ^ s["r2"])
+
+ # Clean up namespace.
+ del _make_function
+
+ # Define other functions.
+ def inverse(self):
+ """Inverse of set as a new set."""
+
+ newset = self.__class__.__new__(self.__class__)
+ newset._min = self._min
+ newset._max = self._max
+ newset._ranges = []
+ laststop = self._min
+ for r in self._ranges:
+ if laststop < r[0]:
+ newset._ranges.append((laststop,r[0]))
+ laststop = r[1]
+ if laststop < self._max:
+ newset._ranges.append((laststop,self._max))
+ return newset
+
+ __invert__ = inverse
+
+ # Hashing
+ # -------
+
+ def __hash__(self):
+ """Returns a hash value representing this integer set. As the set is
+ always stored normalized, the hash value is guaranteed to match for
+ matching ranges."""
+
+ return self._hash
+
+ # Iterating
+ # ---------
+
+ def __len__(self):
+ """Get length of this integer set. In case the length is larger than
+ 2**31 (including infinitely sized integer sets), it raises an
+ OverflowError. This is due to len() restricting the size to
+ 0 <= len < 2**31."""
+
+ if not self._ranges:
+ return 0
+ if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
+ raise OverflowError("Infinitely sized integer set.")
+ rlen = 0
+ for r in self._ranges:
+ rlen += r[1]-r[0]
+ if rlen >= 2**31:
+ raise OverflowError("Integer set bigger than 2**31.")
+ return rlen
+
+ def len(self):
+ """Returns the length of this integer set as an integer. In case the
+ length is infinite, returns -1. This function exists because of a
+ limitation of the builtin len() function which expects values in
+ the range 0 <= len < 2**31. Use this function in case your integer
+ set might be larger."""
+
+ if not self._ranges:
+ return 0
+ if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
+ return -1
+ rlen = 0
+ for r in self._ranges:
+ rlen += r[1]-r[0]
+ return rlen
+
+ def __nonzero__(self):
+ """Returns true if this integer set contains at least one item."""
+
+ return bool(self._ranges)
+
+ def __iter__(self):
+ """Iterate over all values in this integer set. Iteration always starts
+ by iterating from lowest to highest over the ranges that are bounded.
+ After processing these, all ranges that are unbounded (maximum 2) are
+ yielded intermixed."""
+
+ ubranges = []
+ for r in self._ranges:
+ if r[0] is _MININF:
+ if r[1] is _MAXINF:
+ ubranges.extend(([0,1],[-1,-1]))
+ else:
+ ubranges.append([r[1]-1,-1])
+ elif r[1] is _MAXINF:
+ ubranges.append([r[0],1])
+ else:
+ for val in xrange(r[0],r[1]):
+ yield val
+ if ubranges:
+ while True:
+ for ubrange in ubranges:
+ yield ubrange[0]
+ ubrange[0] += ubrange[1]
+
+ # Printing
+ # --------
+
+ def __repr__(self):
+ """Return a representation of this integer set. The representation is
+ executable to get an equal integer set."""
+
+ rv = []
+ for start, stop in self._ranges:
+ if ( isinstance(start,(int,long)) and isinstance(stop,(int,long))
+ and stop-start == 1 ):
+ rv.append("%r" % start)
+ elif isinstance(stop,(int,long)):
+ rv.append("(%r,%r)" % (start,stop-1))
+ else:
+ rv.append("(%r,%r)" % (start,stop))
+ if self._min is not _MININF:
+ rv.append("min=%r" % self._min)
+ if self._max is not _MAXINF:
+ rv.append("max=%r" % self._max)
+ return "%s(%s)" % (self.__class__.__name__,",".join(rv))
+
+if __name__ == "__main__":
+ # Little test script demonstrating functionality.
+ x = IntSet((10,20),30)
+ y = IntSet((10,20))
+ z = IntSet((10,20),30,(15,19),min=0,max=40)
+ print x
+ print x&110
+ print x|110
+ print x^(15,25)
+ print x-12
+ print 12 in x
+ print x.issubset(x)
+ print y.issubset(x)
+ print x.istruesubset(x)
+ print y.istruesubset(x)
+ for val in x:
+ print val
+ print x.inverse()
+ print x == z
+ print x == y
+ print x <> y
+ print hash(x)
+ print hash(z)
+ print len(x)
+ print x.len()
diff --git a/paste/util/ip4.py b/paste/util/ip4.py
new file mode 100644
index 0000000..b0dfde8
--- /dev/null
+++ b/paste/util/ip4.py
@@ -0,0 +1,273 @@
+# -*- coding: iso-8859-15 -*-
+"""IP4 address range set implementation.
+
+Implements an IPv4-range type.
+
+Copyright (C) 2006, Heiko Wundram.
+Released under the MIT-license.
+"""
+
+# Version information
+# -------------------
+
+__author__ = "Heiko Wundram <me@modelnine.org>"
+__version__ = "0.2"
+__revision__ = "3"
+__date__ = "2006-01-20"
+
+
+# Imports
+# -------
+
+import intset
+import socket
+
+
+# IP4Range class
+# --------------
+
+class IP4Range(intset.IntSet):
+ """IP4 address range class with efficient storage of address ranges.
+ Supports all set operations."""
+
+ _MINIP4 = 0
+ _MAXIP4 = (1<<32) - 1
+ _UNITYTRANS = "".join([chr(n) for n in range(256)])
+ _IPREMOVE = "0123456789."
+
+ def __init__(self,*args):
+ """Initialize an ip4range class. The constructor accepts an unlimited
+ number of arguments that may either be tuples in the form (start,stop),
+ integers, longs or strings, where start and stop in a tuple may
+ also be of the form integer, long or string.
+
+ Passing an integer or long means passing an IPv4-address that's already
+ been converted to integer notation, whereas passing a string specifies
+ an address where this conversion still has to be done. A string
+ address may be in the following formats:
+
+ - 1.2.3.4 - a plain address, interpreted as a single address
+ - 1.2.3 - a set of addresses, interpreted as 1.2.3.0-1.2.3.255
+ - localhost - hostname to look up, interpreted as single address
+ - 1.2.3<->5 - a set of addresses, interpreted as 1.2.3.0-1.2.5.255
+ - 1.2.0.0/16 - a set of addresses, interpreted as 1.2.0.0-1.2.255.255
+
+ Only the first three notations are valid if you use a string address in
+ a tuple, whereby notation 2 is interpreted as 1.2.3.0 if specified as
+ lower bound and 1.2.3.255 if specified as upper bound, not as a range
+ of addresses.
+
+ Specifying a range is done with the <-> operator. This is necessary
+ because '-' might be present in a hostname. '<->' shouldn't be, ever.
+ """
+
+ # Special case copy constructor.
+ if len(args) == 1 and isinstance(args[0],IP4Range):
+ super(IP4Range,self).__init__(args[0])
+ return
+
+ # Convert arguments to tuple syntax.
+ args = list(args)
+ for i in range(len(args)):
+ argval = args[i]
+ if isinstance(argval,str):
+ if "<->" in argval:
+ # Type 4 address.
+ args[i] = self._parseRange(*argval.split("<->",1))
+ continue
+ elif "/" in argval:
+ # Type 5 address.
+ args[i] = self._parseMask(*argval.split("/",1))
+ else:
+ # Type 1, 2 or 3.
+ args[i] = self._parseAddrRange(argval)
+ elif isinstance(argval,tuple):
+                if len(argval) <> 2:
+ raise ValueError("Tuple is of invalid length.")
+ addr1, addr2 = argval
+ if isinstance(addr1,str):
+ addr1 = self._parseAddrRange(addr1)[0]
+ elif not isinstance(addr1,(int,long)):
+ raise TypeError("Invalid argument.")
+ if isinstance(addr2,str):
+ addr2 = self._parseAddrRange(addr2)[1]
+ elif not isinstance(addr2,(int,long)):
+ raise TypeError("Invalid argument.")
+ args[i] = (addr1,addr2)
+ elif not isinstance(argval,(int,long)):
+ raise TypeError("Invalid argument.")
+
+ # Initialize the integer set.
+ super(IP4Range,self).__init__(min=self._MINIP4,max=self._MAXIP4,*args)
+
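+    # A sketch of the accepted notations; the addresses are illustrative,
+    # taken from the 192.0.2.0/24 documentation block:
+    #
+    #     IP4Range("192.0.2.4")            # a single address
+    #     IP4Range("192.0.2")              # 192.0.2.0 - 192.0.2.255
+    #     IP4Range("192.0.2.0/24")         # the same block in mask notation
+    #     IP4Range("192.0.2<->3")          # 192.0.2.0 - 192.0.3.255
+    #     IP4Range(("192.0.2.10", "192.0.2.20"))   # explicit tuple bounds
+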
+ # Parsing functions
+ # -----------------
+
+ def _parseRange(self,addr1,addr2):
+ naddr1, naddr1len = _parseAddr(addr1)
+ naddr2, naddr2len = _parseAddr(addr2)
+ if naddr2len < naddr1len:
+ naddr2 += naddr1&(((1<<((naddr1len-naddr2len)*8))-1)<<
+ (naddr2len*8))
+ naddr2len = naddr1len
+ elif naddr2len > naddr1len:
+ raise ValueError("Range has more dots than address.")
+ naddr1 <<= (4-naddr1len)*8
+ naddr2 <<= (4-naddr2len)*8
+ naddr2 += (1<<((4-naddr2len)*8))-1
+ return (naddr1,naddr2)
+
+ def _parseMask(self,addr,mask):
+ naddr, naddrlen = _parseAddr(addr)
+ naddr <<= (4-naddrlen)*8
+ try:
+ if not mask:
+ masklen = 0
+ else:
+ masklen = int(mask)
+ if not 0 <= masklen <= 32:
+ raise ValueError
+ except ValueError:
+ try:
+                mask = _parseAddr(mask,False)[0]
+ except ValueError:
+ raise ValueError("Mask isn't parseable.")
+ remaining = 0
+ masklen = 0
+ if not mask:
+ masklen = 0
+ else:
+ while not (mask&1):
+ remaining += 1
+ while (mask&1):
+ mask >>= 1
+ masklen += 1
+ if remaining+masklen <> 32:
+ raise ValueError("Mask isn't a proper host mask.")
+ naddr1 = naddr & (((1<<masklen)-1)<<(32-masklen))
+ naddr2 = naddr1 + (1<<(32-masklen)) - 1
+ return (naddr1,naddr2)
+
+ def _parseAddrRange(self,addr):
+ naddr, naddrlen = _parseAddr(addr)
+ naddr1 = naddr<<((4-naddrlen)*8)
+ naddr2 = ( (naddr<<((4-naddrlen)*8)) +
+ (1<<((4-naddrlen)*8)) - 1 )
+ return (naddr1,naddr2)
+
+ # Utility functions
+ # -----------------
+
+ def _int2ip(self,num):
+ rv = []
+ for i in range(4):
+ rv.append(str(num&255))
+ num >>= 8
+ return ".".join(reversed(rv))
+
+ # Iterating
+ # ---------
+
+ def iteraddresses(self):
+ """Returns an iterator which iterates over ips in this iprange. An
+ IP is returned in string form (e.g. '1.2.3.4')."""
+
+ for v in super(IP4Range,self).__iter__():
+ yield self._int2ip(v)
+
+ def iterranges(self):
+ """Returns an iterator which iterates over ip-ip ranges which build
+ this iprange if combined. An ip-ip pair is returned in string form
+ (e.g. '1.2.3.4-2.3.4.5')."""
+
+ for r in self._ranges:
+ if r[1]-r[0] == 1:
+ yield self._int2ip(r[0])
+ else:
+ yield '%s-%s' % (self._int2ip(r[0]),self._int2ip(r[1]-1))
+
+ def itermasks(self):
+ """Returns an iterator which iterates over ip/mask pairs which build
+ this iprange if combined. An IP/Mask pair is returned in string form
+ (e.g. '1.2.3.0/24')."""
+
+ for r in self._ranges:
+ for v in self._itermasks(r):
+ yield v
+
+ def _itermasks(self,r):
+ ranges = [r]
+ while ranges:
+ cur = ranges.pop()
+ curmask = 0
+ while True:
+ curmasklen = 1<<(32-curmask)
+ start = (cur[0]+curmasklen-1)&(((1<<curmask)-1)<<(32-curmask))
+ if start >= cur[0] and start+curmasklen <= cur[1]:
+ break
+ else:
+ curmask += 1
+ yield "%s/%s" % (self._int2ip(start),curmask)
+ if cur[0] < start:
+ ranges.append((cur[0],start))
+ if cur[1] > start+curmasklen:
+ ranges.append((start+curmasklen,cur[1]))
+
+ __iter__ = iteraddresses
+
+ # Printing
+ # --------
+
+ def __repr__(self):
+ """Returns a string which can be used to reconstruct this iprange."""
+
+ rv = []
+ for start, stop in self._ranges:
+ if stop-start == 1:
+ rv.append("%r" % (self._int2ip(start),))
+ else:
+ rv.append("(%r,%r)" % (self._int2ip(start),
+ self._int2ip(stop-1)))
+ return "%s(%s)" % (self.__class__.__name__,",".join(rv))
+
+def _parseAddr(addr,lookup=True):
+ if lookup and addr.translate(IP4Range._UNITYTRANS, IP4Range._IPREMOVE):
+ try:
+ addr = socket.gethostbyname(addr)
+ except socket.error:
+ raise ValueError("Invalid Hostname as argument.")
+ naddr = 0
+ for naddrpos, part in enumerate(addr.split(".")):
+ if naddrpos >= 4:
+ raise ValueError("Address contains more than four parts.")
+ try:
+ if not part:
+ part = 0
+ else:
+ part = int(part)
+ if not 0 <= part < 256:
+ raise ValueError
+ except ValueError:
+ raise ValueError("Address part out of range.")
+ naddr <<= 8
+ naddr += part
+ return naddr, naddrpos+1
+
+def ip2int(addr, lookup=True):
+ return _parseAddr(addr, lookup=lookup)[0]
+
+if __name__ == "__main__":
+ # Little test script.
+ x = IP4Range("172.22.162.250/24")
+ y = IP4Range("172.22.162.250","172.22.163.250","172.22.163.253<->255")
+ print x
+ for val in x.itermasks():
+ print val
+ for val in y.itermasks():
+ print val
+ for val in (x|y).itermasks():
+ print val
+ for val in (x^y).iterranges():
+ print val
+ for val in x:
+ print val
diff --git a/paste/util/killthread.py b/paste/util/killthread.py
new file mode 100644
index 0000000..f1fc93f
--- /dev/null
+++ b/paste/util/killthread.py
@@ -0,0 +1,30 @@
+"""
+Kill a thread, from http://sebulba.wikispaces.com/recipe+thread2
+"""
+import types
+try:
+ import ctypes
+except ImportError:
+ raise ImportError(
+ "You cannot use paste.util.killthread without ctypes installed")
+if not hasattr(ctypes, 'pythonapi'):
+ raise ImportError(
+ "You cannot use paste.util.killthread without ctypes.pythonapi")
+
+def async_raise(tid, exctype):
+ """raises the exception, performs cleanup if needed.
+
+ tid is the value given by thread.get_ident() (an integer).
+ Raise SystemExit to kill a thread."""
+ if not isinstance(exctype, (types.ClassType, type)):
+ raise TypeError("Only types can be raised (not instances)")
+ if not isinstance(tid, int):
+ raise TypeError("tid must be an integer")
+ res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
+ if res == 0:
+ raise ValueError("invalid thread id")
+ elif res != 1:
+ # """if it returns a number greater than one, you're in trouble,
+ # and you should call it again with exc=NULL to revert the effect"""
+ ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), 0)
+ raise SystemError("PyThreadState_SetAsyncExc failed")
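+
+# Usage sketch; ``worker_tid`` is a placeholder for a thread id obtained via
+# thread.get_ident() inside the target thread:
+#
+#     async_raise(worker_tid, SystemExit)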
diff --git a/paste/util/looper.py b/paste/util/looper.py
new file mode 100644
index 0000000..03a6b42
--- /dev/null
+++ b/paste/util/looper.py
@@ -0,0 +1,152 @@
+"""
+Helper for looping over sequences, particularly in templates.
+
+Often in a loop in a template it's handy to know what comes next, what
+came previously, whether this is the first or last item in the sequence, etc.
+These can be awkward to manage in a normal Python loop, but using the
+looper you can get a better sense of the context. Use like::
+
+ >>> for loop, item in looper(['a', 'b', 'c']):
+ ... print loop.number, item
+ ... if not loop.last:
+ ... print '---'
+ 1 a
+ ---
+ 2 b
+ ---
+ 3 c
+
+"""
+
+__all__ = ['looper']
+
+class looper(object):
+ """
+ Helper for looping (particularly in templates)
+
+ Use this like::
+
+ for loop, item in looper(seq):
+ if loop.first:
+ ...
+ """
+
+ def __init__(self, seq):
+ self.seq = seq
+
+ def __iter__(self):
+ return looper_iter(self.seq)
+
+ def __repr__(self):
+ return '<%s for %r>' % (
+ self.__class__.__name__, self.seq)
+
+class looper_iter(object):
+
+ def __init__(self, seq):
+ self.seq = list(seq)
+ self.pos = 0
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.pos >= len(self.seq):
+ raise StopIteration
+ result = loop_pos(self.seq, self.pos), self.seq[self.pos]
+ self.pos += 1
+ return result
+
+class loop_pos(object):
+
+ def __init__(self, seq, pos):
+ self.seq = seq
+ self.pos = pos
+
+ def __repr__(self):
+ return '<loop pos=%r at %r>' % (
+ self.seq[pos], pos)
+
+ def index(self):
+ return self.pos
+ index = property(index)
+
+ def number(self):
+ return self.pos + 1
+ number = property(number)
+
+ def item(self):
+ return self.seq[self.pos]
+ item = property(item)
+
+ def next(self):
+ try:
+ return self.seq[self.pos+1]
+ except IndexError:
+ return None
+ next = property(next)
+
+ def previous(self):
+ if self.pos == 0:
+ return None
+ return self.seq[self.pos-1]
+ previous = property(previous)
+
+ def odd(self):
+ return not self.pos % 2
+ odd = property(odd)
+
+ def even(self):
+ return self.pos % 2
+ even = property(even)
+
+ def first(self):
+ return self.pos == 0
+ first = property(first)
+
+ def last(self):
+ return self.pos == len(self.seq)-1
+ last = property(last)
+
+ def length(self):
+ return len(self.seq)
+ length = property(length)
+
+ def first_group(self, getter=None):
+ """
+ Returns true if this item is the start of a new group,
+ where groups mean that some attribute has changed. The getter
+ can be None (the item itself changes), an attribute name like
+ ``'.attr'``, a function, or a dict key or list index.
+ """
+ if self.first:
+ return True
+ return self._compare_group(self.item, self.previous, getter)
+
+ def last_group(self, getter=None):
+ """
+ Returns true if this item is the end of a new group,
+ where groups mean that some attribute has changed. The getter
+ can be None (the item itself changes), an attribute name like
+ ``'.attr'``, a function, or a dict key or list index.
+ """
+ if self.last:
+ return True
+ return self._compare_group(self.item, self.next, getter)
+
+ def _compare_group(self, item, other, getter):
+ if getter is None:
+ return item != other
+ elif (isinstance(getter, basestring)
+ and getter.startswith('.')):
+ getter = getter[1:]
+ if getter.endswith('()'):
+ getter = getter[:-2]
+ return getattr(item, getter)() != getattr(other, getter)()
+ else:
+ return getattr(item, getter) != getattr(other, getter)
+ elif callable(getter):
+ return getter(item) != getter(other)
+ else:
+ return item[getter] != other[getter]
+
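+# The getter argument of first_group()/last_group() accepts several forms;
+# the attribute and key names below are illustrative:
+#
+#     loop.first_group()           # the item itself changed
+#     loop.first_group('.year')    # compares item.year
+#     loop.first_group('.year()')  # compares item.year()
+#     loop.first_group(len)        # compares len(item)
+#     loop.first_group('year')     # compares item['year']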
diff --git a/paste/util/mimeparse.py b/paste/util/mimeparse.py
new file mode 100644
index 0000000..fe699f7
--- /dev/null
+++ b/paste/util/mimeparse.py
@@ -0,0 +1,160 @@
+"""MIME-Type Parser
+
+This module provides basic functions for handling mime-types. It can handle
+matching mime-types against a list of media-ranges. See section 14.1 of
+the HTTP specification [RFC 2616] for a complete explanation.
+
+ http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
+
+Based on mimeparse 0.1.2 by Joe Gregorio:
+
+ http://code.google.com/p/mimeparse/
+
+Contents:
+ - parse_mime_type(): Parses a mime-type into its component parts.
+ - parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
+ - quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
+ - quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
+ - best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
+ - desired_matches(): Filter against a list of desired mime-types in the order the server prefers.
+
+"""
+
+
+def parse_mime_type(mime_type):
+ """Carves up a mime-type and returns a tuple of the
+ (type, subtype, params) where 'params' is a dictionary
+ of all the parameters for the media range.
+ For example, the media range 'application/xhtml;q=0.5' would
+ get parsed into:
+
+       ('application', 'xhtml', {'q': '0.5'})
+ """
+ type = mime_type.split(';')
+ type, plist = type[0], type[1:]
+ try:
+ type, subtype = type.split('/', 1)
+ except ValueError:
+ type, subtype = type.strip() or '*', '*'
+ else:
+ type = type.strip() or '*'
+ subtype = subtype.strip() or '*'
+ params = {}
+ for param in plist:
+ param = param.split('=', 1)
+ if len(param) == 2:
+ key, value = param[0].strip(), param[1].strip()
+ if key and value:
+ params[key] = value
+ return type, subtype, params
+
+def parse_media_range(range):
+ """Carves up a media range and returns a tuple of the
+ (type, subtype, params) where 'params' is a dictionary
+ of all the parameters for the media range.
+ For example, the media range 'application/*;q=0.5' would
+ get parsed into:
+
+       ('application', '*', {'q': '0.5'})
+
+ In addition this function also guarantees that there
+ is a value for 'q' in the params dictionary, filling it
+ in with a proper default if necessary.
+ """
+ type, subtype, params = parse_mime_type(range)
+ try:
+ if not 0 <= float(params['q']) <= 1:
+ raise ValueError
+ except (KeyError, ValueError):
+ params['q'] = '1'
+ return type, subtype, params
+
+def fitness_and_quality_parsed(mime_type, parsed_ranges):
+ """Find the best match for a given mime-type against
+ a list of media_ranges that have already been
+ parsed by parse_media_range(). Returns a tuple of
+ the fitness value and the value of the 'q' quality
+ parameter of the best match, or (-1, 0) if no match
+ was found. Just as for quality_parsed(), 'parsed_ranges'
+ must be a list of parsed media ranges."""
+ best_fitness, best_fit_q = -1, 0
+ target_type, target_subtype, target_params = parse_media_range(mime_type)
+ for type, subtype, params in parsed_ranges:
+ if (type == target_type
+ or type == '*' or target_type == '*') and (
+ subtype == target_subtype
+ or subtype == '*' or target_subtype == '*'):
+ fitness = 0
+ if type == target_type:
+ fitness += 100
+ if subtype == target_subtype:
+ fitness += 10
+ for key in target_params:
+ if key != 'q' and key in params:
+ if params[key] == target_params[key]:
+ fitness += 1
+ if fitness > best_fitness:
+ best_fitness = fitness
+ best_fit_q = params['q']
+ return best_fitness, float(best_fit_q)
+
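+# Worked sketch of the fitness scoring above (media ranges are illustrative):
+#
+#     >>> ranges = map(parse_media_range, 'text/*;q=0.5, text/html'.split(','))
+#     >>> fitness_and_quality_parsed('text/html', ranges)
+#     (110, 1.0)
+#     >>> fitness_and_quality_parsed('text/plain', ranges)
+#     (100, 0.5)
+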
+def quality_parsed(mime_type, parsed_ranges):
+ """Find the best match for a given mime-type against
+ a list of media_ranges that have already been
+ parsed by parse_media_range(). Returns the
+ 'q' quality parameter of the best match, 0 if no
+ match was found. This function behaves the same as quality()
+ except that 'parsed_ranges' must be a list of
+ parsed media ranges."""
+ return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
+
+def quality(mime_type, ranges):
+ """Returns the quality 'q' of a mime-type when compared
+ against the media-ranges in ranges. For example:
+
+ >>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
+ 0.7
+
+ """
+ parsed_ranges = map(parse_media_range, ranges.split(','))
+ return quality_parsed(mime_type, parsed_ranges)
+
+def best_match(supported, header):
+ """Takes a list of supported mime-types and finds the best
+ match for all the media-ranges listed in header. In case of
+ ambiguity, whatever comes first in the list will be chosen.
+ The value of header must be a string that conforms to the format
+ of the HTTP Accept: header. The value of 'supported' is a list
+ of mime-types.
+
+ >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
+ 'text/xml'
+ """
+ if not supported:
+ return ''
+ parsed_header = map(parse_media_range, header.split(','))
+ best_type = max([
+ (fitness_and_quality_parsed(mime_type, parsed_header), -n)
+ for n, mime_type in enumerate(supported)])
+ return best_type[0][1] and supported[-best_type[1]] or ''
+
+def desired_matches(desired, header):
+ """Takes a list of desired mime-types in the order the server prefers to
+ send them regardless of the browsers preference.
+
+ Browsers (such as Firefox) technically want XML over HTML depending on how
+ one reads the specification. This function is provided for a server to
+    declare a set of desired mime-types it supports, and returns the subset of
+    the desired list, in the same order, whose entries are accepted by the
+    browser's Accept header.
+
+ >>> desired_matches(['text/html', 'application/xml'], \
+ ... 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png')
+ ['text/html', 'application/xml']
+ >>> desired_matches(['text/html', 'application/xml'], 'application/xml,application/json')
+ ['application/xml']
+ """
+ parsed_ranges = map(parse_media_range, header.split(','))
+ return [mimetype for mimetype in desired
+ if quality_parsed(mimetype, parsed_ranges)]
+
diff --git a/paste/util/multidict.py b/paste/util/multidict.py
new file mode 100644
index 0000000..d3eb1e9
--- /dev/null
+++ b/paste/util/multidict.py
@@ -0,0 +1,397 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+import cgi
+import copy
+import sys
+from UserDict import DictMixin
+
+class MultiDict(DictMixin):
+
+ """
+ An ordered dictionary that can have multiple values for each key.
+ Adds the methods getall, getone, mixed, and add to the normal
+ dictionary interface.
+ """
+
+ def __init__(self, *args, **kw):
+ if len(args) > 1:
+ raise TypeError(
+ "MultiDict can only be called with one positional argument")
+ if args:
+ if hasattr(args[0], 'iteritems'):
+ items = list(args[0].iteritems())
+ elif hasattr(args[0], 'items'):
+ items = args[0].items()
+ else:
+ items = list(args[0])
+ self._items = items
+ else:
+ self._items = []
+ self._items.extend(kw.iteritems())
+
+ def __getitem__(self, key):
+ for k, v in self._items:
+ if k == key:
+ return v
+ raise KeyError(repr(key))
+
+ def __setitem__(self, key, value):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+ self._items.append((key, value))
+
+ def add(self, key, value):
+ """
+ Add the key and value, not overwriting any previous value.
+ """
+ self._items.append((key, value))
+
+ def getall(self, key):
+ """
+ Return a list of all values matching the key (may be an empty list)
+ """
+ result = []
+ for k, v in self._items:
+ if key == k:
+ result.append(v)
+ return result
+
+ def getone(self, key):
+ """
+ Get one value matching the key, raising a KeyError if multiple
+ values were found.
+ """
+ v = self.getall(key)
+ if not v:
+ raise KeyError('Key not found: %r' % key)
+ if len(v) > 1:
+ raise KeyError('Multiple values match %r: %r' % (key, v))
+ return v[0]
+
+ def mixed(self):
+ """
+ Returns a dictionary where the values are either single
+ values, or a list of values when a key/value appears more than
+ once in this dictionary. This is similar to the kind of
+ dictionary often used to represent the variables in a web
+ request.
+ """
+ result = {}
+ multi = {}
+ for key, value in self._items:
+ if key in result:
+ # We do this to not clobber any lists that are
+ # *actual* values in this dictionary:
+ if key in multi:
+ result[key].append(value)
+ else:
+ result[key] = [result[key], value]
+ multi[key] = None
+ else:
+ result[key] = value
+ return result
+
+ def dict_of_lists(self):
+ """
+ Returns a dictionary where each key is associated with a
+ list of values.
+ """
+ result = {}
+ for key, value in self._items:
+ if key in result:
+ result[key].append(value)
+ else:
+ result[key] = [value]
+ return result
+
+ def __delitem__(self, key):
+ items = self._items
+ found = False
+ for i in range(len(items)-1, -1, -1):
+ if items[i][0] == key:
+ del items[i]
+ found = True
+ if not found:
+ raise KeyError(repr(key))
+
+ def __contains__(self, key):
+ for k, v in self._items:
+ if k == key:
+ return True
+ return False
+
+ has_key = __contains__
+
+ def clear(self):
+ self._items = []
+
+ def copy(self):
+ return MultiDict(self)
+
+ def setdefault(self, key, default=None):
+ for k, v in self._items:
+ if key == k:
+ return v
+ self._items.append((key, default))
+ return default
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError, "pop expected at most 2 arguments, got "\
+ + repr(1 + len(args))
+ for i in range(len(self._items)):
+ if self._items[i][0] == key:
+ v = self._items[i][1]
+ del self._items[i]
+ return v
+ if args:
+ return args[0]
+ else:
+ raise KeyError(repr(key))
+
+ def popitem(self):
+ return self._items.pop()
+
+ def update(self, other=None, **kwargs):
+ if other is None:
+ pass
+ elif hasattr(other, 'items'):
+ self._items.extend(other.items())
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self._items.append((k, other[k]))
+ else:
+ for k, v in other:
+ self._items.append((k, v))
+ if kwargs:
+ self.update(kwargs)
+
+ def __repr__(self):
+ items = ', '.join(['(%r, %r)' % v for v in self._items])
+ return '%s([%s])' % (self.__class__.__name__, items)
+
+ def __len__(self):
+ return len(self._items)
+
+ ##
+ ## All the iteration:
+ ##
+
+ def keys(self):
+ return [k for k, v in self._items]
+
+ def iterkeys(self):
+ for k, v in self._items:
+ yield k
+
+ __iter__ = iterkeys
+
+ def items(self):
+ return self._items[:]
+
+ def iteritems(self):
+ return iter(self._items)
+
+ def values(self):
+ return [v for k, v in self._items]
+
+ def itervalues(self):
+ for k, v in self._items:
+ yield v
+
+class UnicodeMultiDict(DictMixin):
+ """
+ A MultiDict wrapper that decodes returned values to unicode on the
+ fly. Decoding is not applied to assigned values.
+
+    Keys are assumed to be ``str``; values are assumed to be ``str`` or
+    ``FieldStorage`` instances (as returned by the ``paste.request.parse_``
+    functions).
+
+ Can optionally also decode keys when the ``decode_keys`` argument is
+ True.
+
+ ``FieldStorage`` instances are cloned, and the clone's ``filename``
+ variable is decoded. Its ``name`` variable is decoded when ``decode_keys``
+ is enabled.
+
+ """
+ def __init__(self, multi=None, encoding=None, errors='strict',
+ decode_keys=False):
+ self.multi = multi
+ if encoding is None:
+ encoding = sys.getdefaultencoding()
+ self.encoding = encoding
+ self.errors = errors
+ self.decode_keys = decode_keys
+
+ def _decode_key(self, key):
+ if self.decode_keys:
+ try:
+ key = key.decode(self.encoding, self.errors)
+ except AttributeError:
+ pass
+ return key
+
+ def _decode_value(self, value):
+ """
+ Decode the specified value to unicode. Assumes value is a ``str`` or
+        ``FieldStorage`` object.
+
+ ``FieldStorage`` objects are specially handled.
+ """
+ if isinstance(value, cgi.FieldStorage):
+ # decode FieldStorage's field name and filename
+ value = copy.copy(value)
+ if self.decode_keys:
+ value.name = value.name.decode(self.encoding, self.errors)
+ value.filename = value.filename.decode(self.encoding, self.errors)
+ else:
+ try:
+ value = value.decode(self.encoding, self.errors)
+ except AttributeError:
+ pass
+ return value
+
+ def __getitem__(self, key):
+ return self._decode_value(self.multi.__getitem__(key))
+
+ def __setitem__(self, key, value):
+ self.multi.__setitem__(key, value)
+
+ def add(self, key, value):
+ """
+ Add the key and value, not overwriting any previous value.
+ """
+ self.multi.add(key, value)
+
+ def getall(self, key):
+ """
+ Return a list of all values matching the key (may be an empty list)
+ """
+ return [self._decode_value(v) for v in self.multi.getall(key)]
+
+ def getone(self, key):
+ """
+ Get one value matching the key, raising a KeyError if multiple
+ values were found.
+ """
+ return self._decode_value(self.multi.getone(key))
+
+ def mixed(self):
+ """
+ Returns a dictionary where the values are either single
+ values, or a list of values when a key/value appears more than
+ once in this dictionary. This is similar to the kind of
+ dictionary often used to represent the variables in a web
+ request.
+ """
+ unicode_mixed = {}
+ for key, value in self.multi.mixed().iteritems():
+ if isinstance(value, list):
+ value = [self._decode_value(value) for value in value]
+ else:
+ value = self._decode_value(value)
+ unicode_mixed[self._decode_key(key)] = value
+ return unicode_mixed
+
+ def dict_of_lists(self):
+ """
+ Returns a dictionary where each key is associated with a
+ list of values.
+ """
+ unicode_dict = {}
+ for key, value in self.multi.dict_of_lists().iteritems():
+ value = [self._decode_value(value) for value in value]
+ unicode_dict[self._decode_key(key)] = value
+ return unicode_dict
+
+ def __delitem__(self, key):
+ self.multi.__delitem__(key)
+
+ def __contains__(self, key):
+ return self.multi.__contains__(key)
+
+ has_key = __contains__
+
+ def clear(self):
+ self.multi.clear()
+
+ def copy(self):
+ return UnicodeMultiDict(self.multi.copy(), self.encoding, self.errors)
+
+ def setdefault(self, key, default=None):
+ return self._decode_value(self.multi.setdefault(key, default))
+
+ def pop(self, key, *args):
+ return self._decode_value(self.multi.pop(key, *args))
+
+ def popitem(self):
+ k, v = self.multi.popitem()
+ return (self._decode_key(k), self._decode_value(v))
+
+ def __repr__(self):
+ items = ', '.join(['(%r, %r)' % v for v in self.items()])
+ return '%s([%s])' % (self.__class__.__name__, items)
+
+ def __len__(self):
+ return self.multi.__len__()
+
+ ##
+ ## All the iteration:
+ ##
+
+ def keys(self):
+ return [self._decode_key(k) for k in self.multi.iterkeys()]
+
+ def iterkeys(self):
+ for k in self.multi.iterkeys():
+ yield self._decode_key(k)
+
+ __iter__ = iterkeys
+
+ def items(self):
+ return [(self._decode_key(k), self._decode_value(v)) for \
+ k, v in self.multi.iteritems()]
+
+ def iteritems(self):
+ for k, v in self.multi.iteritems():
+ yield (self._decode_key(k), self._decode_value(v))
+
+ def values(self):
+ return [self._decode_value(v) for v in self.multi.itervalues()]
+
+ def itervalues(self):
+ for v in self.multi.itervalues():
+ yield self._decode_value(v)
+
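+# Usage sketch for UnicodeMultiDict; the encoding and values are illustrative:
+#
+#     >>> d = UnicodeMultiDict(MultiDict(a='\xc3\xa9'), encoding='utf-8')
+#     >>> d['a']
+#     u'\xe9'
+#     >>> d.getall('a')
+#     [u'\xe9']
+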
+__test__ = {
+ 'general': """
+ >>> d = MultiDict(a=1, b=2)
+ >>> d['a']
+ 1
+ >>> d.getall('c')
+ []
+ >>> d.add('a', 2)
+ >>> d['a']
+ 1
+ >>> d.getall('a')
+ [1, 2]
+ >>> d['b'] = 4
+ >>> d.getall('b')
+ [4]
+ >>> d.keys()
+ ['a', 'a', 'b']
+ >>> d.items()
+ [('a', 1), ('a', 2), ('b', 4)]
+ >>> d.mixed()
+ {'a': [1, 2], 'b': 4}
+ >>> MultiDict([('a', 'b')], c=2)
+ MultiDict([('a', 'b'), ('c', 2)])
+ """}
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/paste/util/quoting.py b/paste/util/quoting.py
new file mode 100644
index 0000000..6184752
--- /dev/null
+++ b/paste/util/quoting.py
@@ -0,0 +1,98 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+import cgi
+import htmlentitydefs
+import urllib
+import re
+
+__all__ = ['html_quote', 'html_unquote', 'url_quote', 'url_unquote',
+ 'strip_html']
+
+default_encoding = 'UTF-8'
+
+def html_quote(v, encoding=None):
+ r"""
+ Quote the value (turned to a string) as HTML. This quotes <, >,
+ and quotes:
+
+ >>> html_quote(1)
+ '1'
+ >>> html_quote(None)
+ ''
+ >>> html_quote('<hey!>')
+ '&lt;hey!&gt;'
+ >>> html_quote(u'\u1029')
+ '\xe1\x80\xa9'
+ """
+ encoding = encoding or default_encoding
+ if v is None:
+ return ''
+ elif isinstance(v, str):
+ return cgi.escape(v, 1)
+ elif isinstance(v, unicode):
+ return cgi.escape(v.encode(encoding), 1)
+ else:
+ return cgi.escape(unicode(v).encode(encoding), 1)
+
+_unquote_re = re.compile(r'&([a-zA-Z]+);')
+def _entity_subber(match, name2c=htmlentitydefs.name2codepoint):
+ code = name2c.get(match.group(1))
+ if code:
+ return unichr(code)
+ else:
+ return match.group(0)
+
+def html_unquote(s, encoding=None):
+ r"""
+ Decode the value.
+
+ >>> html_unquote('&lt;hey&nbsp;you&gt;')
+ u'<hey\xa0you>'
+ >>> html_unquote('')
+ u''
+ >>> html_unquote('&blahblah;')
+ u'&blahblah;'
+ >>> html_unquote('\xe1\x80\xa9')
+ u'\u1029'
+ """
+ if isinstance(s, str):
+ if s == '':
+ # workaround re.sub('', '', u'') returning '' < 2.5.2
+ # instead of u'' >= 2.5.2
+ return u''
+ s = s.decode(encoding or default_encoding)
+ return _unquote_re.sub(_entity_subber, s)
+
+def strip_html(s):
+ # should this use html_unquote?
+ s = re.sub('<.*?>', '', s)
+ s = html_unquote(s)
+ return s
+
+def no_quote(s):
+ """
+ Quoting that doesn't do anything
+ """
+ return s
+
+_comment_quote_re = re.compile(r'\-\s*\>')
+# Everything but \r, \n, \t:
+_bad_chars_re = re.compile('[\x00-\x08\x0b-\x0c\x0e-\x1f]')
+def comment_quote(s):
+ """
+ Quote that makes sure text can't escape a comment
+ """
+ comment = str(s)
+ #comment = _bad_chars_re.sub('', comment)
+ #print 'in ', repr(str(s))
+ #print 'out', repr(comment)
+ comment = _comment_quote_re.sub('-&gt;', comment)
+ return comment
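+
+# Usage sketch:
+#
+#     >>> comment_quote('--> evil')
+#     '--&gt; evil'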
+
+url_quote = urllib.quote
+url_unquote = urllib.unquote
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
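Beyond the doctests above, a hedged sketch of how html_quote and comment_quote might be combined to embed untrusted text in a page (Python 2; assumes the paste package is importable, and the surrounding markup is made up for the example):

    from paste.util.quoting import html_quote, comment_quote

    user_text = u'</div> --> <script>alert(1)</script>'

    # html_quote escapes <, > and quote characters for inline HTML.
    print html_quote(user_text)

    # comment_quote keeps the text from terminating an HTML comment by
    # rewriting "->" sequences.
    print '<!-- %s -->' % comment_quote(user_text)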
diff --git a/paste/util/scgiserver.py b/paste/util/scgiserver.py
new file mode 100644
index 0000000..d20c952
--- /dev/null
+++ b/paste/util/scgiserver.py
@@ -0,0 +1,171 @@
+"""
+SCGI-->WSGI application proxy, "SWAP".
+
+(Originally written by Titus Brown.)
+
+This lets an SCGI front-end like mod_scgi be used to execute WSGI
+application objects. To use it, subclass the SWAP class like so::
+
+ class TestAppHandler(swap.SWAP):
+ def __init__(self, *args, **kwargs):
+ self.prefix = '/canal'
+ self.app_obj = TestAppClass
+ swap.SWAP.__init__(self, *args, **kwargs)
+
+where 'TestAppClass' is the application object from WSGI and '/canal'
+is the prefix for what is served by the SCGI Web-server-side process.
+
+Then execute the SCGI handler "as usual" by doing something like this::
+
+ scgi_server.SCGIServer(TestAppHandler, port=4000).serve()
+
+and point mod_scgi (or whatever your SCGI front end is) at port 4000.
+
+Kudos to the WSGI folk for writing a nice PEP & the Quixote folk for
+writing a nice extensible SCGI server for Python!
+"""
+
+import sys
+import time
+from scgi import scgi_server
+
+def debug(msg):
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S",
+ time.localtime(time.time()))
+ sys.stderr.write("[%s] %s\n" % (timestamp, msg))
+
+class SWAP(scgi_server.SCGIHandler):
+ """
+ SCGI->WSGI application proxy: let an SCGI server execute WSGI
+ application objects.
+ """
+ app_obj = None
+ prefix = None
+
+ def __init__(self, *args, **kwargs):
+ assert self.app_obj, "must set app_obj"
+ assert self.prefix is not None, "must set prefix"
+ args = (self,) + args
+ scgi_server.SCGIHandler.__init__(*args, **kwargs)
+
+ def handle_connection(self, conn):
+ """
+ Handle an individual connection.
+ """
+ input = conn.makefile("r")
+ output = conn.makefile("w")
+
+ environ = self.read_env(input)
+ environ['wsgi.input'] = input
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.version'] = (1, 0)
+ environ['wsgi.multithread'] = False
+ environ['wsgi.multiprocess'] = True
+ environ['wsgi.run_once'] = False
+
+ # dunno how SCGI does HTTPS signalling; can't test it myself... @CTB
+ if environ.get('HTTPS','off') in ('on','1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ ## SCGI does some weird environ manglement. We need to set
+ ## SCRIPT_NAME from 'prefix' and then set PATH_INFO from
+ ## REQUEST_URI.
+
+ prefix = self.prefix
+ path = environ['REQUEST_URI'][len(prefix):].split('?', 1)[0]
+
+ environ['SCRIPT_NAME'] = prefix
+ environ['PATH_INFO'] = path
+
+ headers_set = []
+ headers_sent = []
+ chunks = []
+ def write(data):
+ chunks.append(data)
+
+ def start_response(status, response_headers, exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ elif headers_set:
+ raise AssertionError("Headers already set!")
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ ###
+
+ result = self.app_obj(environ, start_response)
+ try:
+ for data in result:
+ chunks.append(data)
+
+ # Before the first output, send the stored headers
+ if not headers_set:
+ # Error -- the app never called start_response
+ status = '500 Server Error'
+ response_headers = [('Content-type', 'text/html')]
+ chunks = ["XXX start_response never called"]
+ else:
+ status, response_headers = headers_sent[:] = headers_set
+
+ output.write('Status: %s\r\n' % status)
+ for header in response_headers:
+ output.write('%s: %s\r\n' % header)
+ output.write('\r\n')
+
+ for data in chunks:
+ output.write(data)
+ finally:
+ if hasattr(result,'close'):
+ result.close()
+
+ # SCGI backends use connection closing to signal 'fini'.
+ try:
+ input.close()
+ output.close()
+ conn.close()
+ except IOError, err:
+ debug("IOError while closing connection ignored: %s" % err)
+
+
+def serve_application(application, prefix, port=None, host=None, max_children=None):
+ """
+ Serve the specified WSGI application via SCGI proxy.
+
+ ``application``
+ The WSGI application to serve.
+
+ ``prefix``
+ The prefix for what is served by the SCGI Web-server-side process.
+
+ ``port``
+ Optional port to bind the SCGI proxy to. Defaults to SCGIServer's
+ default port value.
+
+ ``host``
+ Optional host to bind the SCGI proxy to. Defaults to SCGIServer's
+ default host value.
+
+ ``max_children``
+ Optional maximum number of child processes the SCGIServer will
+ spawn. Defaults to SCGIServer's default max_children value.
+ """
+ class SCGIAppHandler(SWAP):
+ def __init__ (self, *args, **kwargs):
+ self.prefix = prefix
+ self.app_obj = application
+ SWAP.__init__(self, *args, **kwargs)
+
+ kwargs = dict(handler_class=SCGIAppHandler)
+ for kwarg in ('host', 'port', 'max_children'):
+ if locals()[kwarg] is not None:
+ kwargs[kwarg] = locals()[kwarg]
+
+ scgi_server.SCGIServer(**kwargs).serve()
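As a quick orientation, a minimal sketch of wiring serve_application up to a trivial WSGI app (assumes the third-party scgi package imported above is installed; the app, prefix and port are made up for the example):

    from paste.util.scgiserver import serve_application

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ['Hello from %s\n' % environ.get('PATH_INFO', '/')]

    if __name__ == '__main__':
        # Point mod_scgi (or another SCGI front end) at port 4000 and have
        # it forward requests whose URL starts with /canal.
        serve_application(hello_app, prefix='/canal', port=4000)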
diff --git a/paste/util/string24.py b/paste/util/string24.py
new file mode 100644
index 0000000..7c0e001
--- /dev/null
+++ b/paste/util/string24.py
@@ -0,0 +1,531 @@
+"""A collection of string operations (most are no longer used).
+
+Warning: most of the code you see here isn't normally used nowadays.
+Beginning with Python 1.6, many of these functions are implemented as
+methods on the standard string object. They used to be implemented by
+a built-in module called strop, but strop is now obsolete itself.
+
+Public module variables:
+
+whitespace -- a string containing all characters considered whitespace
+lowercase -- a string containing all characters considered lowercase letters
+uppercase -- a string containing all characters considered uppercase letters
+letters -- a string containing all characters considered letters
+digits -- a string containing all characters considered decimal digits
+hexdigits -- a string containing all characters considered hexadecimal digits
+octdigits -- a string containing all characters considered octal digits
+punctuation -- a string containing all characters considered punctuation
+printable -- a string containing all characters considered printable
+
+"""
+
+# Some strings for ctype-style character classification
+whitespace = ' \t\n\r\v\f'
+lowercase = 'abcdefghijklmnopqrstuvwxyz'
+uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+letters = lowercase + uppercase
+ascii_lowercase = lowercase
+ascii_uppercase = uppercase
+ascii_letters = ascii_lowercase + ascii_uppercase
+digits = '0123456789'
+hexdigits = digits + 'abcdef' + 'ABCDEF'
+octdigits = '01234567'
+punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
+printable = digits + letters + punctuation + whitespace
+
+# Case conversion helpers
+# Use str to convert Unicode literal in case of -U
+# Note that Cookie.py bogusly uses _idmap :(
+l = map(chr, xrange(256))
+_idmap = str('').join(l)
+del l
+
+# Functions which aren't available as string methods.
+
+# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
+# See also regsub.capwords().
+def capwords(s, sep=None):
+ """capwords(s, [sep]) -> string
+
+ Split the argument into words using split, capitalize each
+ word using capitalize, and join the capitalized words using
+ join. Note that this replaces runs of whitespace characters by
+ a single space.
+
+ """
+ return (sep or ' ').join([x.capitalize() for x in s.split(sep)])
+
+
+# Construct a translation string
+_idmapL = None
+def maketrans(fromstr, tostr):
+ """maketrans(frm, to) -> string
+
+ Return a translation table (a string of 256 bytes long)
+ suitable for use in string.translate. The strings frm and to
+ must be of the same length.
+
+ """
+ if len(fromstr) != len(tostr):
+ raise ValueError, "maketrans arguments must have same length"
+ global _idmapL
+ if not _idmapL:
+ _idmapL = map(None, _idmap)
+ L = _idmapL[:]
+ fromstr = map(ord, fromstr)
+ for i in range(len(fromstr)):
+ L[fromstr[i]] = tostr[i]
+ return ''.join(L)
+
+
+
+####################################################################
+import re as _re
+
+class _multimap:
+ """Helper class for combining multiple mappings.
+
+ Used by .{safe_,}substitute() to combine the mapping and keyword
+ arguments.
+ """
+ def __init__(self, primary, secondary):
+ self._primary = primary
+ self._secondary = secondary
+
+ def __getitem__(self, key):
+ try:
+ return self._primary[key]
+ except KeyError:
+ return self._secondary[key]
+
+
+class _TemplateMetaclass(type):
+ pattern = r"""
+ %(delim)s(?:
+ (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
+ (?P<named>%(id)s) | # delimiter and a Python identifier
+ {(?P<braced>%(id)s)} | # delimiter and a braced identifier
+ (?P<invalid>) # Other ill-formed delimiter exprs
+ )
+ """
+
+ def __init__(cls, name, bases, dct):
+ super(_TemplateMetaclass, cls).__init__(name, bases, dct)
+ if 'pattern' in dct:
+ pattern = cls.pattern
+ else:
+ pattern = _TemplateMetaclass.pattern % {
+ 'delim' : _re.escape(cls.delimiter),
+ 'id' : cls.idpattern,
+ }
+ cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
+
+
+class Template:
+ """A string class for supporting $-substitutions."""
+ __metaclass__ = _TemplateMetaclass
+
+ delimiter = '$'
+ idpattern = r'[_a-z][_a-z0-9]*'
+
+ def __init__(self, template):
+ self.template = template
+
+ # Search for $$, $identifier, ${identifier}, and any bare $'s
+
+ def _invalid(self, mo):
+ i = mo.start('invalid')
+ lines = self.template[:i].splitlines(True)
+ if not lines:
+ colno = 1
+ lineno = 1
+ else:
+ colno = i - len(''.join(lines[:-1]))
+ lineno = len(lines)
+ raise ValueError('Invalid placeholder in string: line %d, col %d' %
+ (lineno, colno))
+
+ def substitute(self, *args, **kws):
+ if len(args) > 1:
+ raise TypeError('Too many positional arguments')
+ if not args:
+ mapping = kws
+ elif kws:
+ mapping = _multimap(kws, args[0])
+ else:
+ mapping = args[0]
+ # Helper function for .sub()
+ def convert(mo):
+ # Check the most common path first.
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ val = mapping[named]
+ # We use this idiom instead of str() because the latter will
+ # fail if val is a Unicode containing non-ASCII characters.
+ return '%s' % val
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ self._invalid(mo)
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+ def safe_substitute(self, *args, **kws):
+ if len(args) > 1:
+ raise TypeError('Too many positional arguments')
+ if not args:
+ mapping = kws
+ elif kws:
+ mapping = _multimap(kws, args[0])
+ else:
+ mapping = args[0]
+ # Helper function for .sub()
+ def convert(mo):
+ named = mo.group('named')
+ if named is not None:
+ try:
+ # We use this idiom instead of str() because the latter
+ # will fail if val is a Unicode containing non-ASCII
+ return '%s' % mapping[named]
+ except KeyError:
+ return self.delimiter + named
+ braced = mo.group('braced')
+ if braced is not None:
+ try:
+ return '%s' % mapping[braced]
+ except KeyError:
+ return self.delimiter + '{' + braced + '}'
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ return self.delimiter
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+
+
+####################################################################
+# NOTE: Everything below here is deprecated. Use string methods instead.
+# This stuff will go away in Python 3.0.
+
+# Backward compatible names for exceptions
+index_error = ValueError
+atoi_error = ValueError
+atof_error = ValueError
+atol_error = ValueError
+
+# convert UPPER CASE letters to lower case
+def lower(s):
+ """lower(s) -> string
+
+ Return a copy of the string s converted to lowercase.
+
+ """
+ return s.lower()
+
+# Convert lower case letters to UPPER CASE
+def upper(s):
+ """upper(s) -> string
+
+ Return a copy of the string s converted to uppercase.
+
+ """
+ return s.upper()
+
+# Swap lower case letters and UPPER CASE
+def swapcase(s):
+ """swapcase(s) -> string
+
+ Return a copy of the string s with upper case characters
+ converted to lowercase and vice versa.
+
+ """
+ return s.swapcase()
+
+# Strip leading and trailing tabs and spaces
+def strip(s, chars=None):
+ """strip(s [,chars]) -> string
+
+ Return a copy of the string s with leading and trailing
+ whitespace removed.
+ If chars is given and not None, remove characters in chars instead.
+ If chars is unicode, S will be converted to unicode before stripping.
+
+ """
+ return s.strip(chars)
+
+# Strip leading tabs and spaces
+def lstrip(s, chars=None):
+ """lstrip(s [,chars]) -> string
+
+ Return a copy of the string s with leading whitespace removed.
+ If chars is given and not None, remove characters in chars instead.
+
+ """
+ return s.lstrip(chars)
+
+# Strip trailing tabs and spaces
+def rstrip(s, chars=None):
+ """rstrip(s [,chars]) -> string
+
+ Return a copy of the string s with trailing whitespace removed.
+ If chars is given and not None, remove characters in chars instead.
+
+ """
+ return s.rstrip(chars)
+
+
+# Split a string into a list of space/tab-separated words
+def split(s, sep=None, maxsplit=-1):
+ """split(s [,sep [,maxsplit]]) -> list of strings
+
+ Return a list of the words in the string s, using sep as the
+ delimiter string. If maxsplit is given, splits at no more than
+ maxsplit places (resulting in at most maxsplit+1 words). If sep
+ is not specified or is None, any whitespace string is a separator.
+
+ (split and splitfields are synonymous)
+
+ """
+ return s.split(sep, maxsplit)
+splitfields = split
+
+# Split a string into a list of space/tab-separated words
+def rsplit(s, sep=None, maxsplit=-1):
+ """rsplit(s [,sep [,maxsplit]]) -> list of strings
+
+ Return a list of the words in the string s, using sep as the
+ delimiter string, starting at the end of the string and working
+ to the front. If maxsplit is given, at most maxsplit splits are
+ done. If sep is not specified or is None, any whitespace string
+ is a separator.
+ """
+ return s.rsplit(sep, maxsplit)
+
+# Join fields with optional separator
+def join(words, sep = ' '):
+ """join(list [,sep]) -> string
+
+ Return a string composed of the words in list, with
+ intervening occurrences of sep. The default separator is a
+ single space.
+
+ (joinfields and join are synonymous)
+
+ """
+ return sep.join(words)
+joinfields = join
+
+# Find substring, raise exception if not found
+def index(s, *args):
+ """index(s, sub [,start [,end]]) -> int
+
+ Like find but raises ValueError when the substring is not found.
+
+ """
+ return s.index(*args)
+
+# Find last substring, raise exception if not found
+def rindex(s, *args):
+ """rindex(s, sub [,start [,end]]) -> int
+
+ Like rfind but raises ValueError when the substring is not found.
+
+ """
+ return s.rindex(*args)
+
+# Count non-overlapping occurrences of substring
+def count(s, *args):
+ """count(s, sub[, start[,end]]) -> int
+
+ Return the number of occurrences of substring sub in string
+ s[start:end]. Optional arguments start and end are
+ interpreted as in slice notation.
+
+ """
+ return s.count(*args)
+
+# Find substring, return -1 if not found
+def find(s, *args):
+ """find(s, sub [,start [,end]]) -> in
+
+ Return the lowest index in s where substring sub is found,
+ such that sub is contained within s[start,end]. Optional
+ arguments start and end are interpreted as in slice notation.
+
+ Return -1 on failure.
+
+ """
+ return s.find(*args)
+
+# Find last substring, return -1 if not found
+def rfind(s, *args):
+ """rfind(s, sub [,start [,end]]) -> int
+
+ Return the highest index in s where substring sub is found,
+ such that sub is contained within s[start,end]. Optional
+ arguments start and end are interpreted as in slice notation.
+
+ Return -1 on failure.
+
+ """
+ return s.rfind(*args)
+
+# for a bit of speed
+_float = float
+_int = int
+_long = long
+
+# Convert string to float
+def atof(s):
+ """atof(s) -> float
+
+ Return the floating point number represented by the string s.
+
+ """
+ return _float(s)
+
+
+# Convert string to integer
+def atoi(s , base=10):
+ """atoi(s [,base]) -> int
+
+ Return the integer represented by the string s in the given
+ base, which defaults to 10. The string s must consist of one
+ or more digits, possibly preceded by a sign. If base is 0, it
+ is chosen from the leading characters of s, 0 for octal, 0x or
+ 0X for hexadecimal. If base is 16, a preceding 0x or 0X is
+ accepted.
+
+ """
+ return _int(s, base)
+
+
+# Convert string to long integer
+def atol(s, base=10):
+ """atol(s [,base]) -> long
+
+ Return the long integer represented by the string s in the
+ given base, which defaults to 10. The string s must consist
+ of one or more digits, possibly preceded by a sign. If base
+ is 0, it is chosen from the leading characters of s, 0 for
+ octal, 0x or 0X for hexadecimal. If base is 16, a preceding
+ 0x or 0X is accepted. A trailing L or l is not accepted,
+ unless base is 0.
+
+ """
+ return _long(s, base)
+
+
+# Left-justify a string
+def ljust(s, width, *args):
+ """ljust(s, width[, fillchar]) -> string
+
+ Return a left-justified version of s, in a field of the
+ specified width, padded with spaces as needed. The string is
+ never truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.ljust(width, *args)
+
+# Right-justify a string
+def rjust(s, width, *args):
+ """rjust(s, width[, fillchar]) -> string
+
+ Return a right-justified version of s, in a field of the
+ specified width, padded with spaces as needed. The string is
+ never truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.rjust(width, *args)
+
+# Center a string
+def center(s, width, *args):
+ """center(s, width[, fillchar]) -> string
+
+ Return a centered version of s, in a field of the specified
+ width, padded with spaces as needed. The string is never
+ truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.center(width, *args)
+
+# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
+# Decadent feature: the argument may be a string or a number
+# (Use of this is deprecated; it should be a string as with ljust c.s.)
+def zfill(x, width):
+ """zfill(x, width) -> string
+
+ Pad a numeric string x with zeros on the left, to fill a field
+ of the specified width. The string x is never truncated.
+
+ """
+ if not isinstance(x, basestring):
+ x = repr(x)
+ return x.zfill(width)
+
+# Expand tabs in a string.
+# Doesn't take non-printing chars into account, but does understand \n.
+def expandtabs(s, tabsize=8):
+ """expandtabs(s [,tabsize]) -> string
+
+ Return a copy of the string s with all tab characters replaced
+ by the appropriate number of spaces, depending on the current
+ column, and the tabsize (default 8).
+
+ """
+ return s.expandtabs(tabsize)
+
+# Character translation through look-up table.
+def translate(s, table, deletions=""):
+ """translate(s,table [,deletions]) -> string
+
+ Return a copy of the string s, where all characters occurring
+ in the optional argument deletions are removed, and the
+ remaining characters have been mapped through the given
+ translation table, which must be a string of length 256. The
+ deletions argument is not allowed for Unicode strings.
+
+ """
+ if deletions:
+ return s.translate(table, deletions)
+ else:
+ # Add s[:0] so that if s is Unicode and table is an 8-bit string,
+ # table is converted to Unicode. This means that table *cannot*
+ # be a dictionary -- for that feature, use u.translate() directly.
+ return s.translate(table + s[:0])
+
+# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
+def capitalize(s):
+ """capitalize(s) -> string
+
+ Return a copy of the string s with only its first character
+ capitalized.
+
+ """
+ return s.capitalize()
+
+# Substring replacement (global)
+def replace(s, old, new, maxsplit=-1):
+ """replace (str, old, new[, maxsplit]) -> string
+
+ Return a copy of string str with all occurrences of substring
+ old replaced by new. If the optional argument maxsplit is
+ given, only the first maxsplit occurrences are replaced.
+
+ """
+ return s.replace(old, new, maxsplit)
+
+
+# Try importing optional built-in module "strop" -- if it exists,
+# it redefines some string operations that are 100-1000 times faster.
+# It also defines values for whitespace, lowercase and uppercase
+# that match <ctype.h>'s definitions.
+
+try:
+ from strop import maketrans, lowercase, uppercase, whitespace
+ letters = lowercase + uppercase
+except ImportError:
+ pass # Use the original versions
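Most of string24.py simply forwards to string methods, but the Template class above is the Python 2.4 $-substitution API carried along for older interpreters. A short hedged example of its behaviour (Python 2; the import path is the vendored module added in this diff):

    from paste.util.string24 import Template

    t = Template('Hello $name, your total is ${amount} dollars')
    print t.substitute(name='Ada', amount=3)
    # -> Hello Ada, your total is 3 dollars

    # safe_substitute leaves unknown placeholders in place instead of
    # raising KeyError:
    print t.safe_substitute(name='Ada')
    # -> Hello Ada, your total is ${amount} dollars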
diff --git a/paste/util/subprocess24.py b/paste/util/subprocess24.py
new file mode 100644
index 0000000..57ec119
--- /dev/null
+++ b/paste/util/subprocess24.py
@@ -0,0 +1,1152 @@
+# subprocess - Subprocesses with accessible I/O streams
+#
+# For more information about this module, see PEP 324.
+#
+# This module should remain compatible with Python 2.2, see PEP 291.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+r"""subprocess - Subprocesses with accessible I/O streams
+
+This module allows you to spawn processes, connect to their
+input/output/error pipes, and obtain their return codes. This module
+intends to replace several other, older modules and functions, like:
+
+os.system
+os.spawn*
+os.popen*
+popen2.*
+commands.*
+
+Information about how the subprocess module can be used to replace these
+modules and functions can be found below.
+
+
+
+Using the subprocess module
+===========================
+This module defines one class called Popen:
+
+class Popen(args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+
+
+Arguments are:
+
+args should be a string, or a sequence of program arguments. The
+program to execute is normally the first item in the args sequence or
+string, but can be explicitly set by using the executable argument.
+
+On UNIX, with shell=False (default): In this case, the Popen class
+uses os.execvp() to execute the child program. args should normally
+be a sequence. A string will be treated as a sequence with the string
+as the only item (the program to execute).
+
+On UNIX, with shell=True: If args is a string, it specifies the
+command string to execute through the shell. If args is a sequence,
+the first item specifies the command string, and any additional items
+will be treated as additional shell arguments.
+
+On Windows: the Popen class uses CreateProcess() to execute the child
+program, which operates on strings. If args is a sequence, it will be
+converted to a string using the list2cmdline method. Please note that
+not all MS Windows applications interpret the command line the same
+way: The list2cmdline is designed for applications using the same
+rules as the MS C runtime.
+
+bufsize, if given, has the same meaning as the corresponding argument
+to the built-in open() function: 0 means unbuffered, 1 means line
+buffered, any other positive value means use a buffer of
+(approximately) that size. A negative bufsize means to use the system
+default, which usually means fully buffered. The default value for
+bufsize is 0 (unbuffered).
+
+stdin, stdout and stderr specify the executed programs' standard
+input, standard output and standard error file handles, respectively.
+Valid values are PIPE, an existing file descriptor (a positive
+integer), an existing file object, and None. PIPE indicates that a
+new pipe to the child should be created. With None, no redirection
+will occur; the child's file handles will be inherited from the
+parent. Additionally, stderr can be STDOUT, which indicates that the
+stderr data from the applications should be captured into the same
+file handle as for stdout.
+
+If preexec_fn is set to a callable object, this object will be called
+in the child process just before the child is executed.
+
+If close_fds is true, all file descriptors except 0, 1 and 2 will be
+closed before the child process is executed.
+
+if shell is true, the specified command will be executed through the
+shell.
+
+If cwd is not None, the current directory will be changed to cwd
+before the child is executed.
+
+If env is not None, it defines the environment variables for the new
+process.
+
+If universal_newlines is true, the file objects stdout and stderr are
+ opened as text files, but lines may be terminated by any of '\n',
+the Unix end-of-line convention, '\r', the Macintosh convention or
+'\r\n', the Windows convention. All of these external representations
+are seen as '\n' by the Python program. Note: This feature is only
+available if Python is built with universal newline support (the
+default). Also, the newlines attribute of the file objects stdout,
+stdin and stderr are not updated by the communicate() method.
+
+The startupinfo and creationflags, if given, will be passed to the
+underlying CreateProcess() function. They can specify things such as
+appearance of the main window and priority for the new process.
+(Windows only)
+
+
+This module also defines two shortcut functions:
+
+call(*args, **kwargs):
+ Run command with arguments. Wait for command to complete, then
+ return the returncode attribute. The arguments are the same as for
+ the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+
+
+Exceptions
+----------
+Exceptions raised in the child process, before the new program has
+started to execute, will be re-raised in the parent. Additionally,
+the exception object will have one extra attribute called
+'child_traceback', which is a string containing traceback information
+ from the child's point of view.
+
+The most common exception raised is OSError. This occurs, for
+example, when trying to execute a non-existent file. Applications
+should prepare for OSErrors.
+
+A ValueError will be raised if Popen is called with invalid arguments.
+
+
+Security
+--------
+Unlike some other popen functions, this implementation will never call
+/bin/sh implicitly. This means that all characters, including shell
+metacharacters, can safely be passed to child processes.
+
+
+Popen objects
+=============
+Instances of the Popen class have the following methods:
+
+poll()
+ Check if child process has terminated. Returns returncode
+ attribute.
+
+wait()
+ Wait for child process to terminate. Returns returncode attribute.
+
+communicate(input=None)
+ Interact with process: Send data to stdin. Read data from stdout
+ and stderr, until end-of-file is reached. Wait for process to
+ terminate. The optional input argument should be a string to be
+ sent to the child process, or None, if no data should be sent to
+ the child.
+
+ communicate() returns a tuple (stdout, stderr).
+
+ Note: The data read is buffered in memory, so do not use this
+ method if the data size is large or unlimited.
+
+The following attributes are also available:
+
+stdin
+ If the stdin argument is PIPE, this attribute is a file object
+ that provides input to the child process. Otherwise, it is None.
+
+stdout
+ If the stdout argument is PIPE, this attribute is a file object
+ that provides output from the child process. Otherwise, it is
+ None.
+
+stderr
+ If the stderr argument is PIPE, this attribute is file object that
+ provides error output from the child process. Otherwise, it is
+ None.
+
+pid
+ The process ID of the child process.
+
+returncode
+ The child return code. A None value indicates that the process
+ hasn't terminated yet. A negative value -N indicates that the
+ child was terminated by signal N (UNIX only).
+
+
+Replacing older functions with the subprocess module
+====================================================
+In this section, "a ==> b" means that b can be used as a replacement
+for a.
+
+ Note: the older functions in this section fail (more or less) silently
+ if the executed program cannot be found; this module raises an OSError
+ exception instead.
+
+In the following examples, we assume that the subprocess module is
+imported with "from subprocess import *".
+
+
+Replacing /bin/sh shell backquote
+---------------------------------
+output=`mycmd myarg`
+==>
+output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
+
+
+Replacing shell pipe line
+-------------------------
+output=`dmesg | grep hda`
+==>
+p1 = Popen(["dmesg"], stdout=PIPE)
+p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
+output = p2.communicate()[0]
+
+
+Replacing os.system()
+---------------------
+sts = os.system("mycmd" + " myarg")
+==>
+p = Popen("mycmd" + " myarg", shell=True)
+sts = os.waitpid(p.pid, 0)
+
+Note:
+
+* Calling the program through the shell is usually not required.
+
+* It's easier to look at the returncode attribute than the
+ exitstatus.
+
+A more real-world example would look like this:
+
+try:
+ retcode = call("mycmd" + " myarg", shell=True)
+ if retcode < 0:
+ print >>sys.stderr, "Child was terminated by signal", -retcode
+ else:
+ print >>sys.stderr, "Child returned", retcode
+except OSError, e:
+ print >>sys.stderr, "Execution failed:", e
+
+
+Replacing os.spawn*
+-------------------
+P_NOWAIT example:
+
+pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
+==>
+pid = Popen(["/bin/mycmd", "myarg"]).pid
+
+
+P_WAIT example:
+
+retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
+==>
+retcode = call(["/bin/mycmd", "myarg"])
+
+
+Vector example:
+
+os.spawnvp(os.P_NOWAIT, path, args)
+==>
+Popen([path] + args[1:])
+
+
+Environment example:
+
+os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
+==>
+Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
+
+
+Replacing os.popen*
+-------------------
+pipe = os.popen(cmd, mode='r', bufsize)
+==>
+pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
+
+pipe = os.popen(cmd, mode='w', bufsize)
+==>
+pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
+
+
+(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdin, child_stdout) = (p.stdin, p.stdout)
+
+
+(child_stdin,
+ child_stdout,
+ child_stderr) = os.popen3(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
+(child_stdin,
+ child_stdout,
+ child_stderr) = (p.stdin, p.stdout, p.stderr)
+
+
+(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
+(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
+
+
+Replacing popen2.*
+------------------
+Note: If the cmd argument to popen2 functions is a string, the command
+is executed through /bin/sh. If it is a list, the command is directly
+executed.
+
+(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
+==>
+p = Popen(["somestring"], shell=True, bufsize=bufsize
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdout, child_stdin) = (p.stdout, p.stdin)
+
+
+(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
+==>
+p = Popen(["mycmd", "myarg"], bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdout, child_stdin) = (p.stdout, p.stdin)
+
+The popen2.Popen3 and popen2.Popen4 classes basically work as subprocess.Popen,
+except that:
+
+* subprocess.Popen raises an exception if the execution fails
+* the capturestderr argument is replaced with the stderr argument.
+* stdin=PIPE and stdout=PIPE must be specified.
+* popen2 closes all filedescriptors by default, but you have to specify
+ close_fds=True with subprocess.Popen.
+
+
+"""
+
+import sys
+mswindows = (sys.platform == "win32")
+
+import os
+import types
+import traceback
+
+if mswindows:
+ import threading
+ import msvcrt
+ ## @@: Changed in Paste
+ ## Since this module is only used on pre-python-2.4 systems, they probably
+ ## don't have _subprocess installed, but hopefully have the win32 stuff
+ ## installed.
+ if 1: # <-- change this to use pywin32 instead of the _subprocess driver
+ import pywintypes
+ from win32api import GetStdHandle, STD_INPUT_HANDLE, \
+ STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
+ from win32api import GetCurrentProcess, DuplicateHandle, \
+ GetModuleFileName, GetVersion
+ from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
+ from win32pipe import CreatePipe
+ from win32process import CreateProcess, STARTUPINFO, \
+ GetExitCodeProcess, STARTF_USESTDHANDLES, \
+ STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
+ from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
+ else:
+ from _subprocess import *
+ class STARTUPINFO:
+ dwFlags = 0
+ hStdInput = None
+ hStdOutput = None
+ hStdError = None
+ class pywintypes:
+ error = IOError
+else:
+ import select
+ import errno
+ import fcntl
+ import pickle
+
+__all__ = ["Popen", "PIPE", "STDOUT", "call"]
+
+try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+except:
+ MAXFD = 256
+
+# True/False does not exist on 2.2.0
+try:
+ False
+except NameError:
+ False = 0
+ True = 1
+
+_active = []
+
+def _cleanup():
+ for inst in _active[:]:
+ inst.poll()
+
+PIPE = -1
+STDOUT = -2
+
+
+def call(*args, **kwargs):
+ """Run command with arguments. Wait for command to complete, then
+ return the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+ """
+ return Popen(*args, **kwargs).wait()
+
+
+def list2cmdline(seq):
+ """
+ Translate a sequence of arguments into a command line
+ string, using the same rules as the MS C runtime:
+
+ 1) Arguments are delimited by white space, which is either a
+ space or a tab.
+
+ 2) A string surrounded by double quotation marks is
+ interpreted as a single argument, regardless of white space
+ contained within. A quoted string can be embedded in an
+ argument.
+
+ 3) A double quotation mark preceded by a backslash is
+ interpreted as a literal double quotation mark.
+
+ 4) Backslashes are interpreted literally, unless they
+ immediately precede a double quotation mark.
+
+ 5) If backslashes immediately precede a double quotation mark,
+ every pair of backslashes is interpreted as a literal
+ backslash. If the number of backslashes is odd, the last
+ backslash escapes the next double quotation mark as
+ described in rule 3.
+ """
+
+ # See
+ # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
+ result = []
+ needquote = False
+ for arg in seq:
+ bs_buf = []
+
+ # Add a space to separate this argument from the others
+ if result:
+ result.append(' ')
+
+ needquote = (" " in arg) or ("\t" in arg)
+ if needquote:
+ result.append('"')
+
+ for c in arg:
+ if c == '\\':
+ # Don't know if we need to double yet.
+ bs_buf.append(c)
+ elif c == '"':
+ # Double backslashes.
+ result.append('\\' * len(bs_buf)*2)
+ bs_buf = []
+ result.append('\\"')
+ else:
+ # Normal char
+ if bs_buf:
+ result.extend(bs_buf)
+ bs_buf = []
+ result.append(c)
+
+ # Add remaining backslashes, if any.
+ if bs_buf:
+ result.extend(bs_buf)
+
+ if needquote:
+ result.extend(bs_buf)
+ result.append('"')
+
+ return ''.join(result)
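A hedged illustration of the quoting rules above, with arguments made up for the example (outputs follow the convention list2cmdline implements):

    print list2cmdline(['ls', '-l'])
    # -> ls -l
    print list2cmdline(['copy', 'a file.txt', 'dest'])
    # -> copy "a file.txt" dest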
+
+
+class Popen(object):
+ def __init__(self, args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+ """Create new Popen instance."""
+ _cleanup()
+
+ if not isinstance(bufsize, (int, long)):
+ raise TypeError("bufsize must be an integer")
+
+ if mswindows:
+ if preexec_fn is not None:
+ raise ValueError("preexec_fn is not supported on Windows "
+ "platforms")
+ if close_fds:
+ raise ValueError("close_fds is not supported on Windows "
+ "platforms")
+ else:
+ # POSIX
+ if startupinfo is not None:
+ raise ValueError("startupinfo is only supported on Windows "
+ "platforms")
+ if creationflags != 0:
+ raise ValueError("creationflags is only supported on Windows "
+ "platforms")
+
+ self.stdin = None
+ self.stdout = None
+ self.stderr = None
+ self.pid = None
+ self.returncode = None
+ self.universal_newlines = universal_newlines
+
+ # Input and output objects. The general principle is like
+ # this:
+ #
+ # Parent Child
+ # ------ -----
+ # p2cwrite ---stdin---> p2cread
+ # c2pread <--stdout--- c2pwrite
+ # errread <--stderr--- errwrite
+ #
+ # On POSIX, the child objects are file descriptors. On
+ # Windows, these are Windows file handles. The parent objects
+ # are file descriptors on both platforms. The parent objects
+ # are None when not using PIPEs. The child objects are None
+ # when not redirecting.
+
+ (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite) = self._get_handles(stdin, stdout, stderr)
+
+ self._execute_child(args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ if p2cwrite:
+ self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
+ if c2pread:
+ if universal_newlines:
+ self.stdout = os.fdopen(c2pread, 'rU', bufsize)
+ else:
+ self.stdout = os.fdopen(c2pread, 'rb', bufsize)
+ if errread:
+ if universal_newlines:
+ self.stderr = os.fdopen(errread, 'rU', bufsize)
+ else:
+ self.stderr = os.fdopen(errread, 'rb', bufsize)
+
+ _active.append(self)
+
+
+ def _translate_newlines(self, data):
+ data = data.replace("\r\n", "\n")
+ data = data.replace("\r", "\n")
+ return data
+
+
+ if mswindows:
+ #
+ # Windows methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tupel with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ if stdin == None and stdout == None and stderr == None:
+ return (None, None, None, None, None, None)
+
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ if stdin == None:
+ p2cread = GetStdHandle(STD_INPUT_HANDLE)
+ elif stdin == PIPE:
+ p2cread, p2cwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ p2cwrite = p2cwrite.Detach()
+ p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
+ elif type(stdin) == types.IntType:
+ p2cread = msvcrt.get_osfhandle(stdin)
+ else:
+ # Assuming file-like object
+ p2cread = msvcrt.get_osfhandle(stdin.fileno())
+ p2cread = self._make_inheritable(p2cread)
+
+ if stdout == None:
+ c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
+ elif stdout == PIPE:
+ c2pread, c2pwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ c2pread = c2pread.Detach()
+ c2pread = msvcrt.open_osfhandle(c2pread, 0)
+ elif type(stdout) == types.IntType:
+ c2pwrite = msvcrt.get_osfhandle(stdout)
+ else:
+ # Assuming file-like object
+ c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
+ c2pwrite = self._make_inheritable(c2pwrite)
+
+ if stderr == None:
+ errwrite = GetStdHandle(STD_ERROR_HANDLE)
+ elif stderr == PIPE:
+ errread, errwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ errread = errread.Detach()
+ errread = msvcrt.open_osfhandle(errread, 0)
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif type(stderr) == types.IntType:
+ errwrite = msvcrt.get_osfhandle(stderr)
+ else:
+ # Assuming file-like object
+ errwrite = msvcrt.get_osfhandle(stderr.fileno())
+ errwrite = self._make_inheritable(errwrite)
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _make_inheritable(self, handle):
+ """Return a duplicate of handle, which is inheritable"""
+ return DuplicateHandle(GetCurrentProcess(), handle,
+ GetCurrentProcess(), 0, 1,
+ DUPLICATE_SAME_ACCESS)
+
+
+ def _find_w9xpopen(self):
+ """Find and return absolut path to w9xpopen.exe"""
+ w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
+ "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ # Eeek - file-not-found - possibly an embedding
+ # situation - see if we can locate it in sys.exec_prefix
+ w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
+ "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ raise RuntimeError("Cannot locate w9xpopen.exe, which is "
+ "needed for Popen to work with your "
+ "shell or platform.")
+ return w9xpopen
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program (MS Windows version)"""
+
+ if not isinstance(args, types.StringTypes):
+ args = list2cmdline(args)
+
+ # Process startup details
+ default_startupinfo = STARTUPINFO()
+ if startupinfo == None:
+ startupinfo = default_startupinfo
+ if not None in (p2cread, c2pwrite, errwrite):
+ startupinfo.dwFlags |= STARTF_USESTDHANDLES
+ startupinfo.hStdInput = p2cread
+ startupinfo.hStdOutput = c2pwrite
+ startupinfo.hStdError = errwrite
+
+ if shell:
+ default_startupinfo.dwFlags |= STARTF_USESHOWWINDOW
+ default_startupinfo.wShowWindow = SW_HIDE
+ comspec = os.environ.get("COMSPEC", "cmd.exe")
+ args = comspec + " /c " + args
+ if (GetVersion() >= 0x80000000L or
+ os.path.basename(comspec).lower() == "command.com"):
+ # Win9x, or using command.com on NT. We need to
+ # use the w9xpopen intermediate program. For more
+ # information, see KB Q150956
+ # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
+ w9xpopen = self._find_w9xpopen()
+ args = '"%s" %s' % (w9xpopen, args)
+ # Not passing CREATE_NEW_CONSOLE has been known to
+ # cause random failures on win9x. Specifically a
+ # dialog: "Your program accessed mem currently in
+ # use at xxx" and a hopeful warning about the
+ # stability of your system. Cost is Ctrl+C won't
+ # kill children.
+ creationflags |= CREATE_NEW_CONSOLE
+
+ # Start the process
+ try:
+ hp, ht, pid, tid = CreateProcess(executable, args,
+ # no special security
+ None, None,
+ # must inherit handles to pass std
+ # handles
+ 1,
+ creationflags,
+ env,
+ cwd,
+ startupinfo)
+ except pywintypes.error, e:
+ # Translate pywintypes.error to WindowsError, which is
+ # a subclass of OSError. FIXME: We should really
+ # translate errno using _sys_errlist (or similar), but
+ # how can this be done from Python?
+ raise WindowsError(*e.args)
+
+ # Retain the process handle, but close the thread handle
+ self._handle = hp
+ self.pid = pid
+ ht.Close()
+
+ # Child is launched. Close the parent's copy of those pipe
+ # handles that only the child should have open. You need
+ # to make sure that no handles to the write end of the
+ # output pipe are maintained in this process or else the
+ # pipe will not close when the child process exits and the
+ # ReadFile will hang.
+ if p2cread != None:
+ p2cread.Close()
+ if c2pwrite != None:
+ c2pwrite.Close()
+ if errwrite != None:
+ errwrite.Close()
+
+
+ def poll(self):
+ """Check if child process has terminated. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
+ self.returncode = GetExitCodeProcess(self._handle)
+ _active.remove(self)
+ return self.returncode
+
+
+ def wait(self):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ obj = WaitForSingleObject(self._handle, INFINITE)
+ self.returncode = GetExitCodeProcess(self._handle)
+ _active.remove(self)
+ return self.returncode
+
+
+ def _readerthread(self, fh, buffer):
+ buffer.append(fh.read())
+
+
+ def communicate(self, input=None):
+ """Interact with process: Send data to stdin. Read data from
+ stdout and stderr, until end-of-file is reached. Wait for
+ process to terminate. The optional input argument should be a
+ string to be sent to the child process, or None, if no data
+ should be sent to the child.
+
+ communicate() returns a tuple (stdout, stderr)."""
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdout:
+ stdout = []
+ stdout_thread = threading.Thread(target=self._readerthread,
+ args=(self.stdout, stdout))
+ stdout_thread.setDaemon(True)
+ stdout_thread.start()
+ if self.stderr:
+ stderr = []
+ stderr_thread = threading.Thread(target=self._readerthread,
+ args=(self.stderr, stderr))
+ stderr_thread.setDaemon(True)
+ stderr_thread.start()
+
+ if self.stdin:
+ if input != None:
+ self.stdin.write(input)
+ self.stdin.close()
+
+ if self.stdout:
+ stdout_thread.join()
+ if self.stderr:
+ stderr_thread.join()
+
+ # All data exchanged. Translate lists into strings.
+ if stdout != None:
+ stdout = stdout[0]
+ if stderr != None:
+ stderr = stderr[0]
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(open, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+ else:
+ #
+ # POSIX methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tupel with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ if stdin == None:
+ pass
+ elif stdin == PIPE:
+ p2cread, p2cwrite = os.pipe()
+ elif type(stdin) == types.IntType:
+ p2cread = stdin
+ else:
+ # Assuming file-like object
+ p2cread = stdin.fileno()
+
+ if stdout == None:
+ pass
+ elif stdout == PIPE:
+ c2pread, c2pwrite = os.pipe()
+ elif type(stdout) == types.IntType:
+ c2pwrite = stdout
+ else:
+ # Assuming file-like object
+ c2pwrite = stdout.fileno()
+
+ if stderr == None:
+ pass
+ elif stderr == PIPE:
+ errread, errwrite = os.pipe()
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif type(stderr) == types.IntType:
+ errwrite = stderr
+ else:
+ # Assuming file-like object
+ errwrite = stderr.fileno()
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _set_cloexec_flag(self, fd):
+ try:
+ cloexec_flag = fcntl.FD_CLOEXEC
+ except AttributeError:
+ cloexec_flag = 1
+
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+
+
+ def _close_fds(self, but):
+ for i in range(3, MAXFD):
+ if i == but:
+ continue
+ try:
+ os.close(i)
+ except:
+ pass
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program (POSIX version)"""
+
+ if isinstance(args, types.StringTypes):
+ args = [args]
+
+ if shell:
+ args = ["/bin/sh", "-c"] + args
+
+ if executable == None:
+ executable = args[0]
+
+ # For transferring possible exec failure from child to parent
+ # The first char specifies the exception type: 0 means
+ # OSError, 1 means some other error.
+ errpipe_read, errpipe_write = os.pipe()
+ self._set_cloexec_flag(errpipe_write)
+
+ self.pid = os.fork()
+ if self.pid == 0:
+ # Child
+ try:
+ # Close parent's pipe ends
+ if p2cwrite:
+ os.close(p2cwrite)
+ if c2pread:
+ os.close(c2pread)
+ if errread:
+ os.close(errread)
+ os.close(errpipe_read)
+
+ # Dup fds for child
+ if p2cread:
+ os.dup2(p2cread, 0)
+ if c2pwrite:
+ os.dup2(c2pwrite, 1)
+ if errwrite:
+ os.dup2(errwrite, 2)
+
+ # Close pipe fds. Make sure we don't close the same
+ # fd more than once.
+ if p2cread:
+ os.close(p2cread)
+ if c2pwrite and c2pwrite not in (p2cread,):
+ os.close(c2pwrite)
+ if errwrite and errwrite not in (p2cread, c2pwrite):
+ os.close(errwrite)
+
+ # Close all other fds, if asked for
+ if close_fds:
+ self._close_fds(but=errpipe_write)
+
+ if cwd != None:
+ os.chdir(cwd)
+
+ if preexec_fn:
+ apply(preexec_fn)
+
+ if env == None:
+ os.execvp(executable, args)
+ else:
+ os.execvpe(executable, args, env)
+
+ except:
+ exc_type, exc_value, tb = sys.exc_info()
+ # Save the traceback and attach it to the exception object
+ exc_lines = traceback.format_exception(exc_type,
+ exc_value,
+ tb)
+ exc_value.child_traceback = ''.join(exc_lines)
+ os.write(errpipe_write, pickle.dumps(exc_value))
+
+ # This exitcode won't be reported to applications, so it
+ # really doesn't matter what we return.
+ os._exit(255)
+
+ # Parent
+ os.close(errpipe_write)
+ if p2cread and p2cwrite:
+ os.close(p2cread)
+ if c2pwrite and c2pread:
+ os.close(c2pwrite)
+ if errwrite and errread:
+ os.close(errwrite)
+
+ # Wait for exec to fail or succeed; possibly raising exception
+ data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
+ os.close(errpipe_read)
+ if data != "":
+ os.waitpid(self.pid, 0)
+ child_exception = pickle.loads(data)
+ raise child_exception
+
+
+ def _handle_exitstatus(self, sts):
+ if os.WIFSIGNALED(sts):
+ self.returncode = -os.WTERMSIG(sts)
+ elif os.WIFEXITED(sts):
+ self.returncode = os.WEXITSTATUS(sts)
+ else:
+ # Should never happen
+ raise RuntimeError("Unknown child exit status!")
+
+ _active.remove(self)
+
+
+ def poll(self):
+ """Check if child process has terminated. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ try:
+ pid, sts = os.waitpid(self.pid, os.WNOHANG)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ except os.error:
+ pass
+ return self.returncode
+
+
+ def wait(self):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ pid, sts = os.waitpid(self.pid, 0)
+ self._handle_exitstatus(sts)
+ return self.returncode
+
+
+ def communicate(self, input=None):
+ """Interact with process: Send data to stdin. Read data from
+ stdout and stderr, until end-of-file is reached. Wait for
+ process to terminate. The optional input argument should be a
+ string to be sent to the child process, or None, if no data
+ should be sent to the child.
+
+ communicate() returns a tuple (stdout, stderr)."""
+ read_set = []
+ write_set = []
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdin:
+ # Flush stdio buffer. This might block, if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ self.stdin.flush()
+ if input:
+ write_set.append(self.stdin)
+ else:
+ self.stdin.close()
+ if self.stdout:
+ read_set.append(self.stdout)
+ stdout = []
+ if self.stderr:
+ read_set.append(self.stderr)
+ stderr = []
+
+ while read_set or write_set:
+ rlist, wlist, xlist = select.select(read_set, write_set, [])
+
+ if self.stdin in wlist:
+ # When select has indicated that the file is writable,
+ # we can write up to PIPE_BUF bytes without risk of
+ # blocking. POSIX defines PIPE_BUF >= 512
+ bytes_written = os.write(self.stdin.fileno(), input[:512])
+ input = input[bytes_written:]
+ if not input:
+ self.stdin.close()
+ write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = os.read(self.stdout.fileno(), 1024)
+ if data == "":
+ self.stdout.close()
+ read_set.remove(self.stdout)
+ stdout.append(data)
+
+ if self.stderr in rlist:
+ data = os.read(self.stderr.fileno(), 1024)
+ if data == "":
+ self.stderr.close()
+ read_set.remove(self.stderr)
+ stderr.append(data)
+
+ # All data exchanged. Translate lists into strings.
+ if stdout != None:
+ stdout = ''.join(stdout)
+ if stderr != None:
+ stderr = ''.join(stderr)
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(open, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+
+def _demo_posix():
+ #
+ # Example 1: Simple redirection: Get process list
+ #
+ plist = Popen(["ps"], stdout=PIPE).communicate()[0]
+ print "Process list:"
+ print plist
+
+ #
+ # Example 2: Change uid before executing child
+ #
+ if os.getuid() == 0:
+ p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
+ p.wait()
+
+ #
+ # Example 3: Connecting several subprocesses
+ #
+ print "Looking for 'hda'..."
+ p1 = Popen(["dmesg"], stdout=PIPE)
+ p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
+ print repr(p2.communicate()[0])
+
+ #
+ # Example 4: Catch execution error
+ #
+ print
+ print "Trying a weird file..."
+ try:
+ print Popen(["/this/path/does/not/exist"]).communicate()
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ print "The file didn't exist. I thought so..."
+ print "Child traceback:"
+ print e.child_traceback
+ else:
+ print "Error", e.errno
+ else:
+ print >>sys.stderr, "Gosh. No error."
+
+
+def _demo_windows():
+ #
+ # Example 1: Connecting several subprocesses
+ #
+ print "Looking for 'PROMPT' in set output..."
+ p1 = Popen("set", stdout=PIPE, shell=True)
+ p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
+ print repr(p2.communicate()[0])
+
+ #
+ # Example 2: Simple execution of program
+ #
+ print "Executing calc..."
+ p = Popen("calc")
+ p.wait()
+
+
+if __name__ == "__main__":
+ if mswindows:
+ _demo_windows()
+ else:
+ _demo_posix()
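The module docstring above already covers most replacement recipes; as one concrete hedged sketch, this is how the vendored copy could be used from a pre-2.4 interpreter on POSIX to capture a command's output (the commands are arbitrary examples):

    from paste.util.subprocess24 import Popen, PIPE, call

    # Capture stdout of a single command.
    output = Popen(['echo', 'hello'], stdout=PIPE).communicate()[0]
    print repr(output)    # 'hello\n'

    # Run a command and inspect its exit status.
    retcode = call(['true'])
    assert retcode == 0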
diff --git a/paste/util/template.py b/paste/util/template.py
new file mode 100644
index 0000000..1e42b5a
--- /dev/null
+++ b/paste/util/template.py
@@ -0,0 +1,758 @@
+"""
+A small templating language
+
+This implements a small templating language for use internally in
+Paste and Paste Script. This language implements if/elif/else,
+for/continue/break, expressions, and blocks of Python code. The
+syntax is::
+
+ {{any expression (function calls etc)}}
+ {{any expression | filter}}
+ {{for x in y}}...{{endfor}}
+ {{if x}}x{{elif y}}y{{else}}z{{endif}}
+ {{py:x=1}}
+ {{py:
+ def foo(bar):
+ return 'baz'
+ }}
+ {{default var = default_value}}
+ {{# comment}}
+
+You use this with the ``Template`` class or the ``sub`` shortcut.
+The ``Template`` class takes the template string and the name of
+the template (for errors) and a default namespace. Then (like
+``string.Template``) you can call the ``tmpl.substitute(**kw)``
+method to make a substitution (or ``tmpl.substitute(a_dict)``).
+
+``sub(content, **kw)`` substitutes the template immediately. You
+can use ``__name='tmpl.html'`` to set the name of the template.
+
+If there are syntax errors ``TemplateError`` will be raised.
+"""
+
+import re
+import sys
+import cgi
+import urllib
+from paste.util.looper import looper
+
+__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
+ 'sub_html', 'html', 'bunch']
+
+token_re = re.compile(r'\{\{|\}\}')
+in_re = re.compile(r'\s+in\s+')
+var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
+
+class TemplateError(Exception):
+ """Exception raised while parsing a template
+ """
+
+ def __init__(self, message, position, name=None):
+ self.message = message
+ self.position = position
+ self.name = name
+
+ def __str__(self):
+ msg = '%s at line %s column %s' % (
+ self.message, self.position[0], self.position[1])
+ if self.name:
+ msg += ' in %s' % self.name
+ return msg
+
+class _TemplateContinue(Exception):
+ pass
+
+class _TemplateBreak(Exception):
+ pass
+
+class Template(object):
+
+ default_namespace = {
+ 'start_braces': '{{',
+ 'end_braces': '}}',
+ 'looper': looper,
+ }
+
+ default_encoding = 'utf8'
+
+ def __init__(self, content, name=None, namespace=None):
+ self.content = content
+ self._unicode = isinstance(content, unicode)
+ self.name = name
+ self._parsed = parse(content, name=name)
+ if namespace is None:
+ namespace = {}
+ self.namespace = namespace
+
+ def from_filename(cls, filename, namespace=None, encoding=None):
+ f = open(filename, 'rb')
+ c = f.read()
+ f.close()
+ if encoding:
+ c = c.decode(encoding)
+ return cls(content=c, name=filename, namespace=namespace)
+
+ from_filename = classmethod(from_filename)
+
+ def __repr__(self):
+ return '<%s %s name=%r>' % (
+ self.__class__.__name__,
+ hex(id(self))[2:], self.name)
+
+ def substitute(self, *args, **kw):
+ if args:
+ if kw:
+ raise TypeError(
+ "You can only give positional *or* keyword arguments")
+ if len(args) > 1:
+ raise TypeError(
+ "You can only give on positional argument")
+ kw = args[0]
+ ns = self.default_namespace.copy()
+ ns.update(self.namespace)
+ ns.update(kw)
+ result = self._interpret(ns)
+ return result
+
+ def _interpret(self, ns):
+ __traceback_hide__ = True
+ parts = []
+ self._interpret_codes(self._parsed, ns, out=parts)
+ return ''.join(parts)
+
+ def _interpret_codes(self, codes, ns, out):
+ __traceback_hide__ = True
+ for item in codes:
+ if isinstance(item, basestring):
+ out.append(item)
+ else:
+ self._interpret_code(item, ns, out)
+
+ def _interpret_code(self, code, ns, out):
+ __traceback_hide__ = True
+ name, pos = code[0], code[1]
+ if name == 'py':
+ self._exec(code[2], ns, pos)
+ elif name == 'continue':
+ raise _TemplateContinue()
+ elif name == 'break':
+ raise _TemplateBreak()
+ elif name == 'for':
+ vars, expr, content = code[2], code[3], code[4]
+ expr = self._eval(expr, ns, pos)
+ self._interpret_for(vars, expr, content, ns, out)
+ elif name == 'cond':
+ parts = code[2:]
+ self._interpret_if(parts, ns, out)
+ elif name == 'expr':
+ parts = code[2].split('|')
+ base = self._eval(parts[0], ns, pos)
+ for part in parts[1:]:
+ func = self._eval(part, ns, pos)
+ base = func(base)
+ out.append(self._repr(base, pos))
+ elif name == 'default':
+ var, expr = code[2], code[3]
+ if var not in ns:
+ result = self._eval(expr, ns, pos)
+ ns[var] = result
+ elif name == 'comment':
+ return
+ else:
+ assert 0, "Unknown code: %r" % name
+
+ def _interpret_for(self, vars, expr, content, ns, out):
+ __traceback_hide__ = True
+ for item in expr:
+ if len(vars) == 1:
+ ns[vars[0]] = item
+ else:
+ if len(vars) != len(item):
+ raise ValueError(
+ 'Need %i items to unpack (got %i items)'
+ % (len(vars), len(item)))
+ for name, value in zip(vars, item):
+ ns[name] = value
+ try:
+ self._interpret_codes(content, ns, out)
+ except _TemplateContinue:
+ continue
+ except _TemplateBreak:
+ break
+
+ def _interpret_if(self, parts, ns, out):
+ __traceback_hide__ = True
+ # @@: if/else/else gets through
+ for part in parts:
+ assert not isinstance(part, basestring)
+ name, pos = part[0], part[1]
+ if name == 'else':
+ result = True
+ else:
+ result = self._eval(part[2], ns, pos)
+ if result:
+ self._interpret_codes(part[3], ns, out)
+ break
+
+ def _eval(self, code, ns, pos):
+ __traceback_hide__ = True
+ try:
+ value = eval(code, ns)
+ return value
+ except:
+ exc_info = sys.exc_info()
+ e = exc_info[1]
+ if getattr(e, 'args'):
+ arg0 = e.args[0]
+ else:
+ arg0 = str(e)
+ e.args = (self._add_line_info(arg0, pos),)
+ raise exc_info[0], e, exc_info[2]
+
+ def _exec(self, code, ns, pos):
+ __traceback_hide__ = True
+ try:
+ exec code in ns
+ except:
+ exc_info = sys.exc_info()
+ e = exc_info[1]
+ e.args = (self._add_line_info(e.args[0], pos),)
+ raise exc_info[0], e, exc_info[2]
+
+ def _repr(self, value, pos):
+ __traceback_hide__ = True
+ try:
+ if value is None:
+ return ''
+ if self._unicode:
+ try:
+ value = unicode(value)
+ except UnicodeDecodeError:
+ value = str(value)
+ else:
+ value = str(value)
+ except:
+ exc_info = sys.exc_info()
+ e = exc_info[1]
+ e.args = (self._add_line_info(e.args[0], pos),)
+ raise exc_info[0], e, exc_info[2]
+ else:
+ if self._unicode and isinstance(value, str):
+                if not self.default_encoding:
+ raise UnicodeDecodeError(
+ 'Cannot decode str value %r into unicode '
+ '(no default_encoding provided)' % value)
+ value = value.decode(self.default_encoding)
+ elif not self._unicode and isinstance(value, unicode):
+                if not self.default_encoding:
+ raise UnicodeEncodeError(
+ 'Cannot encode unicode value %r into str '
+ '(no default_encoding provided)' % value)
+ value = value.encode(self.default_encoding)
+ return value
+
+
+ def _add_line_info(self, msg, pos):
+ msg = "%s at line %s column %s" % (
+ msg, pos[0], pos[1])
+ if self.name:
+ msg += " in file %s" % self.name
+ return msg
+
+def sub(content, **kw):
+ name = kw.get('__name')
+ tmpl = Template(content, name=name)
+ return tmpl.substitute(kw)
+
+def paste_script_template_renderer(content, vars, filename=None):
+ tmpl = Template(content, name=filename)
+ return tmpl.substitute(vars)
+
+class bunch(dict):
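+    """Dict subclass with attribute-style access.
+
+    If a ``'default'`` key is present it is returned for missing keys.
+    Illustrative example::
+
+        b = bunch(default=0, x=1)
+        b.x             # -> 1
+        b['missing']    # -> 0  (falls back to b['default'])
+    """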
+
+ def __init__(self, **kw):
+ for name, value in kw.items():
+ setattr(self, name, value)
+
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __getattr__(self, name):
+ try:
+ return self[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(self, key):
+ if 'default' in self:
+ try:
+ return dict.__getitem__(self, key)
+ except KeyError:
+ return dict.__getitem__(self, 'default')
+ else:
+ return dict.__getitem__(self, key)
+
+ def __repr__(self):
+ items = [
+ (k, v) for k, v in self.items()]
+ items.sort()
+ return '<%s %s>' % (
+ self.__class__.__name__,
+ ' '.join(['%s=%r' % (k, v) for k, v in items]))
+
+############################################################
+## HTML Templating
+############################################################
+
+class html(object):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return self.value
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__, self.value)
+
+def html_quote(value):
+ if value is None:
+ return ''
+ if not isinstance(value, basestring):
+ if hasattr(value, '__unicode__'):
+ value = unicode(value)
+ else:
+ value = str(value)
+ value = cgi.escape(value, 1)
+ if isinstance(value, unicode):
+ value = value.encode('ascii', 'xmlcharrefreplace')
+ return value
+
+def url(v):
+ if not isinstance(v, basestring):
+ if hasattr(v, '__unicode__'):
+ v = unicode(v)
+ else:
+ v = str(v)
+ if isinstance(v, unicode):
+ v = v.encode('utf8')
+ return urllib.quote(v)
+
+def attr(**kw):
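+    """Format keyword arguments as an HTML attribute string, wrapped
+    in ``html()`` so it is not re-quoted.
+
+    Keys are sorted, ``None`` values are skipped, and a single trailing
+    underscore is stripped so reserved words can be passed (``class_``
+    becomes ``class``).  Illustrative example::
+
+        attr(class_='big', id=None, title='x')
+        # -> html fragment: class="big" title="x"
+    """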
+ kw = kw.items()
+ kw.sort()
+ parts = []
+ for name, value in kw:
+ if value is None:
+ continue
+ if name.endswith('_'):
+ name = name[:-1]
+ parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
+ return html(' '.join(parts))
+
+class HTMLTemplate(Template):
+
+ default_namespace = Template.default_namespace.copy()
+ default_namespace.update(dict(
+ html=html,
+ attr=attr,
+ url=url,
+ ))
+
+ def _repr(self, value, pos):
+ plain = Template._repr(self, value, pos)
+ if isinstance(value, html):
+ return plain
+ else:
+ return html_quote(plain)
+
+def sub_html(content, **kw):
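+    """Substitute ``content`` as an HTML template; substituted values
+    are HTML-quoted unless wrapped in ``html()``.  Illustrative example::
+
+        sub_html('<p>{{body}}</p>', body='<b>hi</b>')
+        # -> '<p>&lt;b&gt;hi&lt;/b&gt;</p>'
+        sub_html('<p>{{body}}</p>', body=html('<b>hi</b>'))
+        # -> '<p><b>hi</b></p>'
+    """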
+ name = kw.get('__name')
+ tmpl = HTMLTemplate(content, name=name)
+ return tmpl.substitute(kw)
+
+
+############################################################
+## Lexing and Parsing
+############################################################
+
+def lex(s, name=None, trim_whitespace=True):
+ """
+ Lex a string into chunks:
+
+ >>> lex('hey')
+ ['hey']
+ >>> lex('hey {{you}}')
+ ['hey ', ('you', (1, 7))]
+ >>> lex('hey {{')
+ Traceback (most recent call last):
+ ...
+ TemplateError: No }} to finish last expression at line 1 column 7
+ >>> lex('hey }}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: }} outside expression at line 1 column 7
+ >>> lex('hey {{ {{')
+ Traceback (most recent call last):
+ ...
+ TemplateError: {{ inside expression at line 1 column 10
+
+ """
+ in_expr = False
+ chunks = []
+ last = 0
+ last_pos = (1, 1)
+ for match in token_re.finditer(s):
+ expr = match.group(0)
+ pos = find_position(s, match.end())
+ if expr == '{{' and in_expr:
+ raise TemplateError('{{ inside expression', position=pos,
+ name=name)
+ elif expr == '}}' and not in_expr:
+ raise TemplateError('}} outside expression', position=pos,
+ name=name)
+ if expr == '{{':
+ part = s[last:match.start()]
+ if part:
+ chunks.append(part)
+ in_expr = True
+ else:
+ chunks.append((s[last:match.start()], last_pos))
+ in_expr = False
+ last = match.end()
+ last_pos = pos
+ if in_expr:
+ raise TemplateError('No }} to finish last expression',
+ name=name, position=last_pos)
+ part = s[last:]
+ if part:
+ chunks.append(part)
+ if trim_whitespace:
+ chunks = trim_lex(chunks)
+ return chunks
+
+statement_re = re.compile(r'^(?:if |elif |else |for |py:)')
+single_statements = ['endif', 'endfor', 'continue', 'break']
+trail_whitespace_re = re.compile(r'\n[\t ]*$')
+lead_whitespace_re = re.compile(r'^[\t ]*\n')
+
+def trim_lex(tokens):
+ r"""
+ Takes a lexed set of tokens, and removes whitespace when there is
+ a directive on a line by itself:
+
+ >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
+ >>> tokens
+ [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
+ >>> trim_lex(tokens)
+ [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
+ """
+ for i in range(len(tokens)):
+ current = tokens[i]
+ if isinstance(tokens[i], basestring):
+ # we don't trim this
+ continue
+ item = current[0]
+ if not statement_re.search(item) and item not in single_statements:
+ continue
+ if not i:
+ prev = ''
+ else:
+ prev = tokens[i-1]
+ if i+1 >= len(tokens):
+ next = ''
+ else:
+ next = tokens[i+1]
+ if (not isinstance(next, basestring)
+ or not isinstance(prev, basestring)):
+ continue
+ if ((not prev or trail_whitespace_re.search(prev))
+ and (not next or lead_whitespace_re.search(next))):
+ if prev:
+ m = trail_whitespace_re.search(prev)
+ # +1 to leave the leading \n on:
+ prev = prev[:m.start()+1]
+ tokens[i-1] = prev
+ if next:
+ m = lead_whitespace_re.search(next)
+ next = next[m.end():]
+ tokens[i+1] = next
+ return tokens
+
+
+def find_position(string, index):
+ """Given a string and index, return (line, column)"""
+ leading = string[:index].splitlines()
+ return (len(leading), len(leading[-1])+1)
+
+def parse(s, name=None):
+ r"""
+ Parses a string into a kind of AST
+
+ >>> parse('{{x}}')
+ [('expr', (1, 3), 'x')]
+ >>> parse('foo')
+ ['foo']
+ >>> parse('{{if x}}test{{endif}}')
+ [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
+ >>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
+ ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
+ >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
+ [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
+ >>> parse('{{py:x=1}}')
+ [('py', (1, 3), 'x=1')]
+ >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
+ [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
+
+ Some exceptions::
+
+ >>> parse('{{continue}}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: continue outside of for loop at line 1 column 3
+ >>> parse('{{if x}}foo')
+ Traceback (most recent call last):
+ ...
+ TemplateError: No {{endif}} at line 1 column 3
+ >>> parse('{{else}}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: else outside of an if block at line 1 column 3
+ >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: Unexpected endif at line 1 column 25
+ >>> parse('{{if}}{{endif}}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: if with no expression at line 1 column 3
+ >>> parse('{{for x y}}{{endfor}}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
+ >>> parse('{{py:x=1\ny=2}}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
+ """
+ tokens = lex(s, name=name)
+ result = []
+ while tokens:
+ next, tokens = parse_expr(tokens, name)
+ result.append(next)
+ return result
+
+def parse_expr(tokens, name, context=()):
+ if isinstance(tokens[0], basestring):
+ return tokens[0], tokens[1:]
+ expr, pos = tokens[0]
+ expr = expr.strip()
+ if expr.startswith('py:'):
+ expr = expr[3:].lstrip(' \t')
+ if expr.startswith('\n'):
+ expr = expr[1:]
+ else:
+ if '\n' in expr:
+ raise TemplateError(
+ 'Multi-line py blocks must start with a newline',
+ position=pos, name=name)
+ return ('py', pos, expr), tokens[1:]
+ elif expr in ('continue', 'break'):
+ if 'for' not in context:
+ raise TemplateError(
+                '%s outside of for loop' % expr,
+ position=pos, name=name)
+ return (expr, pos), tokens[1:]
+ elif expr.startswith('if '):
+ return parse_cond(tokens, name, context)
+ elif (expr.startswith('elif ')
+ or expr == 'else'):
+ raise TemplateError(
+ '%s outside of an if block' % expr.split()[0],
+ position=pos, name=name)
+ elif expr in ('if', 'elif', 'for'):
+ raise TemplateError(
+ '%s with no expression' % expr,
+ position=pos, name=name)
+ elif expr in ('endif', 'endfor'):
+ raise TemplateError(
+ 'Unexpected %s' % expr,
+ position=pos, name=name)
+ elif expr.startswith('for '):
+ return parse_for(tokens, name, context)
+ elif expr.startswith('default '):
+ return parse_default(tokens, name, context)
+ elif expr.startswith('#'):
+ return ('comment', pos, tokens[0][0]), tokens[1:]
+ return ('expr', pos, tokens[0][0]), tokens[1:]
+
+def parse_cond(tokens, name, context):
+ start = tokens[0][1]
+ pieces = []
+ context = context + ('if',)
+ while 1:
+ if not tokens:
+ raise TemplateError(
+ 'Missing {{endif}}',
+ position=start, name=name)
+ if (isinstance(tokens[0], tuple)
+ and tokens[0][0] == 'endif'):
+ return ('cond', start) + tuple(pieces), tokens[1:]
+ next, tokens = parse_one_cond(tokens, name, context)
+ pieces.append(next)
+
+def parse_one_cond(tokens, name, context):
+ (first, pos), tokens = tokens[0], tokens[1:]
+ content = []
+ if first.endswith(':'):
+ first = first[:-1]
+ if first.startswith('if '):
+ part = ('if', pos, first[3:].lstrip(), content)
+ elif first.startswith('elif '):
+ part = ('elif', pos, first[5:].lstrip(), content)
+ elif first == 'else':
+ part = ('else', pos, None, content)
+ else:
+ assert 0, "Unexpected token %r at %s" % (first, pos)
+ while 1:
+ if not tokens:
+ raise TemplateError(
+ 'No {{endif}}',
+ position=pos, name=name)
+ if (isinstance(tokens[0], tuple)
+ and (tokens[0][0] == 'endif'
+ or tokens[0][0].startswith('elif ')
+ or tokens[0][0] == 'else')):
+ return part, tokens
+ next, tokens = parse_expr(tokens, name, context)
+ content.append(next)
+
+def parse_for(tokens, name, context):
+ first, pos = tokens[0]
+ tokens = tokens[1:]
+ context = ('for',) + context
+ content = []
+ assert first.startswith('for ')
+ if first.endswith(':'):
+ first = first[:-1]
+ first = first[3:].strip()
+ match = in_re.search(first)
+ if not match:
+ raise TemplateError(
+ 'Bad for (no "in") in %r' % first,
+ position=pos, name=name)
+ vars = first[:match.start()]
+ if '(' in vars:
+ raise TemplateError(
+ 'You cannot have () in the variable section of a for loop (%r)'
+ % vars, position=pos, name=name)
+ vars = tuple([
+ v.strip() for v in first[:match.start()].split(',')
+ if v.strip()])
+ expr = first[match.end():]
+ while 1:
+ if not tokens:
+ raise TemplateError(
+ 'No {{endfor}}',
+ position=pos, name=name)
+ if (isinstance(tokens[0], tuple)
+ and tokens[0][0] == 'endfor'):
+ return ('for', pos, vars, expr, content), tokens[1:]
+ next, tokens = parse_expr(tokens, name, context)
+ content.append(next)
+
+def parse_default(tokens, name, context):
+ first, pos = tokens[0]
+ assert first.startswith('default ')
+ first = first.split(None, 1)[1]
+ parts = first.split('=', 1)
+ if len(parts) == 1:
+ raise TemplateError(
+ "Expression must be {{default var=value}}; no = found in %r" % first,
+ position=pos, name=name)
+ var = parts[0].strip()
+ if ',' in var:
+ raise TemplateError(
+ "{{default x, y = ...}} is not supported",
+ position=pos, name=name)
+ if not var_re.search(var):
+ raise TemplateError(
+ "Not a valid variable name for {{default}}: %r"
+ % var, position=pos, name=name)
+ expr = parts[1].strip()
+ return ('default', pos, var, expr), tokens[1:]
+
+_fill_command_usage = """\
+%prog [OPTIONS] TEMPLATE arg=value
+
+Use py:arg=value to set a Python value; otherwise all values are
+strings.
+"""
+
+def fill_command(args=None):
+ import sys, optparse, pkg_resources, os
+ if args is None:
+ args = sys.argv[1:]
+ dist = pkg_resources.get_distribution('Paste')
+ parser = optparse.OptionParser(
+ version=str(dist),
+ usage=_fill_command_usage)
+ parser.add_option(
+ '-o', '--output',
+ dest='output',
+ metavar="FILENAME",
+ help="File to write output to (default stdout)")
+ parser.add_option(
+ '--html',
+ dest='use_html',
+ action='store_true',
+ help="Use HTML style filling (including automatic HTML quoting)")
+ parser.add_option(
+ '--env',
+ dest='use_env',
+ action='store_true',
+ help="Put the environment in as top-level variables")
+ options, args = parser.parse_args(args)
+ if len(args) < 1:
+        print 'You must give a template filename'
+        sys.exit(2)
+ template_name = args[0]
+ args = args[1:]
+ vars = {}
+ if options.use_env:
+ vars.update(os.environ)
+ for value in args:
+ if '=' not in value:
+ print 'Bad argument: %r' % value
+ sys.exit(2)
+ name, value = value.split('=', 1)
+ if name.startswith('py:'):
+            name = name[3:]
+ value = eval(value)
+ vars[name] = value
+ if template_name == '-':
+ template_content = sys.stdin.read()
+ template_name = '<stdin>'
+ else:
+ f = open(template_name, 'rb')
+ template_content = f.read()
+ f.close()
+ if options.use_html:
+ TemplateClass = HTMLTemplate
+ else:
+ TemplateClass = Template
+ template = TemplateClass(template_content, name=template_name)
+ result = template.substitute(vars)
+ if options.output:
+ f = open(options.output, 'wb')
+ f.write(result)
+ f.close()
+ else:
+ sys.stdout.write(result)
+
+if __name__ == '__main__':
+ from paste.util.template import fill_command
+ fill_command()
+
+
diff --git a/paste/util/threadedprint.py b/paste/util/threadedprint.py
new file mode 100644
index 0000000..ae7dc0d
--- /dev/null
+++ b/paste/util/threadedprint.py
@@ -0,0 +1,250 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+threadedprint.py
+================
+
+:author: Ian Bicking
+:date: 12 Jul 2004
+
+Multi-threaded printing; allows the output produced via print to be
+separated according to the thread.
+
+To use this, you must install the catcher, like::
+
+ threadedprint.install()
+
+The installation optionally takes one of three parameters:
+
+default
+ The default destination for print statements (e.g., ``sys.stdout``).
+factory
+ A function that will produce the stream for a thread, given the
+ thread's name.
+paramwriter
+ Instead of writing to a file-like stream, this function will be
+ called like ``paramwriter(thread_name, text)`` for every write.
+
+The thread name is the value returned by
+``threading.currentThread().getName()``, a string (typically something
+like Thread-N).
+
+You can also submit file-like objects for specific threads, which will
+override any of these parameters. To do this, call ``register(stream,
+[threadName])``. ``threadName`` is optional, and if not provided the
+stream will be registered for the current thread.
+
+If no specific stream is registered for a thread, and no default has
+been provided, then an error will occur when anything is written to
+``sys.stdout`` (or printed).
+
+Note: the stream's ``write`` method will be called in the thread the
+text came from, so you should consider thread safety, especially if
+multiple threads share the same writer.
+
+Note: if you want access to the original standard out, use
+``sys.__stdout__``.
+
+You may also uninstall this, via::
+
+ threadedprint.uninstall()
+
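+A minimal usage sketch (the captured stream is illustrative)::
+
+    from StringIO import StringIO
+    from paste.util import threadedprint
+
+    threadedprint.install(leave_stdout=True)
+    log = StringIO()
+    threadedprint.register(log)   # capture output from this thread
+    print "hello"                 # written to ``log``, not the console
+    threadedprint.deregister()
+    threadedprint.uninstall()
+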
+TODO
+----
+
+* Something with ``sys.stderr``.
+* Some default handlers. Maybe something that hooks into `logging`.
+* Possibly cache the results of ``factory`` calls. This would be a
+ semantic change.
+
+"""
+
+import threading
+import sys
+from paste.util import filemixin
+
+class PrintCatcher(filemixin.FileMixin):
+
+ def __init__(self, default=None, factory=None, paramwriter=None,
+ leave_stdout=False):
+ assert len(filter(lambda x: x is not None,
+ [default, factory, paramwriter])) <= 1, (
+ "You can only provide one of default, factory, or paramwriter")
+ if leave_stdout:
+ assert not default, (
+ "You cannot pass in both default (%r) and "
+ "leave_stdout=True" % default)
+ default = sys.stdout
+ if default:
+ self._defaultfunc = self._writedefault
+ elif factory:
+ self._defaultfunc = self._writefactory
+ elif paramwriter:
+ self._defaultfunc = self._writeparam
+ else:
+ self._defaultfunc = self._writeerror
+ self._default = default
+ self._factory = factory
+ self._paramwriter = paramwriter
+ self._catchers = {}
+
+ def write(self, v, currentThread=threading.currentThread):
+ name = currentThread().getName()
+ catchers = self._catchers
+ if not catchers.has_key(name):
+ self._defaultfunc(name, v)
+ else:
+ catcher = catchers[name]
+ catcher.write(v)
+
+ def seek(self, *args):
+ # Weird, but Google App Engine is seeking on stdout
+ name = threading.currentThread().getName()
+ catchers = self._catchers
+ if not name in catchers:
+ self._default.seek(*args)
+ else:
+ catchers[name].seek(*args)
+
+    def read(self, *args):
+        name = threading.currentThread().getName()
+        catchers = self._catchers
+        if name not in catchers:
+            return self._default.read(*args)
+        else:
+            return catchers[name].read(*args)
+
+
+ def _writedefault(self, name, v):
+ self._default.write(v)
+
+ def _writefactory(self, name, v):
+ self._factory(name).write(v)
+
+ def _writeparam(self, name, v):
+ self._paramwriter(name, v)
+
+ def _writeerror(self, name, v):
+ assert False, (
+ "There is no PrintCatcher output stream for the thread %r"
+ % name)
+
+ def register(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ self._catchers[name] = catcher
+
+ def deregister(self, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ assert self._catchers.has_key(name), (
+ "There is no PrintCatcher catcher for the thread %r" % name)
+ del self._catchers[name]
+
+_printcatcher = None
+_oldstdout = None
+
+def install(**kw):
+ global _printcatcher, _oldstdout, register, deregister
+ if (not _printcatcher or sys.stdout is not _printcatcher):
+ _oldstdout = sys.stdout
+ _printcatcher = sys.stdout = PrintCatcher(**kw)
+ register = _printcatcher.register
+ deregister = _printcatcher.deregister
+
+def uninstall():
+ global _printcatcher, _oldstdout, register, deregister
+ if _printcatcher:
+ sys.stdout = _oldstdout
+ _printcatcher = _oldstdout = None
+ register = not_installed_error
+ deregister = not_installed_error
+
+def not_installed_error(*args, **kw):
+ assert False, (
+ "threadedprint has not yet been installed (call "
+ "threadedprint.install())")
+
+register = deregister = not_installed_error
+
+class StdinCatcher(filemixin.FileMixin):
+
+ def __init__(self, default=None, factory=None, paramwriter=None):
+ assert len(filter(lambda x: x is not None,
+ [default, factory, paramwriter])) <= 1, (
+ "You can only provide one of default, factory, or paramwriter")
+ if default:
+ self._defaultfunc = self._readdefault
+ elif factory:
+ self._defaultfunc = self._readfactory
+ elif paramwriter:
+ self._defaultfunc = self._readparam
+ else:
+ self._defaultfunc = self._readerror
+ self._default = default
+ self._factory = factory
+ self._paramwriter = paramwriter
+ self._catchers = {}
+
+ def read(self, size=None, currentThread=threading.currentThread):
+ name = currentThread().getName()
+ catchers = self._catchers
+ if not catchers.has_key(name):
+ return self._defaultfunc(name, size)
+ else:
+ catcher = catchers[name]
+ return catcher.read(size)
+
+    def _readdefault(self, name, size):
+        return self._default.read(size)
+
+    def _readfactory(self, name, size):
+        return self._factory(name).read(size)
+
+    def _readparam(self, name, size):
+        return self._paramwriter(name, size)
+
+    def _readerror(self, name, size):
+        assert False, (
+            "There is no StdinCatcher input stream for the thread %r"
+            % name)
+
+ def register(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ self._catchers[name] = catcher
+
+ def deregister(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ assert self._catchers.has_key(name), (
+ "There is no StdinCatcher catcher for the thread %r" % name)
+ del self._catchers[name]
+
+_stdincatcher = None
+_oldstdin = None
+
+def install_stdin(**kw):
+ global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
+ if not _stdincatcher:
+ _oldstdin = sys.stdin
+ _stdincatcher = sys.stdin = StdinCatcher(**kw)
+ register_stdin = _stdincatcher.register
+ deregister_stdin = _stdincatcher.deregister
+
+def uninstall_stdin():
+    global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
+    if _stdincatcher:
+        sys.stdin = _oldstdin
+        _stdincatcher = _oldstdin = None
+        register_stdin = deregister_stdin = not_installed_error_stdin
+
+def not_installed_error_stdin(*args, **kw):
+ assert False, (
+ "threadedprint has not yet been installed for stdin (call "
+ "threadedprint.install_stdin())")
diff --git a/paste/util/threadinglocal.py b/paste/util/threadinglocal.py
new file mode 100644
index 0000000..06f2643
--- /dev/null
+++ b/paste/util/threadinglocal.py
@@ -0,0 +1,43 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Implementation of thread-local storage, for Python versions that don't
+have thread local storage natively.
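+
+Illustrative use (the attribute name is arbitrary)::
+
+    from paste.util.threadinglocal import local
+
+    store = local()
+    store.value = 42    # visible only from the thread that set it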
+"""
+
+try:
+ import threading
+except ImportError:
+ # No threads, so "thread local" means process-global
+ class local(object):
+ pass
+else:
+ try:
+ local = threading.local
+ except AttributeError:
+        # threading.local was added in Python 2.4; define a fallback here
+ import thread
+ class local(object):
+
+ def __init__(self):
+ self.__dict__['__objs'] = {}
+
+ def __getattr__(self, attr, g=thread.get_ident):
+ try:
+ return self.__dict__['__objs'][g()][attr]
+ except KeyError:
+ raise AttributeError(
+ "No variable %s defined for the thread %s"
+ % (attr, g()))
+
+ def __setattr__(self, attr, value, g=thread.get_ident):
+ self.__dict__['__objs'].setdefault(g(), {})[attr] = value
+
+ def __delattr__(self, attr, g=thread.get_ident):
+ try:
+ del self.__dict__['__objs'][g()][attr]
+ except KeyError:
+ raise AttributeError(
+ "No variable %s defined for thread %s"
+ % (attr, g()))
+