Diffstat (limited to 'Tools/scripts')
65 files changed, 825 insertions, 1507 deletions
diff --git a/Tools/scripts/README b/Tools/scripts/README index b1c167ed5a..8c02529a9f 100644 --- a/Tools/scripts/README +++ b/Tools/scripts/README @@ -1,67 +1,63 @@ -This directory contains a collection of executable Python scripts that -are useful while building, extending or managing Python. Some (e.g., -dutree or lll) are also generally useful UNIX tools. +This directory contains a collection of executable Python scripts that are +useful while building, extending or managing Python. Some (e.g., dutree or lll) +are also generally useful UNIX tools. -See also the Demo/scripts directory! - -analyze_dxp.py Analyzes the result of sys.getdxp() -byext.py Print lines/words/chars stats of files by extension -byteyears.py Print product of a file's size and age -checkappend.py Search for multi-argument .append() calls -checkpyc.py Check presence and validity of ".pyc" files -classfix.py Convert old class syntax to new -cleanfuture.py Fix reduntant Python __future__ statements -combinerefs.py A helper for analyzing PYTHONDUMPREFS output. -copytime.py Copy one file's atime and mtime to another -crlf.py Change CRLF line endings to LF (Windows to Unix) -cvsfiles.py Print a list of files that are under CVS -db2pickle.py Dump a database file to a pickle -diff.py Print file diffs in context, unified, or ndiff formats -dutree.py Format du(1) output as a tree sorted by size -eptags.py Create Emacs TAGS file for Python modules +2to3 Main script for running the 2to3 conversion tool +analyze_dxp.py Analyzes the result of sys.getdxp() +byext.py Print lines/words/chars stats of files by extension +byteyears.py Print product of a file's size and age +checkpyc.py Check presence and validity of ".pyc" files +cleanfuture.py Fix redundant Python __future__ statements +combinerefs.py A helper for analyzing PYTHONDUMPREFS output +copytime.py Copy one file's atime and mtime to another +crlf.py Change CRLF line endings to LF (Windows to Unix) +db2pickle.py Dump a database file to a pickle +diff.py Print file diffs in context, unified, or ndiff formats +dutree.py Format du(1) output as a tree sorted by size +eptags.py Create Emacs TAGS file for Python modules find_recursionlimit.py Find the maximum recursion limit on this machine -finddiv.py A grep-like tool that looks for division operators -findlinksto.py Recursively find symbolic links to a given path prefix -findnocoding.py Find source files which need an encoding declaration -fixcid.py Massive identifier substitution on C source files -fixdiv.py Tool to fix division operators. -fixheader.py Add some cpp magic to a C include file -fixnotice.py Fix the copyright notice in source files -fixps.py Fix Python scripts' first line (if #!) -ftpmirror.py FTP mirror script -google.py Open a webbrowser with Google -gprof2html.py Transform gprof(1) output into useful HTML -h2py.py Translate #define's into Python assignments -idle Main program to start IDLE -ifdef.py Remove #if(n)def groups from C sources -lfcr.py Change LF line endings to CRLF (Unix to Windows) -linktree.py Make a copy of a tree with links to original files -lll.py Find and list symbolic links in current directory -logmerge.py Consolidate CVS/RCS logs read from stdin -mailerdaemon.py parse error messages from mailer daemons (Sjoerd&Jack) -md5sum.py Print MD5 checksums of argument files. 
-methfix.py Fix old method syntax def f(self, (a1, ..., aN)): -mkreal.py Turn a symbolic link into a real file or directory -ndiff.py Intelligent diff between text files (Tim Peters) -nm2def.py Create a template for PC/python_nt.def (Marc Lemburg) -objgraph.py Print object graph from nm output on a library -parseentities.py Utility for parsing HTML entity definitions -pathfix.py Change #!/usr/local/bin/python into something else -pdeps.py Print dependencies between Python modules -pickle2db.py Load a pickle generated by db2pickle.py to a database -pindent.py Indent Python code, giving block-closing comments -ptags.py Create vi tags file for Python modules -pydoc Python documentation browser. -pysource.py Find Python source files -redemo.py Basic regular expression demonstration facility -reindent.py Change .py files to use 4-space indents. -rgrep.py Reverse grep through a file (useful for big logfiles) -setup.py Install all scripts listed here -suff.py Sort a list of files by suffix -svneol.py Sets svn:eol-style on all files in directory -texcheck.py Validate Python LaTeX formatting (Raymond Hettinger) -texi2html.py Convert GNU texinfo files into HTML -treesync.py Synchronize source trees (very ideosyncratic) -untabify.py Replace tabs with spaces in argument files -which.py Find a program in $PATH -xxci.py Wrapper for rcsdiff and ci +finddiv.py A grep-like tool that looks for division operators +findlinksto.py Recursively find symbolic links to a given path prefix +findnocoding.py Find source files which need an encoding declaration +fixcid.py Massive identifier substitution on C source files +fixdiv.py Tool to fix division operators. +fixheader.py Add some cpp magic to a C include file +fixnotice.py Fix the copyright notice in source files +fixps.py Fix Python scripts' first line (if #!) 
+ftpmirror.py FTP mirror script +google.py Open a webbrowser with Google +gprof2html.py Transform gprof(1) output into useful HTML +h2py.py Translate #define's into Python assignments +idle3 Main program to start IDLE +ifdef.py Remove #if(n)def groups from C sources +lfcr.py Change LF line endings to CRLF (Unix to Windows) +linktree.py Make a copy of a tree with links to original files +lll.py Find and list symbolic links in current directory +mailerdaemon.py Parse error messages from mailer daemons (Sjoerd&Jack) +make_ctype.py Generate ctype.h replacement in stringobject.c +md5sum.py Print MD5 checksums of argument files +mkreal.py Turn a symbolic link into a real file or directory +ndiff.py Intelligent diff between text files (Tim Peters) +nm2def.py Create a template for PC/python_nt.def (Marc Lemburg) +objgraph.py Print object graph from nm output on a library +parseentities.py Utility for parsing HTML entity definitions +patchcheck.py Perform common checks and cleanup before committing +pathfix.py Change #!/usr/local/bin/python into something else +pdeps.py Print dependencies between Python modules +pickle2db.py Load a pickle generated by db2pickle.py to a database +pindent.py Indent Python code, giving block-closing comments +ptags.py Create vi tags file for Python modules +pydoc3 Python documentation browser +pysource.py Find Python source files +redemo.py Basic regular expression demonstration facility +reindent.py Change .py files to use 4-space indents +reindent-rst.py Fix-up reStructuredText file whitespace +rgrep.py Reverse grep through a file (useful for big logfiles) +serve.py Small wsgiref-based web server, used in make serve in Doc +suff.py Sort a list of files by suffix +svneol.py Set svn:eol-style on all files in directory +texi2html.py Convert GNU texinfo files into HTML +treesync.py Synchronize source trees (very idiosyncratic) +untabify.py Replace tabs with spaces in argument files +win_add2path.py Add Python to the search path on Windows +which.py Find a program in $PATH diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py new file mode 100755 index 0000000000..ab0ba42c36 --- /dev/null +++ b/Tools/scripts/abitype.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 +# This script converts a C file to use the PEP 384 type definition API +# Usage: abitype.py < old_code > new_code +import re, sys + +###### Replacement of PyTypeObject static instances ############## + +# classify each token, giving it a one-letter code: +# S: static +# T: PyTypeObject +# I: ident +# W: whitespace +# =, {, }, ; : themselves +def classify(): + res = [] + for t,v in tokens: + if t == 'other' and v in "={};": + res.append(v) + elif t == 'ident': + if v == 'PyTypeObject': + res.append('T') + elif v == 'static': + res.append('S') + else: + res.append('I') + elif t == 'ws': + res.append('W') + else: + res.append('.') + return ''.join(res) + +# Obtain a list of fields of a PyTypeObject, in declaration order, +# skipping ob_base +# All comments are dropped from the variable (which are typically +# just the slot names, anyway), and information is discarded whether +# the original type was static. +def get_fields(start, real_end): + pos = start + # static? 
+ if tokens[pos][1] == 'static': + pos += 2 + # PyTypeObject + pos += 2 + # name + name = tokens[pos][1] + pos += 1 + while tokens[pos][1] != '{': + pos += 1 + pos += 1 + # PyVarObject_HEAD_INIT + while tokens[pos][0] in ('ws', 'comment'): + pos += 1 + if tokens[pos][1] != 'PyVarObject_HEAD_INIT': + raise Exception('%s has no PyVarObject_HEAD_INIT' % name) + while tokens[pos][1] != ')': + pos += 1 + pos += 1 + # field definitions: various tokens, comma-separated + fields = [] + while True: + while tokens[pos][0] in ('ws', 'comment'): + pos += 1 + end = pos + while tokens[end][1] not in ',}': + if tokens[end][1] == '(': + nesting = 1 + while nesting: + end += 1 + if tokens[end][1] == '(': nesting+=1 + if tokens[end][1] == ')': nesting-=1 + end += 1 + assert end < real_end + # join field, excluding separator and trailing ws + end1 = end-1 + while tokens[end1][0] in ('ws', 'comment'): + end1 -= 1 + fields.append(''.join(t[1] for t in tokens[pos:end1+1])) + if tokens[end][1] == '}': + break + pos = end+1 + return name, fields + +# List of type slots as of Python 3.2, omitting ob_base +typeslots = [ + 'tp_name', + 'tp_basicsize', + 'tp_itemsize', + 'tp_dealloc', + 'tp_print', + 'tp_getattr', + 'tp_setattr', + 'tp_reserved', + 'tp_repr', + 'tp_as_number', + 'tp_as_sequence', + 'tp_as_mapping', + 'tp_hash', + 'tp_call', + 'tp_str', + 'tp_getattro', + 'tp_setattro', + 'tp_as_buffer', + 'tp_flags', + 'tp_doc', + 'tp_traverse', + 'tp_clear', + 'tp_richcompare', + 'tp_weaklistoffset', + 'tp_iter', + 'iternextfunc', + 'tp_methods', + 'tp_members', + 'tp_getset', + 'tp_base', + 'tp_dict', + 'tp_descr_get', + 'tp_descr_set', + 'tp_dictoffset', + 'tp_init', + 'tp_alloc', + 'tp_new', + 'tp_free', + 'tp_is_gc', + 'tp_bases', + 'tp_mro', + 'tp_cache', + 'tp_subclasses', + 'tp_weaklist', + 'tp_del' + 'tp_version_tag' +] + +# Generate a PyType_Spec definition +def make_slots(name, fields): + res = [] + res.append('static PyType_Slot %s_slots[] = {' % name) + # defaults for spec + spec = { 'tp_itemsize':'0' } + for i, val in enumerate(fields): + if val.endswith('0'): + continue + if typeslots[i] in ('tp_name', 'tp_doc', 'tp_basicsize', + 'tp_itemsize', 'tp_flags'): + spec[typeslots[i]] = val + continue + res.append(' {Py_%s, %s},' % (typeslots[i], val)) + res.append('};') + res.append('static PyType_Spec %s_spec = {' % name) + res.append(' %s,' % spec['tp_name']) + res.append(' %s,' % spec['tp_basicsize']) + res.append(' %s,' % spec['tp_itemsize']) + res.append(' %s,' % spec['tp_flags']) + res.append(' %s_slots,' % name) + res.append('};\n') + return '\n'.join(res) + + +if __name__ == '__main__': + + ############ Simplistic C scanner ################################## + tokenizer = re.compile( + r"(?P<preproc>#.*\n)" + r"|(?P<comment>/\*.*?\*/)" + r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)" + r"|(?P<ws>[ \t\n]+)" + r"|(?P<other>.)", + re.MULTILINE) + + tokens = [] + source = sys.stdin.read() + pos = 0 + while pos != len(source): + m = tokenizer.match(source, pos) + tokens.append([m.lastgroup, m.group()]) + pos += len(tokens[-1][1]) + if tokens[-1][0] == 'preproc': + # continuation lines are considered + # only in preprocess statements + while tokens[-1][1].endswith('\\\n'): + nl = source.find('\n', pos) + if nl == -1: + line = source[pos:] + else: + line = source[pos:nl+1] + tokens[-1][1] += line + pos += len(line) + + # Main loop: replace all static PyTypeObjects until + # there are none left. 
+ while 1: + c = classify() + m = re.search('(SW)?TWIW?=W?{.*?};', c) + if not m: + break + start = m.start() + end = m.end() + name, fields = get_fields(start, m) + tokens[start:end] = [('',make_slots(name, fields))] + + # Output result to stdout + for t, v in tokens: + sys.stdout.write(v) diff --git a/Tools/scripts/byext.py b/Tools/scripts/byext.py index e5b090c8e5..b79ff37e8c 100755 --- a/Tools/scripts/byext.py +++ b/Tools/scripts/byext.py @@ -1,10 +1,11 @@ -#! /usr/bin/env python3.0 +#! /usr/bin/env python3 """Show file statistics by extension.""" import os import sys + class Stats: def __init__(self): @@ -28,12 +29,11 @@ class Stats: sys.stderr.write("Can't list %s: %s\n" % (dir, err)) self.addstats("<dir>", "unlistable", 1) return - names.sort() - for name in names: + for name in sorted(names): if name.startswith(".#"): - continue # Skip CVS temp files + continue # Skip CVS temp files if name.endswith("~"): - continue# Skip Emacs backup files + continue # Skip Emacs backup files full = os.path.join(dir, name) if os.path.islink(full): self.addstats("<lnk>", "links", 1) @@ -46,26 +46,25 @@ class Stats: head, ext = os.path.splitext(filename) head, base = os.path.split(filename) if ext == base: - ext = "" # E.g. .cvsignore is deemed not to have an extension + ext = "" # E.g. .cvsignore is deemed not to have an extension ext = os.path.normcase(ext) if not ext: ext = "<none>" self.addstats(ext, "files", 1) try: - f = open(filename, "rb") + with open(filename, "rb") as f: + data = f.read() except IOError as err: sys.stderr.write("Can't open %s: %s\n" % (filename, err)) self.addstats(ext, "unopenable", 1) return - data = f.read() - f.close() self.addstats(ext, "bytes", len(data)) if b'\0' in data: self.addstats(ext, "binary", 1) return if not data: self.addstats(ext, "empty", 1) - #self.addstats(ext, "chars", len(data)) + # self.addstats(ext, "chars", len(data)) lines = str(data, "latin-1").splitlines() self.addstats(ext, "lines", len(lines)) del lines @@ -105,17 +104,20 @@ class Stats: for ext in exts: self.stats[ext]["ext"] = ext cols.insert(0, "ext") + def printheader(): for col in cols: print("%*s" % (colwidth[col], col), end=' ') print() + printheader() for ext in exts: for col in cols: value = self.stats[ext].get(col, "") print("%*s" % (colwidth[col], value), end=' ') print() - printheader() # Another header at the bottom + printheader() # Another header at the bottom + def main(): args = sys.argv[1:] @@ -125,5 +127,6 @@ def main(): s.statargs(args) s.report() + if __name__ == "__main__": main() diff --git a/Tools/scripts/byteyears.py b/Tools/scripts/byteyears.py index f486d26795..490b37f0bf 100755 --- a/Tools/scripts/byteyears.py +++ b/Tools/scripts/byteyears.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Print the product of age and size of each file, in suitable units. # diff --git a/Tools/scripts/checkappend.py b/Tools/scripts/checkappend.py deleted file mode 100755 index 4c74ee5769..0000000000 --- a/Tools/scripts/checkappend.py +++ /dev/null @@ -1,169 +0,0 @@ -#! /usr/bin/env python - -# Released to the public domain, by Tim Peters, 28 February 2000. - -"""checkappend.py -- search for multi-argument .append() calls. - -Usage: specify one or more file or directory paths: - checkappend [-v] file_or_dir [file_or_dir] ... - -Each file_or_dir is checked for multi-argument .append() calls. When -a directory, all .py files in the directory, and recursively in its -subdirectories, are checked. - -Use -v for status msgs. Use -vv for more status msgs. 
- -In the absence of -v, the only output is pairs of the form - - filename(linenumber): - line containing the suspicious append - -Note that this finds multi-argument append calls regardless of whether -they're attached to list objects. If a module defines a class with an -append method that takes more than one argument, calls to that method -will be listed. - -Note that this will not find multi-argument list.append calls made via a -bound method object. For example, this is not caught: - - somelist = [] - push = somelist.append - push(1, 2, 3) -""" - -__version__ = 1, 0, 0 - -import os -import sys -import getopt -import tokenize - -verbose = 0 - -def errprint(*args): - msg = ' '.join(args) - sys.stderr.write(msg) - sys.stderr.write("\n") - -def main(): - args = sys.argv[1:] - global verbose - try: - opts, args = getopt.getopt(sys.argv[1:], "v") - except getopt.error as msg: - errprint(str(msg) + "\n\n" + __doc__) - return - for opt, optarg in opts: - if opt == '-v': - verbose = verbose + 1 - if not args: - errprint(__doc__) - return - for arg in args: - check(arg) - -def check(file): - if os.path.isdir(file) and not os.path.islink(file): - if verbose: - print("%r: listing directory" % (file,)) - names = os.listdir(file) - for name in names: - fullname = os.path.join(file, name) - if ((os.path.isdir(fullname) and - not os.path.islink(fullname)) - or os.path.normcase(name[-3:]) == ".py"): - check(fullname) - return - - try: - f = open(file) - except IOError as msg: - errprint("%r: I/O Error: %s" % (file, msg)) - return - - if verbose > 1: - print("checking %r ..." % (file,)) - - ok = AppendChecker(file, f).run() - if verbose and ok: - print("%r: Clean bill of health." % (file,)) - -[FIND_DOT, - FIND_APPEND, - FIND_LPAREN, - FIND_COMMA, - FIND_STMT] = range(5) - -class AppendChecker: - def __init__(self, fname, file): - self.fname = fname - self.file = file - self.state = FIND_DOT - self.nerrors = 0 - - def run(self): - try: - tokens = tokenize.generate_tokens(self.file.readline) - for _token in tokens: - self.tokeneater(*_token) - except tokenize.TokenError as msg: - errprint("%r: Token Error: %s" % (self.fname, msg)) - self.nerrors = self.nerrors + 1 - return self.nerrors == 0 - - def tokeneater(self, type, token, start, end, line, - NEWLINE=tokenize.NEWLINE, - JUNK=(tokenize.COMMENT, tokenize.NL), - OP=tokenize.OP, - NAME=tokenize.NAME): - - state = self.state - - if type in JUNK: - pass - - elif state is FIND_DOT: - if type is OP and token == ".": - state = FIND_APPEND - - elif state is FIND_APPEND: - if type is NAME and token == "append": - self.line = line - self.lineno = start[0] - state = FIND_LPAREN - else: - state = FIND_DOT - - elif state is FIND_LPAREN: - if type is OP and token == "(": - self.level = 1 - state = FIND_COMMA - else: - state = FIND_DOT - - elif state is FIND_COMMA: - if type is OP: - if token in ("(", "{", "["): - self.level = self.level + 1 - elif token in (")", "}", "]"): - self.level = self.level - 1 - if self.level == 0: - state = FIND_DOT - elif token == "," and self.level == 1: - self.nerrors = self.nerrors + 1 - print("%s(%d):\n%s" % (self.fname, self.lineno, - self.line)) - # don't gripe about this stmt again - state = FIND_STMT - - elif state is FIND_STMT: - if type is NEWLINE: - state = FIND_DOT - - else: - raise SystemError("unknown internal state '%r'" % (state,)) - - self.state = state - -if __name__ == '__main__': - main() diff --git a/Tools/scripts/checkpyc.py b/Tools/scripts/checkpyc.py index 2e8fd5a832..d4fdce2515 100755 --- 
a/Tools/scripts/checkpyc.py +++ b/Tools/scripts/checkpyc.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Check that all ".pyc" files exist and are up-to-date # Uses module 'os' @@ -7,14 +7,17 @@ import os from stat import ST_MTIME import imp +# PEP 3147 compatibility (PYC Repository Directories) +cache_from_source = (imp.cache_from_source if hasattr(imp, 'get_tag') else + lambda path: path + 'c') + + def main(): - silent = 0 - verbose = 0 - if sys.argv[1:]: - if sys.argv[1] == '-v': - verbose = 1 - elif sys.argv[1] == '-s': - silent = 1 + if len(sys.argv) > 1: + verbose = (sys.argv[1] == '-v') + silent = (sys.argv[1] == '-s') + else: + verbose = silent = False MAGIC = imp.get_magic() if not silent: print('Using MAGIC word', repr(MAGIC)) @@ -26,9 +29,8 @@ def main(): continue if not silent: print('Checking ', repr(dirname), '...') - names.sort() - for name in names: - if name[-3:] == '.py': + for name in sorted(names): + if name.endswith('.py'): name = os.path.join(dirname, name) try: st = os.stat(name) @@ -37,30 +39,31 @@ def main(): continue if verbose: print('Check', repr(name), '...') - name_c = name + 'c' + name_c = cache_from_source(name) try: - f = open(name_c, 'r') + with open(name_c, 'rb') as f: + magic_str = f.read(4) + mtime_str = f.read(4) except IOError: print('Cannot open', repr(name_c)) continue - magic_str = f.read(4) - mtime_str = f.read(4) - f.close() if magic_str != MAGIC: print('Bad MAGIC word in ".pyc" file', end=' ') print(repr(name_c)) continue mtime = get_long(mtime_str) - if mtime == 0 or mtime == -1: + if mtime in {0, -1}: print('Bad ".pyc" file', repr(name_c)) elif mtime != st[ST_MTIME]: print('Out-of-date ".pyc" file', end=' ') print(repr(name_c)) + def get_long(s): if len(s) != 4: return -1 - return ord(s[0]) + (ord(s[1])<<8) + (ord(s[2])<<16) + (ord(s[3])<<24) + return s[0] + (s[1] << 8) + (s[2] << 16) + (s[3] << 24) + if __name__ == '__main__': main() diff --git a/Tools/scripts/classfix.py b/Tools/scripts/classfix.py deleted file mode 100755 index 0cd1e4955f..0000000000 --- a/Tools/scripts/classfix.py +++ /dev/null @@ -1,190 +0,0 @@ -#! /usr/bin/env python - -# This script is obsolete -- it is kept for historical purposes only. -# -# Fix Python source files to use the new class definition syntax, i.e., -# the syntax used in Python versions before 0.9.8: -# class C() = base(), base(), ...: ... -# is changed to the current syntax: -# class C(base, base, ...): ... -# -# The script uses heuristics to find class definitions that usually -# work but occasionally can fail; carefully check the output! -# -# Command line arguments are files or directories to be processed. -# Directories are searched recursively for files whose name looks -# like a python module. -# Symbolic links are always ignored (except as explicit directory -# arguments). Of course, the original file is kept as a back-up -# (with a "~" attached to its name). -# -# Changes made are reported to stdout in a diff-like format. -# -# Undoubtedly you can do this using find and sed or perl, but this is -# a nice example of Python code that recurses down a directory tree -# and uses regular expressions. Also note several subtleties like -# preserving the file's mode and avoiding to even write a temp file -# when no changes are needed for a file. -# -# NB: by changing only the function fixline() you can turn this -# into a program for a different change to Python programs... 
- -import sys -import re -import os -from stat import * - -err = sys.stderr.write -dbg = err -rep = sys.stdout.write - -def main(): - bad = 0 - if not sys.argv[1:]: # No arguments - err('usage: ' + sys.argv[0] + ' file-or-directory ...\n') - sys.exit(2) - for arg in sys.argv[1:]: - if os.path.isdir(arg): - if recursedown(arg): bad = 1 - elif os.path.islink(arg): - err(arg + ': will not process symbolic links\n') - bad = 1 - else: - if fix(arg): bad = 1 - sys.exit(bad) - -ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$') -def ispython(name): - return ispythonprog.match(name) >= 0 - -def recursedown(dirname): - dbg('recursedown(%r)\n' % (dirname,)) - bad = 0 - try: - names = os.listdir(dirname) - except os.error as msg: - err('%s: cannot list directory: %r\n' % (dirname, msg)) - return 1 - names.sort() - subdirs = [] - for name in names: - if name in (os.curdir, os.pardir): continue - fullname = os.path.join(dirname, name) - if os.path.islink(fullname): pass - elif os.path.isdir(fullname): - subdirs.append(fullname) - elif ispython(name): - if fix(fullname): bad = 1 - for fullname in subdirs: - if recursedown(fullname): bad = 1 - return bad - -def fix(filename): -## dbg('fix(%r)\n' % (filename,)) - try: - f = open(filename, 'r') - except IOError as msg: - err('%s: cannot open: %r\n' % (filename, msg)) - return 1 - head, tail = os.path.split(filename) - tempname = os.path.join(head, '@' + tail) - g = None - # If we find a match, we rewind the file and start over but - # now copy everything to a temp file. - lineno = 0 - while 1: - line = f.readline() - if not line: break - lineno = lineno + 1 - while line[-2:] == '\\\n': - nextline = f.readline() - if not nextline: break - line = line + nextline - lineno = lineno + 1 - newline = fixline(line) - if newline != line: - if g is None: - try: - g = open(tempname, 'w') - except IOError as msg: - f.close() - err('%s: cannot create: %r\n' % (tempname, msg)) - return 1 - f.seek(0) - lineno = 0 - rep(filename + ':\n') - continue # restart from the beginning - rep(repr(lineno) + '\n') - rep('< ' + line) - rep('> ' + newline) - if g is not None: - g.write(newline) - - # End of file - f.close() - if not g: return 0 # No changes - - # Finishing touch -- move files - - # First copy the file's mode to the temp file - try: - statbuf = os.stat(filename) - os.chmod(tempname, statbuf[ST_MODE] & 0o7777) - except os.error as msg: - err('%s: warning: chmod failed (%r)\n' % (tempname, msg)) - # Then make a backup of the original file as filename~ - try: - os.rename(filename, filename + '~') - except os.error as msg: - err('%s: warning: backup failed (%r)\n' % (filename, msg)) - # Now move the temp file to the original file - try: - os.rename(tempname, filename) - except os.error as msg: - err('%s: rename failed (%r)\n' % (filename, msg)) - return 1 - # Return succes - return 0 - -# This expression doesn't catch *all* class definition headers, -# but it's pretty darn close. -classexpr = '^([ \t]*class +[a-zA-Z0-9_]+) *( *) *((=.*)?):' -classprog = re.compile(classexpr) - -# Expressions for finding base class expressions. 
-baseexpr = '^ *(.*) *( *) *$' -baseprog = re.compile(baseexpr) - -def fixline(line): - if classprog.match(line) < 0: # No 'class' keyword -- no change - return line - - (a0, b0), (a1, b1), (a2, b2) = classprog.regs[:3] - # a0, b0 = Whole match (up to ':') - # a1, b1 = First subexpression (up to classname) - # a2, b2 = Second subexpression (=.*) - head = line[:b1] - tail = line[b0:] # Unmatched rest of line - - if a2 == b2: # No base classes -- easy case - return head + ':' + tail - - # Get rid of leading '=' - basepart = line[a2+1:b2] - - # Extract list of base expressions - bases = basepart.split(',') - - # Strip trailing '()' from each base expression - for i in range(len(bases)): - if baseprog.match(bases[i]) >= 0: - x1, y1 = baseprog.regs[1] - bases[i] = bases[i][x1:y1] - - # Join the bases back again and build the new line - basepart = ', '.join(bases) - - return head + '(' + basepart + '):' + tail - -if __name__ == '__main__': - main() diff --git a/Tools/scripts/cleanfuture.py b/Tools/scripts/cleanfuture.py index e6c8c8c670..b48ab60dd6 100644..100755 --- a/Tools/scripts/cleanfuture.py +++ b/Tools/scripts/cleanfuture.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """cleanfuture [-d][-r][-v] path ... diff --git a/Tools/scripts/combinerefs.py b/Tools/scripts/combinerefs.py index 68704dd7b9..e10e49ad7c 100644..100755 --- a/Tools/scripts/combinerefs.py +++ b/Tools/scripts/combinerefs.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """ combinerefs path diff --git a/Tools/scripts/copytime.py b/Tools/scripts/copytime.py index ba4a267d6f..e0220b5443 100755 --- a/Tools/scripts/copytime.py +++ b/Tools/scripts/copytime.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Copy one file's atime and mtime to another diff --git a/Tools/scripts/crlf.py b/Tools/scripts/crlf.py index 3dfa131ed7..f231d292ce 100755 --- a/Tools/scripts/crlf.py +++ b/Tools/scripts/crlf.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 "Replace CRLF with LF in argument files. Print names of changed files." import sys, os @@ -8,16 +8,16 @@ def main(): if os.path.isdir(filename): print(filename, "Directory!") continue - data = open(filename, "rb").read() - if '\0' in data: + with open(filename, "rb") as f: + data = f.read() + if b'\0' in data: print(filename, "Binary!") continue - newdata = data.replace("\r\n", "\n") + newdata = data.replace(b"\r\n", b"\n") if newdata != data: print(filename) - f = open(filename, "wb") - f.write(newdata) - f.close() + with open(filename, "wb") as f: + f.write(newdata) if __name__ == '__main__': main() diff --git a/Tools/scripts/cvsfiles.py b/Tools/scripts/cvsfiles.py deleted file mode 100755 index 9e65dc803d..0000000000 --- a/Tools/scripts/cvsfiles.py +++ /dev/null @@ -1,72 +0,0 @@ -#! /usr/bin/env python - -"""Print a list of files that are mentioned in CVS directories. - -Usage: cvsfiles.py [-n file] [directory] ... - -If the '-n file' option is given, only files under CVS that are newer -than the given file are printed; by default, all files under CVS are -printed. As a special case, if a file does not exist, it is always -printed. 
-""" - -import os -import sys -import stat -import getopt - -cutofftime = 0 - -def main(): - try: - opts, args = getopt.getopt(sys.argv[1:], "n:") - except getopt.error as msg: - print(msg) - print(__doc__, end=' ') - return 1 - global cutofftime - newerfile = None - for o, a in opts: - if o == '-n': - cutofftime = getmtime(a) - if args: - for arg in args: - process(arg) - else: - process(".") - -def process(dir): - cvsdir = 0 - subdirs = [] - names = os.listdir(dir) - for name in names: - fullname = os.path.join(dir, name) - if name == "CVS": - cvsdir = fullname - else: - if os.path.isdir(fullname): - if not os.path.islink(fullname): - subdirs.append(fullname) - if cvsdir: - entries = os.path.join(cvsdir, "Entries") - for e in open(entries).readlines(): - words = e.split('/') - if words[0] == '' and words[1:]: - name = words[1] - fullname = os.path.join(dir, name) - if cutofftime and getmtime(fullname) <= cutofftime: - pass - else: - print(fullname) - for sub in subdirs: - process(sub) - -def getmtime(filename): - try: - st = os.stat(filename) - except os.error: - return 0 - return st[stat.ST_MTIME] - -if __name__ == '__main__': - main() diff --git a/Tools/scripts/db2pickle.py b/Tools/scripts/db2pickle.py index 9dd8bd3b72..a5532a8f3a 100644..100755 --- a/Tools/scripts/db2pickle.py +++ b/Tools/scripts/db2pickle.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Synopsis: %(prog)s [-h|-g|-b|-r|-a] dbfile [ picklefile ] diff --git a/Tools/scripts/diff.py b/Tools/scripts/diff.py index 52dcab1ff9..9efb078fb8 100644..100755 --- a/Tools/scripts/diff.py +++ b/Tools/scripts/diff.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """ Command line interface to difflib.py providing diffs in four formats: * ndiff: lists every line and highlights interline changes. diff --git a/Tools/scripts/dutree.py b/Tools/scripts/dutree.py index dbf4f1aa6c..6b4361ac61 100755 --- a/Tools/scripts/dutree.py +++ b/Tools/scripts/dutree.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Format du output in a tree shape import os, sys, errno diff --git a/Tools/scripts/eptags.py b/Tools/scripts/eptags.py index 8d35dfb31e..671ff11e65 100755 --- a/Tools/scripts/eptags.py +++ b/Tools/scripts/eptags.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """Create a TAGS file for Python programs, usable with GNU Emacs. usage: eptags pyfiles... diff --git a/Tools/scripts/find-uname.py b/Tools/scripts/find-uname.py new file mode 100755 index 0000000000..b6ec1b6d79 --- /dev/null +++ b/Tools/scripts/find-uname.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +""" +For each argument on the command line, look for it in the set of all Unicode +names. 
Arguments are treated as case-insensitive regular expressions, e.g.: + + % find-uname 'small letter a$' 'horizontal line' + *** small letter a$ matches *** + LATIN SMALL LETTER A (97) + COMBINING LATIN SMALL LETTER A (867) + CYRILLIC SMALL LETTER A (1072) + PARENTHESIZED LATIN SMALL LETTER A (9372) + CIRCLED LATIN SMALL LETTER A (9424) + FULLWIDTH LATIN SMALL LETTER A (65345) + *** horizontal line matches *** + HORIZONTAL LINE EXTENSION (9135) +""" + +import unicodedata +import sys +import re + +def main(args): + unicode_names = [] + for ix in range(sys.maxunicode+1): + try: + unicode_names.append((ix, unicodedata.name(chr(ix)))) + except ValueError: # no name for the character + pass + for arg in args: + pat = re.compile(arg, re.I) + matches = [(y,x) for (x,y) in unicode_names + if pat.search(y) is not None] + if matches: + print("***", arg, "matches", "***") + for match in matches: + print("%s (%d)" % match) + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py index 6f75d6da8b..7a8660356a 100644..100755 --- a/Tools/scripts/find_recursionlimit.py +++ b/Tools/scripts/find_recursionlimit.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """Find the maximum recursion limit that prevents interpreter termination. This script finds the maximum safe recursion limit on a particular @@ -106,14 +106,16 @@ def check_limit(n, test_func_name): else: print("Yikes!") -limit = 1000 -while 1: - check_limit(limit, "test_recurse") - check_limit(limit, "test_add") - check_limit(limit, "test_repr") - check_limit(limit, "test_init") - check_limit(limit, "test_getattr") - check_limit(limit, "test_getitem") - check_limit(limit, "test_cpickle") - print("Limit of %d is fine" % limit) - limit = limit + 100 +if __name__ == '__main__': + + limit = 1000 + while 1: + check_limit(limit, "test_recurse") + check_limit(limit, "test_add") + check_limit(limit, "test_repr") + check_limit(limit, "test_init") + check_limit(limit, "test_getattr") + check_limit(limit, "test_getitem") + check_limit(limit, "test_cpickle") + print("Limit of %d is fine" % limit) + limit = limit + 100 diff --git a/Tools/scripts/finddiv.py b/Tools/scripts/finddiv.py index 558791fcc0..f24a702966 100755 --- a/Tools/scripts/finddiv.py +++ b/Tools/scripts/finddiv.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """finddiv - a grep-like tool that looks for division operators. diff --git a/Tools/scripts/findlinksto.py b/Tools/scripts/findlinksto.py index d3da7e4e11..b4c09ef1ce 100755 --- a/Tools/scripts/findlinksto.py +++ b/Tools/scripts/findlinksto.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # findlinksto # diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py index 78fc8efeba..a494a480f0 100755 --- a/Tools/scripts/findnocoding.py +++ b/Tools/scripts/findnocoding.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """List all those Python files that require a coding directive @@ -76,29 +76,31 @@ usage = """Usage: %s [-cd] paths... 
-c: recognize Python source files trying to compile them -d: debug output""" % sys.argv[0] -try: - opts, args = getopt.getopt(sys.argv[1:], 'cd') -except getopt.error as msg: - print(msg, file=sys.stderr) - print(usage, file=sys.stderr) - sys.exit(1) - -is_python = pysource.looks_like_python -debug = False - -for o, a in opts: - if o == '-c': - is_python = pysource.can_be_compiled - elif o == '-d': - debug = True - -if not args: - print(usage, file=sys.stderr) - sys.exit(1) - -for fullpath in pysource.walk_python_files(args, is_python): - if debug: - print("Testing for coding: %s" % fullpath) - result = needs_declaration(fullpath) - if result: - print(fullpath) +if __name__ == '__main__': + + try: + opts, args = getopt.getopt(sys.argv[1:], 'cd') + except getopt.error as msg: + print(msg, file=sys.stderr) + print(usage, file=sys.stderr) + sys.exit(1) + + is_python = pysource.looks_like_python + debug = False + + for o, a in opts: + if o == '-c': + is_python = pysource.can_be_compiled + elif o == '-d': + debug = True + + if not args: + print(usage, file=sys.stderr) + sys.exit(1) + + for fullpath in pysource.walk_python_files(args, is_python): + if debug: + print("Testing for coding: %s" % fullpath) + result = needs_declaration(fullpath) + if result: + print(fullpath) diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py index b21a8365f2..87e2a0929f 100755 --- a/Tools/scripts/fixcid.py +++ b/Tools/scripts/fixcid.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Perform massive identifier substitution on C source files. # This actually tokenizes the files (to some extent) so it can @@ -292,7 +292,7 @@ def addsubst(substfile): if not words: continue if len(words) == 3 and words[0] == 'struct': words[:2] = [words[0] + ' ' + words[1]] - elif len(words) <> 2: + elif len(words) != 2: err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line)) continue if Reverse: diff --git a/Tools/scripts/fixdiv.py b/Tools/scripts/fixdiv.py index 8b15cc6595..4ecbea15e8 100755 --- a/Tools/scripts/fixdiv.py +++ b/Tools/scripts/fixdiv.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """fixdiv - tool to fix division operators. diff --git a/Tools/scripts/fixheader.py b/Tools/scripts/fixheader.py index 1208031357..ec840575b2 100755 --- a/Tools/scripts/fixheader.py +++ b/Tools/scripts/fixheader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Add some standard cpp magic to a header file diff --git a/Tools/scripts/fixnotice.py b/Tools/scripts/fixnotice.py index d35a339af5..aac8697708 100755 --- a/Tools/scripts/fixnotice.py +++ b/Tools/scripts/fixnotice.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """(Ostensibly) fix copyright notices in files. diff --git a/Tools/scripts/fixps.py b/Tools/scripts/fixps.py index fd2ca71496..b002261206 100755 --- a/Tools/scripts/fixps.py +++ b/Tools/scripts/fixps.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Fix Python script(s) to reference the interpreter via /usr/bin/env python. # Warning: this overwrites the file without making a backup. diff --git a/Tools/scripts/ftpmirror.py b/Tools/scripts/ftpmirror.py index b79db1ac5c..9e8be1daf8 100755 --- a/Tools/scripts/ftpmirror.py +++ b/Tools/scripts/ftpmirror.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """Mirror a remote ftp subtree into a local directory tree. 
diff --git a/Tools/scripts/get-remote-certificate.py b/Tools/scripts/get-remote-certificate.py new file mode 100755 index 0000000000..5811f202ed --- /dev/null +++ b/Tools/scripts/get-remote-certificate.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# +# fetch the certificate that the server(s) are providing in PEM form +# +# args are HOST:PORT [, HOST:PORT...] +# +# By Bill Janssen. + +import re +import os +import sys +import tempfile + + +def fetch_server_certificate (host, port): + + def subproc(cmd): + from subprocess import Popen, PIPE, STDOUT + proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) + status = proc.wait() + output = proc.stdout.read() + return status, output + + def strip_to_x509_cert(certfile_contents, outfile=None): + m = re.search(br"^([-]+BEGIN CERTIFICATE[-]+[\r]*\n" + br".*[\r]*^[-]+END CERTIFICATE[-]+)$", + certfile_contents, re.MULTILINE | re.DOTALL) + if not m: + return None + else: + tn = tempfile.mktemp() + fp = open(tn, "wb") + fp.write(m.group(1) + b"\n") + fp.close() + try: + tn2 = (outfile or tempfile.mktemp()) + status, output = subproc(r'openssl x509 -in "%s" -out "%s"' % + (tn, tn2)) + if status != 0: + raise RuntimeError('OpenSSL x509 failed with status %s and ' + 'output: %r' % (status, output)) + fp = open(tn2, 'rb') + data = fp.read() + fp.close() + os.unlink(tn2) + return data + finally: + os.unlink(tn) + + if sys.platform.startswith("win"): + tfile = tempfile.mktemp() + fp = open(tfile, "w") + fp.write("quit\n") + fp.close() + try: + status, output = subproc( + 'openssl s_client -connect "%s:%s" -showcerts < "%s"' % + (host, port, tfile)) + finally: + os.unlink(tfile) + else: + status, output = subproc( + 'openssl s_client -connect "%s:%s" -showcerts < /dev/null' % + (host, port)) + if status != 0: + raise RuntimeError('OpenSSL connect failed with status %s and ' + 'output: %r' % (status, output)) + certtext = strip_to_x509_cert(output) + if not certtext: + raise ValueError("Invalid response received from server at %s:%s" % + (host, port)) + return certtext + + +if __name__ == "__main__": + if len(sys.argv) < 2: + sys.stderr.write( + "Usage: %s HOSTNAME:PORTNUMBER [, HOSTNAME:PORTNUMBER...]\n" % + sys.argv[0]) + sys.exit(1) + for arg in sys.argv[1:]: + host, port = arg.split(":") + sys.stdout.buffer.write(fetch_server_certificate(host, int(port))) + sys.exit(0) diff --git a/Tools/scripts/google.py b/Tools/scripts/google.py index 6219c2d4bc..12152bb5f8 100755 --- a/Tools/scripts/google.py +++ b/Tools/scripts/google.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 import sys, webbrowser diff --git a/Tools/scripts/gprof2html.py b/Tools/scripts/gprof2html.py index cb01c2ce68..6c899d997c 100755 --- a/Tools/scripts/gprof2html.py +++ b/Tools/scripts/gprof2html.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python2.3 +#! 
/usr/bin/env python32.3 """Transform gprof(1) output into useful HTML.""" @@ -19,17 +19,19 @@ trailer = """\ </html> """ -def add_escapes(input): - for line in input: - yield cgi.escape(line) +def add_escapes(filename): + with open(filename) as fp: + for line in fp: + yield cgi.escape(line) + def main(): filename = "gprof.out" if sys.argv[1:]: filename = sys.argv[1] outputfilename = filename + ".html" - input = add_escapes(file(filename)) - output = file(outputfilename, "w") + input = add_escapes(filename) + output = open(outputfilename, "w") output.write(header % filename) for line in input: output.write(line) diff --git a/Tools/scripts/h2py.py b/Tools/scripts/h2py.py index 9b5b6971f9..4f871d9010 100755 --- a/Tools/scripts/h2py.py +++ b/Tools/scripts/h2py.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Read #define's and translate to Python code. # Handle #include statements. @@ -49,13 +49,12 @@ except KeyError: try: searchdirs=os.environ['INCLUDE'].split(';') except KeyError: + searchdirs=['/usr/include'] try: - if sys.platform.startswith("atheos"): - searchdirs=os.environ['C_INCLUDE_PATH'].split(':') - else: - raise KeyError + searchdirs.insert(0, os.path.join('/usr/include', + os.environ['MULTIARCH'])) except KeyError: - searchdirs=['/usr/include'] + pass def main(): global filedict diff --git a/Tools/scripts/ifdef.py b/Tools/scripts/ifdef.py index 2ed7a6667b..46167ad843 100755 --- a/Tools/scripts/ifdef.py +++ b/Tools/scripts/ifdef.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Selectively preprocess #ifdef / #ifndef statements. # Usage: diff --git a/Tools/scripts/lfcr.py b/Tools/scripts/lfcr.py index 1b9a5b7d1f..bf8fe1c245 100755 --- a/Tools/scripts/lfcr.py +++ b/Tools/scripts/lfcr.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 "Replace LF with CRLF in argument files. Print names of changed files." @@ -9,16 +9,16 @@ def main(): if os.path.isdir(filename): print(filename, "Directory!") continue - data = open(filename, "rb").read() - if '\0' in data: + with open(filename, "rb") as f: + data = f.read() + if b'\0' in data: print(filename, "Binary!") continue - newdata = re.sub("\r?\n", "\r\n", data) + newdata = re.sub(b"\r?\n", b"\r\n", data) if newdata != data: print(filename) - f = open(filename, "wb") - f.write(newdata) - f.close() + with open(filename, "wb") as f: + f.write(newdata) if __name__ == '__main__': main() diff --git a/Tools/scripts/linktree.py b/Tools/scripts/linktree.py index 748b0424bb..982f4800ba 100755 --- a/Tools/scripts/linktree.py +++ b/Tools/scripts/linktree.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # linktree # diff --git a/Tools/scripts/lll.py b/Tools/scripts/lll.py index 5ee1504db5..aa4e55091e 100755 --- a/Tools/scripts/lll.py +++ b/Tools/scripts/lll.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Find symbolic links and show where they point to. # Arguments are directories to search; default is current directory. diff --git a/Tools/scripts/logmerge.py b/Tools/scripts/logmerge.py deleted file mode 100755 index 06750b0c2b..0000000000 --- a/Tools/scripts/logmerge.py +++ /dev/null @@ -1,185 +0,0 @@ -#! /usr/bin/env python - -"""Consolidate a bunch of CVS or RCS logs read from stdin. - -Input should be the output of a CVS or RCS logging command, e.g. - - cvs log -rrelease14: - -which dumps all log messages from release1.4 upwards (assuming that -release 1.4 was tagged with tag 'release14'). Note the trailing -colon! 
- -This collects all the revision records and outputs them sorted by date -rather than by file, collapsing duplicate revision record, i.e., -records with the same message for different files. - -The -t option causes it to truncate (discard) the last revision log -entry; this is useful when using something like the above cvs log -command, which shows the revisions including the given tag, while you -probably want everything *since* that tag. - -The -r option reverses the output (oldest first; the default is oldest -last). - -The -b tag option restricts the output to *only* checkin messages -belonging to the given branch tag. The form -b HEAD restricts the -output to checkin messages belonging to the CVS head (trunk). (It -produces some output if tag is a non-branch tag, but this output is -not very useful.) - --h prints this message and exits. - -XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7 -from their output. -""" - -import sys, errno, getopt, re - -sep1 = '='*77 + '\n' # file separator -sep2 = '-'*28 + '\n' # revision separator - -def main(): - """Main program""" - truncate_last = 0 - reverse = 0 - branch = None - opts, args = getopt.getopt(sys.argv[1:], "trb:h") - for o, a in opts: - if o == '-t': - truncate_last = 1 - elif o == '-r': - reverse = 1 - elif o == '-b': - branch = a - elif o == '-h': - print(__doc__) - sys.exit(0) - database = [] - while 1: - chunk = read_chunk(sys.stdin) - if not chunk: - break - records = digest_chunk(chunk, branch) - if truncate_last: - del records[-1] - database[len(database):] = records - database.sort() - if not reverse: - database.reverse() - format_output(database) - -def read_chunk(fp): - """Read a chunk -- data for one file, ending with sep1. - - Split the chunk in parts separated by sep2. - - """ - chunk = [] - lines = [] - while 1: - line = fp.readline() - if not line: - break - if line == sep1: - if lines: - chunk.append(lines) - break - if line == sep2: - if lines: - chunk.append(lines) - lines = [] - else: - lines.append(line) - return chunk - -def digest_chunk(chunk, branch=None): - """Digest a chunk -- extract working file name and revisions""" - lines = chunk[0] - key = 'Working file:' - keylen = len(key) - for line in lines: - if line[:keylen] == key: - working_file = line[keylen:].strip() - break - else: - working_file = None - if branch is None: - pass - elif branch == "HEAD": - branch = re.compile(r"^\d+\.\d+$") - else: - revisions = {} - key = 'symbolic names:\n' - found = 0 - for line in lines: - if line == key: - found = 1 - elif found: - if line[0] in '\t ': - tag, rev = line.split() - if tag[-1] == ':': - tag = tag[:-1] - revisions[tag] = rev - else: - found = 0 - rev = revisions.get(branch) - branch = re.compile(r"^<>$") # <> to force a mismatch by default - if rev: - if rev.find('.0.') >= 0: - rev = rev.replace('.0.', '.') - branch = re.compile(r"^" + re.escape(rev) + r"\.\d+$") - records = [] - for lines in chunk[1:]: - revline = lines[0] - dateline = lines[1] - text = lines[2:] - words = dateline.split() - author = None - if len(words) >= 3 and words[0] == 'date:': - dateword = words[1] - timeword = words[2] - if timeword[-1:] == ';': - timeword = timeword[:-1] - date = dateword + ' ' + timeword - if len(words) >= 5 and words[3] == 'author:': - author = words[4] - if author[-1:] == ';': - author = author[:-1] - else: - date = None - text.insert(0, revline) - words = revline.split() - if len(words) >= 2 and words[0] == 'revision': - rev = words[1] - else: - # No 'revision' line -- weird... 
- rev = None - text.insert(0, revline) - if branch: - if rev is None or not branch.match(rev): - continue - records.append((date, working_file, rev, author, text)) - return records - -def format_output(database): - prevtext = None - prev = [] - database.append((None, None, None, None, None)) # Sentinel - for (date, working_file, rev, author, text) in database: - if text != prevtext: - if prev: - print(sep2, end=' ') - for (p_date, p_working_file, p_rev, p_author) in prev: - print(p_date, p_author, p_working_file, p_rev) - sys.stdout.writelines(prevtext) - prev = [] - prev.append((date, working_file, rev, author)) - prevtext = text - -if __name__ == '__main__': - try: - main() - except IOError as e: - if e.errno != errno.EPIPE: - raise diff --git a/Tools/scripts/mailerdaemon.py b/Tools/scripts/mailerdaemon.py index 4934b92eaa..aeb451e942 100755 --- a/Tools/scripts/mailerdaemon.py +++ b/Tools/scripts/mailerdaemon.py @@ -1,4 +1,5 @@ -"""mailerdaemon - classes to parse mailer-daemon messages""" +#!/usr/bin/env python3 +"""Classes to parse mailer-daemon messages.""" import calendar import email.message @@ -6,7 +7,10 @@ import re import os import sys -Unparseable = 'mailerdaemon.Unparseable' + +class Unparseable(Exception): + pass + class ErrorMessage(email.message.Message): def __init__(self): @@ -18,8 +22,10 @@ class ErrorMessage(email.message.Message): if not sub: return 0 sub = sub.lower() - if sub.startswith('waiting mail'): return 1 - if 'warning' in sub: return 1 + if sub.startswith('waiting mail'): + return 1 + if 'warning' in sub: + return 1 self.sub = sub return 0 @@ -145,14 +151,17 @@ def emparse_list(fp, sub): errors.append(' '.join((email.strip()+': '+reason).split())) return errors -EMPARSERS = [emparse_list, ] +EMPARSERS = [emparse_list] def sort_numeric(a, b): a = int(a) b = int(b) - if a < b: return -1 - elif a > b: return 1 - else: return 0 + if a < b: + return -1 + elif a > b: + return 1 + else: + return 0 def parsedir(dir, modify): os.chdir(dir) diff --git a/Tools/scripts/make_ctype.py b/Tools/scripts/make_ctype.py index 359d6b3160..afee1c5833 100644..100755 --- a/Tools/scripts/make_ctype.py +++ b/Tools/scripts/make_ctype.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Script that generates the ctype.h-replacement in stringobject.c.""" NAMES = ("LOWER", "UPPER", "ALPHA", "DIGIT", "XDIGIT", "ALNUM", "SPACE") diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py index 140c0b3e1a..521960c17d 100644..100755 --- a/Tools/scripts/md5sum.py +++ b/Tools/scripts/md5sum.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """Python utility to print MD5 checksums of argument files. """ @@ -20,7 +20,7 @@ file ... : files to sum; '-' or no files means stdin import sys import os import getopt -import md5 +from hashlib import md5 def sum(*files): sts = 0 diff --git a/Tools/scripts/methfix.py b/Tools/scripts/methfix.py deleted file mode 100755 index f5fe7cd6ec..0000000000 --- a/Tools/scripts/methfix.py +++ /dev/null @@ -1,171 +0,0 @@ -#! /usr/bin/env python - -# Fix Python source files to avoid using -# def method(self, (arg1, ..., argn)): -# instead of the more rational -# def method(self, arg1, ..., argn): -# -# Command line arguments are files or directories to be processed. -# Directories are searched recursively for files whose name looks -# like a python module. -# Symbolic links are always ignored (except as explicit directory -# arguments). Of course, the original file is kept as a back-up -# (with a "~" attached to its name). 
-# It complains about binaries (files containing null bytes) -# and about files that are ostensibly not Python files: if the first -# line starts with '#!' and does not contain the string 'python'. -# -# Changes made are reported to stdout in a diff-like format. -# -# Undoubtedly you can do this using find and sed or perl, but this is -# a nice example of Python code that recurses down a directory tree -# and uses regular expressions. Also note several subtleties like -# preserving the file's mode and avoiding to even write a temp file -# when no changes are needed for a file. -# -# NB: by changing only the function fixline() you can turn this -# into a program for a different change to Python programs... - -import sys -import re -import os -from stat import * - -err = sys.stderr.write -dbg = err -rep = sys.stdout.write - -def main(): - bad = 0 - if not sys.argv[1:]: # No arguments - err('usage: ' + sys.argv[0] + ' file-or-directory ...\n') - sys.exit(2) - for arg in sys.argv[1:]: - if os.path.isdir(arg): - if recursedown(arg): bad = 1 - elif os.path.islink(arg): - err(arg + ': will not process symbolic links\n') - bad = 1 - else: - if fix(arg): bad = 1 - sys.exit(bad) - -ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$') -def ispython(name): - return ispythonprog.match(name) >= 0 - -def recursedown(dirname): - dbg('recursedown(%r)\n' % (dirname,)) - bad = 0 - try: - names = os.listdir(dirname) - except os.error as msg: - err('%s: cannot list directory: %r\n' % (dirname, msg)) - return 1 - names.sort() - subdirs = [] - for name in names: - if name in (os.curdir, os.pardir): continue - fullname = os.path.join(dirname, name) - if os.path.islink(fullname): pass - elif os.path.isdir(fullname): - subdirs.append(fullname) - elif ispython(name): - if fix(fullname): bad = 1 - for fullname in subdirs: - if recursedown(fullname): bad = 1 - return bad - -def fix(filename): -## dbg('fix(%r)\n' % (filename,)) - try: - f = open(filename, 'r') - except IOError as msg: - err('%s: cannot open: %r\n' % (filename, msg)) - return 1 - head, tail = os.path.split(filename) - tempname = os.path.join(head, '@' + tail) - g = None - # If we find a match, we rewind the file and start over but - # now copy everything to a temp file. 
- lineno = 0 - while 1: - line = f.readline() - if not line: break - lineno = lineno + 1 - if g is None and '\0' in line: - # Check for binary files - err(filename + ': contains null bytes; not fixed\n') - f.close() - return 1 - if lineno == 1 and g is None and line[:2] == '#!': - # Check for non-Python scripts - words = line[2:].split() - if words and re.search('[pP]ython', words[0]) < 0: - msg = filename + ': ' + words[0] - msg = msg + ' script; not fixed\n' - err(msg) - f.close() - return 1 - while line[-2:] == '\\\n': - nextline = f.readline() - if not nextline: break - line = line + nextline - lineno = lineno + 1 - newline = fixline(line) - if newline != line: - if g is None: - try: - g = open(tempname, 'w') - except IOError as msg: - f.close() - err('%s: cannot create: %r\n' % (tempname, msg)) - return 1 - f.seek(0) - lineno = 0 - rep(filename + ':\n') - continue # restart from the beginning - rep(repr(lineno) + '\n') - rep('< ' + line) - rep('> ' + newline) - if g is not None: - g.write(newline) - - # End of file - f.close() - if not g: return 0 # No changes - - # Finishing touch -- move files - - # First copy the file's mode to the temp file - try: - statbuf = os.stat(filename) - os.chmod(tempname, statbuf[ST_MODE] & 0o7777) - except os.error as msg: - err('%s: warning: chmod failed (%r)\n' % (tempname, msg)) - # Then make a backup of the original file as filename~ - try: - os.rename(filename, filename + '~') - except os.error as msg: - err('%s: warning: backup failed (%r)\n' % (filename, msg)) - # Now move the temp file to the original file - try: - os.rename(tempname, filename) - except os.error as msg: - err('%s: rename failed (%r)\n' % (filename, msg)) - return 1 - # Return succes - return 0 - - -fixpat = '^[ \t]+def +[a-zA-Z0-9_]+ *( *self *, *(( *(.*) *)) *) *:' -fixprog = re.compile(fixpat) - -def fixline(line): - if fixprog.match(line) >= 0: - (a, b), (c, d) = fixprog.regs[1:3] - line = line[:a] + line[c:d] + line[b:] - return line - -if __name__ == '__main__': - main() diff --git a/Tools/scripts/mkreal.py b/Tools/scripts/mkreal.py index 8bc2ec1956..b21909e691 100755 --- a/Tools/scripts/mkreal.py +++ b/Tools/scripts/mkreal.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # mkreal # diff --git a/Tools/scripts/ndiff.py b/Tools/scripts/ndiff.py index c60c8a83ec..2422091dcc 100755 --- a/Tools/scripts/ndiff.py +++ b/Tools/scripts/ndiff.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Module ndiff version 1.7.0 # Released to the public domain 08-Dec-2000, diff --git a/Tools/scripts/nm2def.py b/Tools/scripts/nm2def.py index 9dfb99143f..8f07559e21 100755 --- a/Tools/scripts/nm2def.py +++ b/Tools/scripts/nm2def.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """nm2def.py Helpers to extract symbols from Unix libs and auto-generate diff --git a/Tools/scripts/objgraph.py b/Tools/scripts/objgraph.py index 0975a3bbcd..1e1fce07dd 100755 --- a/Tools/scripts/objgraph.py +++ b/Tools/scripts/objgraph.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # objgraph # diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py index 8d93167ed8..a042d1c24c 100755 --- a/Tools/scripts/parseentities.py +++ b/Tools/scripts/parseentities.py @@ -1,4 +1,4 @@ -#!/usr/local/bin/python +#!/usr/bin/env python3 """ Utility for parsing HTML entity definitions available from: http://www.w3.org/ as e.g. 
@@ -13,7 +13,6 @@ """ import re,sys -import TextTools entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->') @@ -45,7 +44,7 @@ def writefile(f,defs): charcode = repr(charcode) else: charcode = repr(charcode) - comment = TextTools.collapse(comment) + comment = ' '.join(comment.split()) f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment)) f.write('\n}\n') diff --git a/Tools/scripts/patchcheck.py b/Tools/scripts/patchcheck.py index e767edabd9..6a39145890 100644..100755 --- a/Tools/scripts/patchcheck.py +++ b/Tools/scripts/patchcheck.py @@ -1,13 +1,18 @@ +#!/usr/bin/env python3 import re import sys import shutil import os.path import subprocess +import sysconfig import reindent import untabify +SRCDIR = sysconfig.get_config_var('srcdir') + + def n_files_str(count): """Return 'N file(s)' with the proper plurality on 'file'.""" return "{} file{}".format(count, "s" if count != 1 else "") @@ -31,13 +36,25 @@ def status(message, modal=False, info=None): return decorated_fxn +def mq_patches_applied(): + """Check if there are any applied MQ patches.""" + cmd = 'hg qapplied' + with subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as st: + bstdout, _ = st.communicate() + return st.returncode == 0 and bstdout + + @status("Getting the list of files that have been added/changed", info=lambda x: n_files_str(len(x))) def changed_files(): """Get the list of changed or added files from the VCS.""" - if os.path.isdir('.hg'): + if os.path.isdir(os.path.join(SRCDIR, '.hg')): vcs = 'hg' cmd = 'hg status --added --modified --no-status' + if mq_patches_applied(): + cmd += ' --rev qparent' elif os.path.isdir('.svn'): vcs = 'svn' cmd = 'svn status --quiet --non-interactive --ignore-externals' @@ -74,7 +91,7 @@ def normalize_whitespace(file_paths): reindent.makebackup = False # No need to create backups. fixed = [] for path in (x for x in file_paths if x.endswith('.py')): - if reindent.check(path): + if reindent.check(os.path.join(SRCDIR, path)): fixed.append(path) return fixed @@ -84,10 +101,11 @@ def normalize_c_whitespace(file_paths): """Report if any C files """ fixed = [] for path in file_paths: - with open(path, 'r') as f: + abspath = os.path.join(SRCDIR, path) + with open(abspath, 'r') as f: if '\t' not in f.read(): continue - untabify.process(path, 8, verbose=False) + untabify.process(abspath, 8, verbose=False) fixed.append(path) return fixed @@ -98,13 +116,14 @@ ws_re = re.compile(br'\s+(\r?\n)$') def normalize_docs_whitespace(file_paths): fixed = [] for path in file_paths: + abspath = os.path.join(SRCDIR, path) try: - with open(path, 'rb') as f: + with open(abspath, 'rb') as f: lines = f.readlines() new_lines = [ws_re.sub(br'\1', line) for line in lines] if new_lines != lines: - shutil.copyfile(path, path + '.bak') - with open(path, 'wb') as f: + shutil.copyfile(abspath, abspath + '.bak') + with open(abspath, 'wb') as f: f.writelines(new_lines) fixed.append(path) except Exception as err: @@ -150,8 +169,10 @@ def main(): reported_news(special_files) # Test suite run and passed. - print() - print("Did you run the test suite?") + if python_files or c_files: + end = " and check for refleaks?" if c_files else "?" 
+ print() + print("Did you run the test suite" + end) if __name__ == '__main__': diff --git a/Tools/scripts/pathfix.py b/Tools/scripts/pathfix.py index 6cbac10ecb..dd08e0aba3 100755 --- a/Tools/scripts/pathfix.py +++ b/Tools/scripts/pathfix.py @@ -30,20 +30,24 @@ dbg = err rep = sys.stdout.write new_interpreter = None +preserve_timestamps = False def main(): global new_interpreter - usage = ('usage: %s -i /interpreter file-or-directory ...\n' % + global preserve_timestamps + usage = ('usage: %s -i /interpreter -p file-or-directory ...\n' % sys.argv[0]) try: - opts, args = getopt.getopt(sys.argv[1:], 'i:') + opts, args = getopt.getopt(sys.argv[1:], 'i:p') except getopt.error as msg: - err(msg + '\n') + err(str(msg) + '\n') err(usage) sys.exit(2) for o, a in opts: if o == '-i': new_interpreter = a.encode() + if o == '-p': + preserve_timestamps = True if not new_interpreter or not new_interpreter.startswith(b'/') or \ not args: err('-i option or file-or-directory missing\n') @@ -119,9 +123,13 @@ def fix(filename): # Finishing touch -- move files + mtime = None + atime = None # First copy the file's mode to the temp file try: statbuf = os.stat(filename) + mtime = statbuf.st_mtime + atime = statbuf.st_atime os.chmod(tempname, statbuf[ST_MODE] & 0o7777) except os.error as msg: err('%s: warning: chmod failed (%r)\n' % (tempname, msg)) @@ -136,6 +144,13 @@ def fix(filename): except os.error as msg: err('%s: rename failed (%r)\n' % (filename, msg)) return 1 + if preserve_timestamps: + if atime and mtime: + try: + os.utime(filename, (atime, mtime)) + except os.error as msg: + err('%s: reset of timestamp failed (%r)\n' % (filename, msg)) + return 1 # Return succes return 0 diff --git a/Tools/scripts/pdeps.py b/Tools/scripts/pdeps.py index 5c5a05b9ad..f8218ac524 100755 --- a/Tools/scripts/pdeps.py +++ b/Tools/scripts/pdeps.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # pdeps # @@ -76,10 +76,9 @@ def process(filename, table): nextline = fp.readline() if not nextline: break line = line[:-1] + nextline - if m_import.match(line) >= 0: - (a, b), (a1, b1) = m_import.regs[:2] - elif m_from.match(line) >= 0: - (a, b), (a1, b1) = m_from.regs[:2] + m_found = m_import.match(line) or m_from.match(line) + if m_found: + (a, b), (a1, b1) = m_found.regs[:2] else: continue words = line[a1:b1].split(',') # print '#', line, words @@ -87,6 +86,7 @@ def process(filename, table): word = word.strip() if word not in list: list.append(word) + fp.close() # Compute closure (this is in fact totally general) @@ -123,7 +123,7 @@ def closure(table): def inverse(table): inv = {} for key in table.keys(): - if not inv.has_key(key): + if key not in inv: inv[key] = [] for item in table[key]: store(inv, item, key) diff --git a/Tools/scripts/pickle2db.py b/Tools/scripts/pickle2db.py index a43ffae448..b5b6571863 100644..100755 --- a/Tools/scripts/pickle2db.py +++ b/Tools/scripts/pickle2db.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile diff --git a/Tools/scripts/pindent.py b/Tools/scripts/pindent.py index 3f3000d90c..2872dc047e 100755 --- a/Tools/scripts/pindent.py +++ b/Tools/scripts/pindent.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! 
/usr/bin/env python3 # This file contains a class and a main program that perform three # related (though complimentary) formatting operations on Python @@ -79,8 +79,9 @@ # Defaults STEPSIZE = 8 TABSIZE = 8 -EXPANDTABS = 0 +EXPANDTABS = False +import io import re import sys @@ -88,10 +89,11 @@ next = {} next['if'] = next['elif'] = 'elif', 'else', 'end' next['while'] = next['for'] = 'else', 'end' next['try'] = 'except', 'finally' -next['except'] = 'except', 'else', 'end' -next['else'] = next['finally'] = next['def'] = next['class'] = 'end' +next['except'] = 'except', 'else', 'finally', 'end' +next['else'] = next['finally'] = next['with'] = \ + next['def'] = next['class'] = 'end' next['end'] = () -start = 'if', 'while', 'for', 'try', 'def', 'class' +start = 'if', 'while', 'for', 'try', 'with', 'def', 'class' class PythonIndenter: @@ -105,11 +107,11 @@ class PythonIndenter: self.expandtabs = expandtabs self._write = fpo.write self.kwprog = re.compile( - r'^\s*(?P<kw>[a-z]+)' - r'(\s+(?P<id>[a-zA-Z_]\w*))?' + r'^(?:\s|\\\n)*(?P<kw>[a-z]+)' + r'((?:\s|\\\n)+(?P<id>[a-zA-Z_]\w*))?' r'[^\w]') self.endprog = re.compile( - r'^\s*#?\s*end\s+(?P<kw>[a-z]+)' + r'^(?:\s|\\\n)*#?\s*end\s+(?P<kw>[a-z]+)' r'(\s+(?P<id>[a-zA-Z_]\w*))?' r'[^\w]') self.wsprog = re.compile(r'^[ \t]*') @@ -125,7 +127,7 @@ class PythonIndenter: def readline(self): line = self.fpi.readline() - if line: self.lineno = self.lineno + 1 + if line: self.lineno += 1 # end if return line # end def readline @@ -143,27 +145,24 @@ class PythonIndenter: line2 = self.readline() if not line2: break # end if - line = line + line2 + line += line2 # end while return line # end def getline - def putline(self, line, indent = None): - if indent is None: - self.write(line) - return - # end if + def putline(self, line, indent): tabs, spaces = divmod(indent*self.indentsize, self.tabsize) - i = 0 - m = self.wsprog.match(line) - if m: i = m.end() + i = self.wsprog.match(line).end() + line = line[i:] + if line[:1] not in ('\n', '\r', ''): + line = '\t'*tabs + ' '*spaces + line # end if - self.write('\t'*tabs + ' '*spaces + line[i:]) + self.write(line) # end def putline def reformat(self): stack = [] - while 1: + while True: line = self.getline() if not line: break # EOF # end if @@ -173,10 +172,9 @@ class PythonIndenter: kw2 = m.group('kw') if not stack: self.error('unexpected end') - elif stack[-1][0] != kw2: + elif stack.pop()[0] != kw2: self.error('unmatched end') # end if - del stack[-1:] self.putline(line, len(stack)) continue # end if @@ -208,23 +206,23 @@ class PythonIndenter: def delete(self): begin_counter = 0 end_counter = 0 - while 1: + while True: line = self.getline() if not line: break # EOF # end if m = self.endprog.match(line) if m: - end_counter = end_counter + 1 + end_counter += 1 continue # end if m = self.kwprog.match(line) if m: kw = m.group('kw') if kw in start: - begin_counter = begin_counter + 1 + begin_counter += 1 # end if # end if - self.putline(line) + self.write(line) # end while if begin_counter - end_counter < 0: sys.stderr.write('Warning: input contained more end tags than expected\n') @@ -234,17 +232,12 @@ class PythonIndenter: # end def delete def complete(self): - self.indentsize = 1 stack = [] todo = [] - thisid = '' - current, firstkw, lastkw, topid = 0, '', '', '' - while 1: + currentws = thisid = firstkw = lastkw = topid = '' + while True: line = self.getline() - i = 0 - m = self.wsprog.match(line) - if m: i = m.end() - # end if + i = self.wsprog.match(line).end() m = self.endprog.match(line) if m: thiskw = 'end' @@ 
-269,7 +262,9 @@ class PythonIndenter: thiskw = '' # end if # end if - indent = len(line[:i].expandtabs(self.tabsize)) + indentws = line[:i] + indent = len(indentws.expandtabs(self.tabsize)) + current = len(currentws.expandtabs(self.tabsize)) while indent < current: if firstkw: if topid: @@ -278,11 +273,11 @@ class PythonIndenter: else: s = '# end %s\n' % firstkw # end if - self.putline(s, current) + self.write(currentws + s) firstkw = lastkw = '' # end if - current, firstkw, lastkw, topid = stack[-1] - del stack[-1] + currentws, firstkw, lastkw, topid = stack.pop() + current = len(currentws.expandtabs(self.tabsize)) # end while if indent == current and firstkw: if thiskw == 'end': @@ -297,18 +292,18 @@ class PythonIndenter: else: s = '# end %s\n' % firstkw # end if - self.putline(s, current) + self.write(currentws + s) firstkw = lastkw = topid = '' # end if # end if if indent > current: - stack.append((current, firstkw, lastkw, topid)) + stack.append((currentws, firstkw, lastkw, topid)) if thiskw and thiskw not in start: # error thiskw = '' # end if - current, firstkw, lastkw, topid = \ - indent, thiskw, thiskw, thisid + currentws, firstkw, lastkw, topid = \ + indentws, thiskw, thiskw, thisid # end if if thiskw: if thiskw in start: @@ -326,7 +321,6 @@ class PythonIndenter: self.write(line) # end while # end def complete - # end class PythonIndenter # Simplified user interface @@ -352,116 +346,86 @@ def reformat_filter(input = sys.stdin, output = sys.stdout, pi.reformat() # end def reformat_filter -class StringReader: - def __init__(self, buf): - self.buf = buf - self.pos = 0 - self.len = len(self.buf) - # end def __init__ - def read(self, n = 0): - if n <= 0: - n = self.len - self.pos - else: - n = min(n, self.len - self.pos) - # end if - r = self.buf[self.pos : self.pos + n] - self.pos = self.pos + n - return r - # end def read - def readline(self): - i = self.buf.find('\n', self.pos) - return self.read(i + 1 - self.pos) - # end def readline - def readlines(self): - lines = [] - line = self.readline() - while line: - lines.append(line) - line = self.readline() - # end while - return lines - # end def readlines - # seek/tell etc. 
are left as an exercise for the reader -# end class StringReader - -class StringWriter: - def __init__(self): - self.buf = '' - # end def __init__ - def write(self, s): - self.buf = self.buf + s - # end def write - def getvalue(self): - return self.buf - # end def getvalue -# end class StringWriter - def complete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS): - input = StringReader(source) - output = StringWriter() + input = io.StringIO(source) + output = io.StringIO() pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs) pi.complete() return output.getvalue() # end def complete_string def delete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS): - input = StringReader(source) - output = StringWriter() + input = io.StringIO(source) + output = io.StringIO() pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs) pi.delete() return output.getvalue() # end def delete_string def reformat_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS): - input = StringReader(source) - output = StringWriter() + input = io.StringIO(source) + output = io.StringIO() pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs) pi.reformat() return output.getvalue() # end def reformat_string +def make_backup(filename): + import os, os.path + backup = filename + '~' + if os.path.lexists(backup): + try: + os.remove(backup) + except os.error: + print("Can't remove backup %r" % (backup,), file=sys.stderr) + # end try + # end if + try: + os.rename(filename, backup) + except os.error: + print("Can't rename %r to %r" % (filename, backup), file=sys.stderr) + # end try +# end def make_backup + def complete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS): - source = open(filename, 'r').read() + with open(filename, 'r') as f: + source = f.read() + # end with result = complete_string(source, stepsize, tabsize, expandtabs) if source == result: return 0 # end if - import os - try: os.rename(filename, filename + '~') - except os.error: pass - # end try - f = open(filename, 'w') - f.write(result) - f.close() + make_backup(filename) + with open(filename, 'w') as f: + f.write(result) + # end with return 1 # end def complete_file def delete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS): - source = open(filename, 'r').read() + with open(filename, 'r') as f: + source = f.read() + # end with result = delete_string(source, stepsize, tabsize, expandtabs) if source == result: return 0 # end if - import os - try: os.rename(filename, filename + '~') - except os.error: pass - # end try - f = open(filename, 'w') - f.write(result) - f.close() + make_backup(filename) + with open(filename, 'w') as f: + f.write(result) + # end with return 1 # end def delete_file def reformat_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS): - source = open(filename, 'r').read() + with open(filename, 'r') as f: + source = f.read() + # end with result = reformat_string(source, stepsize, tabsize, expandtabs) if source == result: return 0 # end if - import os - try: os.rename(filename, filename + '~') - except os.error: pass - # end try - f = open(filename, 'w') - f.write(result) - f.close() + make_backup(filename) + with open(filename, 'w') as f: + f.write(result) + # end with return 1 # end def reformat_file @@ -474,7 +438,7 @@ usage: pindent (-c|-d|-r) [-s stepsize] [-t tabsize] [-e] [file] ... 
-r : reformat a completed program (use #end directives) -s stepsize: indentation step (default %(STEPSIZE)d) -t tabsize : the worth in spaces of a tab (default %(TABSIZE)d) --e : expand TABs into spaces (defailt OFF) +-e : expand TABs into spaces (default OFF) [file] ... : files are changed in place, with backups in file~ If no files are specified or a single - is given, the program acts as a filter (reads stdin, writes stdout). @@ -517,7 +481,7 @@ def test(): elif o == '-t': tabsize = int(a) elif o == '-e': - expandtabs = 1 + expandtabs = True # end if # end for if not action: diff --git a/Tools/scripts/ptags.py b/Tools/scripts/ptags.py index ac01356076..ca643b3494 100755 --- a/Tools/scripts/ptags.py +++ b/Tools/scripts/ptags.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # ptags # diff --git a/Tools/scripts/pysource.py b/Tools/scripts/pysource.py index 05c2b8667f..048131e243 100644..100755 --- a/Tools/scripts/pysource.py +++ b/Tools/scripts/pysource.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """\ List python source files. diff --git a/Tools/scripts/redemo.py b/Tools/scripts/redemo.py deleted file mode 100644 index 52863324c2..0000000000 --- a/Tools/scripts/redemo.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Basic regular expression demostration facility (Perl style syntax).""" - -from tkinter import * -import re - -class ReDemo: - - def __init__(self, master): - self.master = master - - self.promptdisplay = Label(self.master, anchor=W, - text="Enter a Perl-style regular expression:") - self.promptdisplay.pack(side=TOP, fill=X) - - self.regexdisplay = Entry(self.master) - self.regexdisplay.pack(fill=X) - self.regexdisplay.focus_set() - - self.addoptions() - - self.statusdisplay = Label(self.master, text="", anchor=W) - self.statusdisplay.pack(side=TOP, fill=X) - - self.labeldisplay = Label(self.master, anchor=W, - text="Enter a string to search:") - self.labeldisplay.pack(fill=X) - self.labeldisplay.pack(fill=X) - - self.showframe = Frame(master) - self.showframe.pack(fill=X, anchor=W) - - self.showvar = StringVar(master) - self.showvar.set("first") - - self.showfirstradio = Radiobutton(self.showframe, - text="Highlight first match", - variable=self.showvar, - value="first", - command=self.recompile) - self.showfirstradio.pack(side=LEFT) - - self.showallradio = Radiobutton(self.showframe, - text="Highlight all matches", - variable=self.showvar, - value="all", - command=self.recompile) - self.showallradio.pack(side=LEFT) - - self.stringdisplay = Text(self.master, width=60, height=4) - self.stringdisplay.pack(fill=BOTH, expand=1) - self.stringdisplay.tag_configure("hit", background="yellow") - - self.grouplabel = Label(self.master, text="Groups:", anchor=W) - self.grouplabel.pack(fill=X) - - self.grouplist = Listbox(self.master) - self.grouplist.pack(expand=1, fill=BOTH) - - self.regexdisplay.bind('<Key>', self.recompile) - self.stringdisplay.bind('<Key>', self.reevaluate) - - self.compiled = None - self.recompile() - - btags = self.regexdisplay.bindtags() - self.regexdisplay.bindtags(btags[1:] + btags[:1]) - - btags = self.stringdisplay.bindtags() - self.stringdisplay.bindtags(btags[1:] + btags[:1]) - - def addoptions(self): - self.frames = [] - self.boxes = [] - self.vars = [] - for name in ('IGNORECASE', - 'LOCALE', - 'MULTILINE', - 'DOTALL', - 'VERBOSE'): - if len(self.boxes) % 3 == 0: - frame = Frame(self.master) - frame.pack(fill=X) - self.frames.append(frame) - val = getattr(re, name) - var = IntVar() - box = Checkbutton(frame, - variable=var, 
text=name, - offvalue=0, onvalue=val, - command=self.recompile) - box.pack(side=LEFT) - self.boxes.append(box) - self.vars.append(var) - - def getflags(self): - flags = 0 - for var in self.vars: - flags = flags | var.get() - flags = flags - return flags - - def recompile(self, event=None): - try: - self.compiled = re.compile(self.regexdisplay.get(), - self.getflags()) - bg = self.promptdisplay['background'] - self.statusdisplay.config(text="", background=bg) - except re.error as msg: - self.compiled = None - self.statusdisplay.config( - text="re.error: %s" % str(msg), - background="red") - self.reevaluate() - - def reevaluate(self, event=None): - try: - self.stringdisplay.tag_remove("hit", "1.0", END) - except TclError: - pass - try: - self.stringdisplay.tag_remove("hit0", "1.0", END) - except TclError: - pass - self.grouplist.delete(0, END) - if not self.compiled: - return - self.stringdisplay.tag_configure("hit", background="yellow") - self.stringdisplay.tag_configure("hit0", background="orange") - text = self.stringdisplay.get("1.0", END) - last = 0 - nmatches = 0 - while last <= len(text): - m = self.compiled.search(text, last) - if m is None: - break - first, last = m.span() - if last == first: - last = first+1 - tag = "hit0" - else: - tag = "hit" - pfirst = "1.0 + %d chars" % first - plast = "1.0 + %d chars" % last - self.stringdisplay.tag_add(tag, pfirst, plast) - if nmatches == 0: - self.stringdisplay.yview_pickplace(pfirst) - groups = list(m.groups()) - groups.insert(0, m.group()) - for i in range(len(groups)): - g = "%2d: %r" % (i, groups[i]) - self.grouplist.insert(END, g) - nmatches = nmatches + 1 - if self.showvar.get() == "first": - break - - if nmatches == 0: - self.statusdisplay.config(text="(no match)", - background="yellow") - else: - self.statusdisplay.config(text="") - - -# Main function, run when invoked as a stand-alone Python program. - -def main(): - root = Tk() - demo = ReDemo(root) - root.protocol('WM_DELETE_WINDOW', root.quit) - root.mainloop() - -if __name__ == '__main__': - main() diff --git a/Tools/scripts/reindent-rst.py b/Tools/scripts/reindent-rst.py index ceb84bfd3c..25608af66a 100755 --- a/Tools/scripts/reindent-rst.py +++ b/Tools/scripts/reindent-rst.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Make a reST file compliant to our pre-commit hook. # Currently just remove trailing whitespace. diff --git a/Tools/scripts/reindent.py b/Tools/scripts/reindent.py index 8557b5debd..b18993b0c2 100755 --- a/Tools/scripts/reindent.py +++ b/Tools/scripts/reindent.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Released to the public domain, by Tim Peters, 03 October 2000. @@ -35,33 +35,34 @@ tabnanny.py, reindent should do a good job. The backup file is a copy of the one that is being reindented. The ".bak" file is generated with shutil.copy(), but some corner cases regarding -user/group and permissions could leave the backup file more readable that +user/group and permissions could leave the backup file more readable than you'd prefer. You can always use the --nobackup option to prevent this. 
""" __version__ = "1" import tokenize -import os, shutil +import os +import shutil import sys -verbose = 0 -recurse = 0 -dryrun = 0 +verbose = False +recurse = False +dryrun = False makebackup = True + def usage(msg=None): - if msg is not None: - print(msg, file=sys.stderr) - print(__doc__, file=sys.stderr) + if msg is None: + msg = __doc__ + print(msg, file=sys.stderr) + def errprint(*args): - sep = "" - for arg in args: - sys.stderr.write(sep + str(arg)) - sep = " " + sys.stderr.write(" ".join(str(arg) for arg in args)) sys.stderr.write("\n") + def main(): import getopt global verbose, recurse, dryrun, makebackup @@ -73,13 +74,13 @@ def main(): return for o, a in opts: if o in ('-d', '--dryrun'): - dryrun += 1 + dryrun = True elif o in ('-r', '--recurse'): - recurse += 1 + recurse = True elif o in ('-n', '--nobackup'): makebackup = False elif o in ('-v', '--verbose'): - verbose += 1 + verbose = True elif o in ('-h', '--help'): usage() return @@ -91,6 +92,7 @@ def main(): for arg in args: check(arg) + def check(file): if os.path.isdir(file) and not os.path.islink(file): if verbose: @@ -107,14 +109,20 @@ def check(file): if verbose: print("checking", file, "...", end=' ') + with open(file, 'rb') as f: + encoding, _ = tokenize.detect_encoding(f.readline) try: - f = open(file) + with open(file, encoding=encoding) as f: + r = Reindenter(f) except IOError as msg: errprint("%s: I/O Error: %s" % (file, str(msg))) return - r = Reindenter(f) - f.close() + newline = r.newlines + if isinstance(newline, tuple): + errprint("%s: mixed newlines detected; cannot process file" % file) + return + if r.run(): if verbose: print("changed.") @@ -126,9 +134,8 @@ def check(file): shutil.copyfile(file, bak) if verbose: print("backed up", file, "to", bak) - f = open(file, "w") - r.write(f) - f.close() + with open(file, "w", encoding=encoding, newline=newline) as f: + r.write(f) if verbose: print("wrote new", file) return True @@ -137,6 +144,7 @@ def check(file): print("unchanged.") return False + def _rstrip(line, JUNK='\n \t'): """Return line stripped of trailing spaces, tabs, newlines. @@ -146,10 +154,11 @@ def _rstrip(line, JUNK='\n \t'): """ i = len(line) - while i > 0 and line[i-1] in JUNK: + while i > 0 and line[i - 1] in JUNK: i -= 1 return line[:i] + class Reindenter: def __init__(self, f): @@ -173,6 +182,10 @@ class Reindenter: # indeed, they're our headache! self.stats = [] + # Save the newlines found in the file so they can be used to + # create output without mutating the newlines. + self.newlines = f.newlines + def run(self): tokens = tokenize.generate_tokens(self.getline) for _token in tokens: @@ -192,9 +205,9 @@ class Reindenter: # we see a line with *something* on it. i = stats[0][0] after.extend(lines[1:i]) - for i in range(len(stats)-1): + for i in range(len(stats) - 1): thisstmt, thislevel = stats[i] - nextstmt = stats[i+1][0] + nextstmt = stats[i + 1][0] have = getlspace(lines[thisstmt]) want = thislevel * 4 if want < 0: @@ -206,7 +219,7 @@ class Reindenter: want = have2want.get(have, -1) if want < 0: # Then it probably belongs to the next real stmt. - for j in range(i+1, len(stats)-1): + for j in range(i + 1, len(stats) - 1): jline, jlevel = stats[j] if jlevel >= 0: if have == getlspace(lines[jline]): @@ -216,11 +229,11 @@ class Reindenter: # comment like this one, # in which case we should shift it like its base # line got shifted. 
- for j in range(i-1, -1, -1): + for j in range(i - 1, -1, -1): jline, jlevel = stats[j] if jlevel >= 0: - want = have + getlspace(after[jline-1]) - \ - getlspace(lines[jline]) + want = have + (getlspace(after[jline - 1]) - + getlspace(lines[jline])) break if want < 0: # Still no luck -- leave it alone. @@ -295,6 +308,7 @@ class Reindenter: if line: # not endmarker self.stats.append((slinecol[0], self.level)) + # Count number of leading blanks. def getlspace(line): i, n = 0, len(line) @@ -302,5 +316,6 @@ def getlspace(line): i += 1 return i + if __name__ == '__main__': main() diff --git a/Tools/scripts/rgrep.py b/Tools/scripts/rgrep.py index 12d736e2a1..1917e05e49 100755 --- a/Tools/scripts/rgrep.py +++ b/Tools/scripts/rgrep.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """Reverse grep. @@ -9,8 +9,9 @@ import sys import re import getopt + def main(): - bufsize = 64*1024 + bufsize = 64 * 1024 reflags = 0 opts, args = getopt.getopt(sys.argv[1:], "i") for o, a in opts: @@ -24,11 +25,11 @@ def main(): try: prog = re.compile(pattern, reflags) except re.error as msg: - usage("error in regular expression: %s" % str(msg)) + usage("error in regular expression: %s" % msg) try: f = open(filename) except IOError as msg: - usage("can't open %s: %s" % (repr(filename), str(msg)), 1) + usage("can't open %r: %s" % (filename, msg), 1) f.seek(0, 2) pos = f.tell() leftover = None @@ -49,16 +50,17 @@ def main(): del lines[0] else: leftover = None - lines.reverse() - for line in lines: + for line in reversed(lines): if prog.search(line): print(line) + def usage(msg, code=2): sys.stdout = sys.stderr print(msg) print(__doc__) sys.exit(code) + if __name__ == '__main__': main() diff --git a/Tools/scripts/serve.py b/Tools/scripts/serve.py new file mode 100755 index 0000000000..68c25f057f --- /dev/null +++ b/Tools/scripts/serve.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +''' +Small wsgiref based web server. Takes a path to serve from and an +optional port number (defaults to 8000), then tries to serve files. +Mime types are guessed from the file names, 404 errors are raised +if the file is not found. Used for the make serve target in Doc. +''' +import sys +import os +import mimetypes +from wsgiref import simple_server, util + +def app(environ, respond): + + fn = os.path.join(path, environ['PATH_INFO'][1:]) + if '.' 
not in fn.split(os.path.sep)[-1]: + fn = os.path.join(fn, 'index.html') + type = mimetypes.guess_type(fn)[0] + + if os.path.exists(fn): + respond('200 OK', [('Content-Type', type)]) + return util.FileWrapper(open(fn, "rb")) + else: + respond('404 Not Found', [('Content-Type', 'text/plain')]) + return ['not found'] + +if __name__ == '__main__': + path = sys.argv[1] + port = int(sys.argv[2]) if len(sys.argv) > 2 else 8000 + httpd = simple_server.make_server('', port, app) + print("Serving {} on port {}, control-C to stop".format(path, port)) + try: + httpd.serve_forever() + except KeyboardInterrupt: + print("\b\bShutting down.") diff --git a/Tools/scripts/setup.py b/Tools/scripts/setup.py deleted file mode 100644 index 7a50368eac..0000000000 --- a/Tools/scripts/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -from distutils.core import setup - -if __name__ == '__main__': - setup( - scripts=[ - 'byteyears.py', - 'checkpyc.py', - 'copytime.py', - 'crlf.py', - 'dutree.py', - 'ftpmirror.py', - 'h2py.py', - 'lfcr.py', - '../i18n/pygettext.py', - 'logmerge.py', - '../../Lib/tabnanny.py', - '../../Lib/timeit.py', - 'untabify.py', - ], - ) diff --git a/Tools/scripts/suff.py b/Tools/scripts/suff.py index 462ec32183..0eea0d7548 100755 --- a/Tools/scripts/suff.py +++ b/Tools/scripts/suff.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # suff # @@ -6,24 +6,21 @@ import sys + def main(): files = sys.argv[1:] suffixes = {} for filename in files: suff = getsuffix(filename) - if suff not in suffixes: - suffixes[suff] = [] - suffixes[suff].append(filename) - keys = sorted(suffixes.keys()) - for suff in keys: - print(repr(suff), len(suffixes[suff])) + suffixes.setdefault(suff, []).append(filename) + for suff, filenames in sorted(suffixes.items()): + print(repr(suff), len(filenames)) + def getsuffix(filename): - suff = '' - for i in range(len(filename)): - if filename[i] == '.': - suff = filename[i:] - return suff + name, sep, suff = filename.rpartition('.') + return sep + suff if sep else '' + if __name__ == '__main__': main() diff --git a/Tools/scripts/svneol.py b/Tools/scripts/svneol.py index 9357c7ed8d..8abdd01529 100644..100755 --- a/Tools/scripts/svneol.py +++ b/Tools/scripts/svneol.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """ SVN helper script. 
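The new getsuffix() in suff.py above relies on str.rpartition(), which splits at the last occurrence of the separator and returns empty strings when it is missing; that is what keeps the two-line version correct for names with no dot at all:

    >>> 'archive.tar.gz'.rpartition('.')
    ('archive.tar', '.', 'gz')
    >>> 'README'.rpartition('.')
    ('', '', 'README')

With an empty sep the function returns '', and otherwise it returns the dot plus the final component ('.gz' here), matching what the old index-scanning loop produced.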
@@ -32,9 +32,12 @@ and for a file with a binary mime-type property: import re import os +import sys +import subprocess + def propfiles(root, fn): - default = os.path.join(root, ".svn", "props", fn+".svn-work") + default = os.path.join(root, ".svn", "props", fn + ".svn-work") try: format = int(open(os.path.join(root, ".svn", "format")).read().strip()) except IOError: @@ -42,12 +45,13 @@ def propfiles(root, fn): if format in (8, 9): # In version 8 and 9, committed props are stored in prop-base, local # modifications in props - return [os.path.join(root, ".svn", "prop-base", fn+".svn-base"), - os.path.join(root, ".svn", "props", fn+".svn-work")] - raise ValueError, "Unknown repository format" + return [os.path.join(root, ".svn", "prop-base", fn + ".svn-base"), + os.path.join(root, ".svn", "props", fn + ".svn-work")] + raise ValueError("Unknown repository format") + def proplist(root, fn): - "Return a list of property names for file fn in directory root" + """Return a list of property names for file fn in directory root.""" result = [] for path in propfiles(root, fn): try: @@ -56,7 +60,7 @@ def proplist(root, fn): # no properties file: not under version control, # or no properties set continue - while 1: + while True: # key-value pairs, of the form # K <length> # <keyname>NL @@ -79,13 +83,32 @@ def proplist(root, fn): f.close() return result + +def set_eol_native(path): + cmd = 'svn propset svn:eol-style native "{}"'.format(path) + propset = subprocess.Popen(cmd, shell=True) + propset.wait() + + possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search -for root, dirs, files in os.walk('.'): - if '.svn' in dirs: - dirs.remove('.svn') - for fn in files: - if possible_text_file(fn): + +def main(): + for arg in sys.argv[1:] or [os.curdir]: + if os.path.isfile(arg): + root, fn = os.path.split(arg) if 'svn:eol-style' not in proplist(root, fn): - path = os.path.join(root, fn) - os.system('svn propset svn:eol-style native "%s"' % path) + set_eol_native(arg) + elif os.path.isdir(arg): + for root, dirs, files in os.walk(arg): + if '.svn' in dirs: + dirs.remove('.svn') + for fn in files: + if possible_text_file(fn): + if 'svn:eol-style' not in proplist(root, fn): + path = os.path.join(root, fn) + set_eol_native(path) + + +if __name__ == '__main__': + main() diff --git a/Tools/scripts/texi2html.py b/Tools/scripts/texi2html.py index 86229f2ad3..af2147a76b 100755 --- a/Tools/scripts/texi2html.py +++ b/Tools/scripts/texi2html.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Convert GNU texinfo files into HTML, one file per node. # Based on Texinfo 2.14. diff --git a/Tools/scripts/treesync.py b/Tools/scripts/treesync.py index 8643ee73ba..b2649c474e 100755 --- a/Tools/scripts/treesync.py +++ b/Tools/scripts/treesync.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 """Script to synchronize two source trees. diff --git a/Tools/scripts/untabify.py b/Tools/scripts/untabify.py index 1f455208ee..4b67c15154 100755 --- a/Tools/scripts/untabify.py +++ b/Tools/scripts/untabify.py @@ -1,10 +1,11 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 "Replace tabs with spaces in argument files. Print names of changed files." 
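The set_eol_native() added to svneol.py above formats a single command string and runs it with shell=True, mirroring the os.system() call it replaces. An argument-list form would sidestep shell quoting of the path entirely; a sketch of that variant, not what the script actually ships:

    import subprocess

    def set_eol_native(path):
        # No shell involved, so the path needs no quoting.
        subprocess.call(['svn', 'propset', 'svn:eol-style', 'native', path])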
import os import sys import getopt +import tokenize def main(): tabsize = 8 @@ -23,11 +24,12 @@ def main(): for filename in args: process(filename, tabsize) + def process(filename, tabsize, verbose=True): try: - f = open(filename) - text = f.read() - f.close() + with tokenize.open(filename) as f: + text = f.read() + encoding = f.encoding except IOError as msg: print("%r: I/O error: %s" % (filename, msg)) return @@ -43,11 +45,11 @@ def process(filename, tabsize, verbose=True): os.rename(filename, backup) except os.error: pass - f = open(filename, "w") - f.write(newtext) - f.close() + with open(filename, "w", encoding=encoding) as f: + f.write(newtext) if verbose: print(filename) + if __name__ == '__main__': main() diff --git a/Tools/scripts/which.py b/Tools/scripts/which.py index a9f4907812..4fc37a027c 100755 --- a/Tools/scripts/which.py +++ b/Tools/scripts/which.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # Variant of "which". # On stderr, near and total misses are reported. diff --git a/Tools/scripts/xxci.py b/Tools/scripts/xxci.py deleted file mode 100755 index 8cffc9e11b..0000000000 --- a/Tools/scripts/xxci.py +++ /dev/null @@ -1,121 +0,0 @@ -#! /usr/bin/env python - -# xxci -# -# check in files for which rcsdiff returns nonzero exit status - -import sys -import os -from stat import * -import fnmatch - -EXECMAGIC = '\001\140\000\010' - -MAXSIZE = 200*1024 # Files this big must be binaries and are skipped. - -def getargs(): - args = sys.argv[1:] - if args: - return args - print('No arguments, checking almost *, in "ls -t" order') - list = [] - for file in os.listdir(os.curdir): - if not skipfile(file): - list.append((getmtime(file), file)) - list.sort() - if not list: - print('Nothing to do -- exit 1') - sys.exit(1) - list.sort() - list.reverse() - for mtime, file in list: args.append(file) - return args - -def getmtime(file): - try: - st = os.stat(file) - return st[ST_MTIME] - except os.error: - return -1 - -badnames = ['tags', 'TAGS', 'xyzzy', 'nohup.out', 'core'] -badprefixes = ['.', ',', '@', '#', 'o.'] -badsuffixes = \ - ['~', '.a', '.o', '.old', '.bak', '.orig', '.new', '.prev', '.not', \ - '.pyc', '.fdc', '.rgb', '.elc', ',v'] -ignore = [] - -def setup(): - ignore[:] = badnames - for p in badprefixes: - ignore.append(p + '*') - for p in badsuffixes: - ignore.append('*' + p) - try: - f = open('.xxcign', 'r') - except IOError: - return - ignore[:] = ignore + f.read().split() - -def skipfile(file): - for p in ignore: - if fnmatch.fnmatch(file, p): return 1 - try: - st = os.lstat(file) - except os.error: - return 1 # Doesn't exist -- skip it - # Skip non-plain files. - if not S_ISREG(st[ST_MODE]): return 1 - # Skip huge files -- probably binaries. - if st[ST_SIZE] >= MAXSIZE: return 1 - # Skip executables - try: - data = open(file, 'r').read(len(EXECMAGIC)) - if data == EXECMAGIC: return 1 - except: - pass - return 0 - -def badprefix(file): - for bad in badprefixes: - if file[:len(bad)] == bad: return 1 - return 0 - -def badsuffix(file): - for bad in badsuffixes: - if file[-len(bad):] == bad: return 1 - return 0 - -def go(args): - for file in args: - print(file + ':') - if differing(file): - showdiffs(file) - if askyesno('Check in ' + file + ' ? 
'): - sts = os.system('rcs -l ' + file) # ignored - sts = os.system('ci -l ' + file) - -def differing(file): - cmd = 'co -p ' + file + ' 2>/dev/null | cmp -s - ' + file - sts = os.system(cmd) - return sts != 0 - -def showdiffs(file): - cmd = 'rcsdiff ' + file + ' 2>&1 | ${PAGER-more}' - sts = os.system(cmd) - -def raw_input(prompt): - sys.stdout.write(prompt) - sys.stdout.flush() - return sys.stdin.readline() - -def askyesno(prompt): - s = input(prompt) - return s in ['y', 'yes'] - -if __name__ == '__main__': - try: - setup() - go(getargs()) - except KeyboardInterrupt: - print('[Intr]') |
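The untabify.py hunk above reaches for the higher-level tokenize.open() rather than the detect_encoding() dance reindent.py uses: it decodes the file according to its coding cookie (or the UTF-8 default) and exposes the resulting encoding, which the script then reuses when writing the untabified text back out. In miniature, with 'example.py' as a placeholder filename:

    import tokenize

    # tokenize.open() decodes the file per its declared source encoding, so
    # the same encoding can be reused for the rewrite.
    with tokenize.open('example.py') as f:
        text = f.read()
        encoding = f.encoding

In the script itself the filename comes from the command line and the write-back is open(filename, 'w', encoding=encoding).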