From a2605b297c147a1ad54078181d32fe369fa4f37d Mon Sep 17 00:00:00 2001 From: Daniel Holth Date: Wed, 3 Aug 2016 21:55:39 -0400 Subject: use abi3 extension if Extension().is_abi3 --- setuptools/command/build_ext.py | 13 +++++++++++++ setuptools/extension.py | 4 ++++ setuptools/tests/test_build_ext.py | 20 +++++++++++++++++++- 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index e6db0764..dad28999 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -59,6 +59,14 @@ elif os.name != 'nt': if_dl = lambda s: s if have_rtld else '' +def get_abi3_suffix(): + """Return the file extension for an abi3-compliant Extension()""" + import imp + for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION): + if '.abi3' in suffix: # Unix + return suffix + elif suffix == '.pyd': # Windows + return suffix class build_ext(_build_ext): @@ -96,6 +104,11 @@ class build_ext(_build_ext): filename = _build_ext.get_ext_filename(self, fullname) if fullname in self.ext_map: ext = self.ext_map[fullname] + if sys.version_info[0] != 2 and getattr(ext, 'is_abi3'): + from distutils.sysconfig import get_config_var + so_ext = get_config_var('SO') + filename = filename[:-len(so_ext)] + filename = filename + get_abi3_suffix() if isinstance(ext, Library): fn, ext = os.path.splitext(filename) return self.shlib_compiler.library_filename(fn, libtype) diff --git a/setuptools/extension.py b/setuptools/extension.py index 5ea72c06..a6cc0915 100644 --- a/setuptools/extension.py +++ b/setuptools/extension.py @@ -36,6 +36,10 @@ have_pyrex = _have_cython class Extension(_Extension): """Extension that uses '.c' files in place of '.pyx' files""" + def __init__(self, name, sources, is_abi3=False, **kw): + self.is_abi3 = is_abi3 + _Extension.__init__(self, name, sources, **kw) + def _convert_pyx_sources_to_lang(self): """ Replace sources with .pyx extensions to sources with the target diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py index 5168ebf0..e0f2e73b 100644 --- a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -1,8 +1,10 @@ +import sys import distutils.command.build_ext as orig +from distutils.sysconfig import get_config_var from setuptools.command.build_ext import build_ext from setuptools.dist import Distribution - +from setuptools.extension import Extension class TestBuildExt: @@ -18,3 +20,19 @@ class TestBuildExt: res = cmd.get_ext_filename('foo') wanted = orig.build_ext.get_ext_filename(cmd, 'foo') assert res == wanted + + def test_abi3_filename(self): + """ + Filename needs to be loadable by several versions + of Python 3 if 'is_abi3' is truthy on Extension() + """ + dist = Distribution(dict(ext_modules=[Extension('spam.eggs', [], is_abi3=True)])) + cmd = build_ext(dist) + res = cmd.get_ext_filename('spam.eggs') + + if sys.version_info[0] == 2: + assert res.endswith(get_config_var('SO')) + elif sys.platform == 'win32': + assert res.endswith('eggs.pyd') + else: + assert 'abi3' in res \ No newline at end of file -- cgit v1.2.1 From dab253cb72eb7c098393f985e32585e079607f66 Mon Sep 17 00:00:00 2001 From: Daniel Holth Date: Fri, 5 Aug 2016 07:45:09 -0400 Subject: call finalize_options in test --- setuptools/tests/test_build_ext.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py index e0f2e73b..c71aadca 100644 --- 
a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -2,7 +2,7 @@ import sys import distutils.command.build_ext as orig from distutils.sysconfig import get_config_var -from setuptools.command.build_ext import build_ext +from setuptools.command.build_ext import build_ext, get_abi3_suffix from setuptools.dist import Distribution from setuptools.extension import Extension @@ -26,8 +26,13 @@ class TestBuildExt: Filename needs to be loadable by several versions of Python 3 if 'is_abi3' is truthy on Extension() """ - dist = Distribution(dict(ext_modules=[Extension('spam.eggs', [], is_abi3=True)])) + print(get_abi3_suffix()) + + extension = Extension('spam.eggs', ['eggs.c'], is_abi3=True) + dist = Distribution(dict(ext_modules=[extension])) cmd = build_ext(dist) + cmd.finalize_options() + assert 'spam.eggs' in cmd.ext_map res = cmd.get_ext_filename('spam.eggs') if sys.version_info[0] == 2: -- cgit v1.2.1 From 702a4277768f0781e3d0a4cf770d29621f7f2cc3 Mon Sep 17 00:00:00 2001 From: Daniel Holth Date: Fri, 5 Aug 2016 07:55:57 -0400 Subject: rename is_abi3 to py_limited_api --- setuptools/command/build_ext.py | 4 +++- setuptools/extension.py | 4 ++-- setuptools/tests/test_build_ext.py | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index dad28999..7bb4d24c 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -104,7 +104,9 @@ class build_ext(_build_ext): filename = _build_ext.get_ext_filename(self, fullname) if fullname in self.ext_map: ext = self.ext_map[fullname] - if sys.version_info[0] != 2 and getattr(ext, 'is_abi3'): + if (sys.version_info[0] != 2 + and getattr(ext, 'py_limited_api') + and get_abi3_suffix()): from distutils.sysconfig import get_config_var so_ext = get_config_var('SO') filename = filename[:-len(so_ext)] diff --git a/setuptools/extension.py b/setuptools/extension.py index a6cc0915..c8e9bc7d 100644 --- a/setuptools/extension.py +++ b/setuptools/extension.py @@ -36,8 +36,8 @@ have_pyrex = _have_cython class Extension(_Extension): """Extension that uses '.c' files in place of '.pyx' files""" - def __init__(self, name, sources, is_abi3=False, **kw): - self.is_abi3 = is_abi3 + def __init__(self, name, sources, py_limited_api=False, **kw): + self.py_limited_api = py_limited_api _Extension.__init__(self, name, sources, **kw) def _convert_pyx_sources_to_lang(self): diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py index c71aadca..100869f6 100644 --- a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -28,7 +28,7 @@ class TestBuildExt: """ print(get_abi3_suffix()) - extension = Extension('spam.eggs', ['eggs.c'], is_abi3=True) + extension = Extension('spam.eggs', ['eggs.c'], py_limited_api=True) dist = Distribution(dict(ext_modules=[extension])) cmd = build_ext(dist) cmd.finalize_options() -- cgit v1.2.1 From 0a3f850b3bc6c888f0f8810e453e664f9fd320f6 Mon Sep 17 00:00:00 2001 From: Daniel Holth Date: Fri, 5 Aug 2016 08:01:58 -0400 Subject: gate test against get_abi3_suffix() --- setuptools/tests/test_build_ext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py index 100869f6..f2e1f59d 100644 --- a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -35,7 +35,7 @@ class TestBuildExt: assert 'spam.eggs' in cmd.ext_map res = cmd.get_ext_filename('spam.eggs') - if 
sys.version_info[0] == 2: + if sys.version_info[0] == 2 or not get_abi3_suffix(): assert res.endswith(get_config_var('SO')) elif sys.platform == 'win32': assert res.endswith('eggs.pyd') -- cgit v1.2.1 From 5e1693d225b2416712e11da591f60c085ce62957 Mon Sep 17 00:00:00 2001 From: "J. Goutin" Date: Tue, 16 Aug 2016 12:43:16 +0200 Subject: numpy.distutils and distutils._msvccompiler compatibility - Fix compatibility between `numpy.distutils` and `distutils._msvccompiler`. See #728 : Setuptools 24 `msvc.py` improvement import `distutils._msvccompiler` (New Python 3.5 C compiler for MSVC >= 14), but this one is not compatible with `numpy.distutils` (because not patched with `numpy.distutils.ccompiler.gen_lib_options`) and return unquoted libpaths when linking. The problem was patched in Numpy, but need to be patched also in Setuptools for compatibility between older versions of Numpy and `distutils._msvccompiler` (and indirectly Setuptools > 24). - Replace some residuals `except Exception`. --- setuptools/msvc.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/setuptools/msvc.py b/setuptools/msvc.py index 4616d4be..bd486fa1 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -75,14 +75,23 @@ def patch_for_specialized_compiler(): msvc9compiler.find_vcvarsall = msvc9_find_vcvarsall unpatched['msvc9_query_vcvarsall'] = msvc9compiler.query_vcvarsall msvc9compiler.query_vcvarsall = msvc9_query_vcvarsall - except Exception: + except NameError: pass try: # Patch distutils._msvccompiler._get_vc_env unpatched['msvc14_get_vc_env'] = msvc14compiler._get_vc_env msvc14compiler._get_vc_env = msvc14_get_vc_env - except Exception: + except NameError: + pass + + try: + # Apply "gen_lib_options" patch from Numpy to "distutils._msvccompiler" + # to fix compatibility between "numpy.distutils" and + # "distutils._msvccompiler" (for Numpy < 1.11.2) + import numpy.distutils as np_distutils + msvc14compiler.gen_lib_options = np_distutils.ccompiler.gen_lib_options + except (ImportError, NameError): pass @@ -243,7 +252,8 @@ def _augment_exception(exc, version, arch=''): elif version >= 14.0: # For VC++ 14.0 Redirect user to Visual C++ Build Tools message += (' Get it with "Microsoft Visual C++ Build Tools": ' - r'http://landinghub.visualstudio.com/visual-cpp-build-tools') + r'http://landinghub.visualstudio.com/' + 'visual-cpp-build-tools') exc.args = (message, ) -- cgit v1.2.1 From 4c2b0a2d9c19218086b5a6ecb837403bd5bb8135 Mon Sep 17 00:00:00 2001 From: "J. Goutin" Date: Tue, 16 Aug 2016 12:55:19 +0200 Subject: Add history for unquoted libpaths problem --- CHANGES.rst | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index bffad4c5..9f098df9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -2,6 +2,11 @@ CHANGES ======= +v25.3.1 +------- + +* #739 Fix unquoted libpaths by fixing compatibility between `numpy.distutils` and `distutils._msvccompiler` for numpy < 1.11.2 (Fix issue #728, error also fixed in Numpy). + v25.3.0 ------- @@ -16,20 +21,21 @@ v25.2.0 v25.1.6 ------- -* #725 +* #725: revert `library_dir_option` patch (Error is related to `numpy.distutils` and make errors on non Numpy users). v25.1.5 ------- * #720 -* #723 +* #723: Improve patch for `library_dir_option`. v25.1.4 ------- * #717 * #713 -* #707 via #715 +* #707: Fix Python 2 compatibility for MSVC by catching errors properly. +* #715: Fix unquoted libpaths by patching `library_dir_option`. 
v25.1.3 ------- -- cgit v1.2.1 From 38d6743427f0aa31eaf2eb07df5cd11b6526d036 Mon Sep 17 00:00:00 2001 From: "J. Goutin" Date: Tue, 16 Aug 2016 19:12:42 +0200 Subject: Patch with numpy at execution time and not at import time --- setuptools/msvc.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/setuptools/msvc.py b/setuptools/msvc.py index bd486fa1..4646d83f 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -79,19 +79,17 @@ def patch_for_specialized_compiler(): pass try: - # Patch distutils._msvccompiler._get_vc_env + # Patch distutils._msvccompiler._get_vc_env for numpy compatibility unpatched['msvc14_get_vc_env'] = msvc14compiler._get_vc_env msvc14compiler._get_vc_env = msvc14_get_vc_env except NameError: pass try: - # Apply "gen_lib_options" patch from Numpy to "distutils._msvccompiler" - # to fix compatibility between "numpy.distutils" and - # "distutils._msvccompiler" (for Numpy < 1.11.2) - import numpy.distutils as np_distutils - msvc14compiler.gen_lib_options = np_distutils.ccompiler.gen_lib_options - except (ImportError, NameError): + # Patch distutils._msvccompiler.gen_lib_options + unpatched['msvc14_gen_lib_options'] = msvc14compiler.gen_lib_options + msvc14compiler.gen_lib_options = msvc14_gen_lib_options + except NameError: pass @@ -221,6 +219,19 @@ def msvc14_get_vc_env(plat_spec): raise +def msvc14_gen_lib_options(*args, **kwargs): + """ + Patched "distutils._msvccompiler.gen_lib_options" for fix + compatibility between "numpy.distutils" and "distutils._msvccompiler" + (for Numpy < 1.11.2) + """ + if "numpy" in distutils.ccompiler.CCompiler.spawn.__module__: + import numpy as np + return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) + else: + return unpatched['msvc14_gen_lib_options'](*args, **kwargs) + + def _augment_exception(exc, version, arch=''): """ Add details to the exception message to help guide the user -- cgit v1.2.1 From f5802f369d5b7b76a7feb4c49e2e49840058bf3b Mon Sep 17 00:00:00 2001 From: "J. Goutin" Date: Tue, 16 Aug 2016 19:33:07 +0200 Subject: Improve numpy.distutils detection Agree with @pitrou comment. --- setuptools/msvc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setuptools/msvc.py b/setuptools/msvc.py index 4646d83f..57153d8a 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -2,6 +2,7 @@ This module adds improved support for Microsoft Visual C++ compilers. """ import os +import sys import platform import itertools import distutils.errors @@ -225,7 +226,7 @@ def msvc14_gen_lib_options(*args, **kwargs): compatibility between "numpy.distutils" and "distutils._msvccompiler" (for Numpy < 1.11.2) """ - if "numpy" in distutils.ccompiler.CCompiler.spawn.__module__: + if "numpy.distutils" in sys.modules: import numpy as np return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) else: -- cgit v1.2.1 From 21be70b60cfea8da91df4687a21f262a59809073 Mon Sep 17 00:00:00 2001 From: "J. 
Goutin" Date: Tue, 16 Aug 2016 19:34:48 +0200 Subject: Wrong line for comment --- setuptools/msvc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setuptools/msvc.py b/setuptools/msvc.py index 57153d8a..360c1a68 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -80,14 +80,14 @@ def patch_for_specialized_compiler(): pass try: - # Patch distutils._msvccompiler._get_vc_env for numpy compatibility + # Patch distutils._msvccompiler._get_vc_env unpatched['msvc14_get_vc_env'] = msvc14compiler._get_vc_env msvc14compiler._get_vc_env = msvc14_get_vc_env except NameError: pass try: - # Patch distutils._msvccompiler.gen_lib_options + # Patch distutils._msvccompiler.gen_lib_options for Numpy unpatched['msvc14_gen_lib_options'] = msvc14compiler.gen_lib_options msvc14compiler.gen_lib_options = msvc14_gen_lib_options except NameError: -- cgit v1.2.1 From fadf148e520e84cdd23856457040799dfe9fa592 Mon Sep 17 00:00:00 2001 From: Tim Heap Date: Tue, 16 Aug 2016 15:59:17 +1000 Subject: Add tests for MANIFEST.in --- setuptools/tests/test_manifest.py | 210 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 setuptools/tests/test_manifest.py diff --git a/setuptools/tests/test_manifest.py b/setuptools/tests/test_manifest.py new file mode 100644 index 00000000..abee5128 --- /dev/null +++ b/setuptools/tests/test_manifest.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +"""sdist tests""" + +import contextlib +import os +import shutil +import sys +import tempfile + +from setuptools.command.egg_info import egg_info +from setuptools.dist import Distribution +from setuptools.extern import six +from setuptools.tests.textwrap import DALS + +import pytest + +py3_only = pytest.mark.xfail(six.PY2, reason="Test runs on Python 3 only") + + +def make_local_path(s): + """Converts '/' in a string to os.sep""" + return s.replace('/', os.sep) + + +SETUP_ATTRS = { + 'name': 'app', + 'version': '0.0', + 'packages': ['app'], +} + + +SETUP_PY = """\ +from setuptools import setup + +setup(**%r) +""" % SETUP_ATTRS + + +@contextlib.contextmanager +def quiet(): + old_stdout, old_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = six.StringIO(), six.StringIO() + try: + yield + finally: + sys.stdout, sys.stderr = old_stdout, old_stderr + + +def touch(filename): + open(filename, 'w').close() + +# The set of files always in the manifest, including all files in the +# .egg-info directory +default_files = frozenset(map(make_local_path, [ + 'README.rst', + 'MANIFEST.in', + 'setup.py', + 'app.egg-info/PKG-INFO', + 'app.egg-info/SOURCES.txt', + 'app.egg-info/dependency_links.txt', + 'app.egg-info/top_level.txt', + 'app/__init__.py', +])) + + +class TestManifestTest: + + def setup_method(self, method): + self.temp_dir = tempfile.mkdtemp() + f = open(os.path.join(self.temp_dir, 'setup.py'), 'w') + f.write(SETUP_PY) + f.close() + + """ + Create a file tree like: + - LICENSE + - README.rst + - testing.rst + - .hidden.rst + - app/ + - __init__.py + - a.txt + - b.txt + - c.rst + - static/ + - app.js + - app.js.map + - app.css + - app.css.map + """ + + for fname in ['README.rst', '.hidden.rst', 'testing.rst', 'LICENSE']: + touch(os.path.join(self.temp_dir, fname)) + + # Set up the rest of the test package + test_pkg = os.path.join(self.temp_dir, 'app') + os.mkdir(test_pkg) + for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']: + touch(os.path.join(test_pkg, fname)) + + # Some compiled front-end assets to include + static = os.path.join(test_pkg, 'static') + os.mkdir(static) 
+ for fname in ['app.js', 'app.js.map', 'app.css', 'app.css.map']: + touch(os.path.join(static, fname)) + + self.old_cwd = os.getcwd() + os.chdir(self.temp_dir) + + def teardown_method(self, method): + os.chdir(self.old_cwd) + shutil.rmtree(self.temp_dir) + + def make_manifest(self, contents): + """Write a MANIFEST.in.""" + with open(os.path.join(self.temp_dir, 'MANIFEST.in'), 'w') as f: + f.write(DALS(contents)) + + def get_files(self): + """Run egg_info and get all the files to include, as a set""" + dist = Distribution(SETUP_ATTRS) + dist.script_name = 'setup.py' + cmd = egg_info(dist) + cmd.ensure_finalized() + + cmd.run() + + return set(cmd.filelist.files) + + def test_no_manifest(self): + """Check a missing MANIFEST.in includes only the standard files.""" + assert (default_files - set(['MANIFEST.in'])) == self.get_files() + + def test_empty_files(self): + """Check an empty MANIFEST.in includes only the standard files.""" + self.make_manifest("") + assert default_files == self.get_files() + + def test_include(self): + """Include extra rst files in the project root.""" + self.make_manifest("include *.rst") + files = default_files | set([ + 'testing.rst', '.hidden.rst']) + assert files == self.get_files() + + def test_exclude(self): + """Include everything in app/ except the text files""" + l = make_local_path + self.make_manifest( + """ + include app/* + exclude app/*.txt + """) + files = default_files | set([l('app/c.rst')]) + assert files == self.get_files() + + def test_include_multiple(self): + """Include with multiple patterns.""" + l = make_local_path + self.make_manifest("include app/*.txt app/static/*") + files = default_files | set([ + l('app/a.txt'), l('app/b.txt'), + l('app/static/app.js'), l('app/static/app.js.map'), + l('app/static/app.css'), l('app/static/app.css.map')]) + assert files == self.get_files() + + def test_graft(self): + """Include the whole app/static/ directory.""" + l = make_local_path + self.make_manifest("graft app/static") + files = default_files | set([ + l('app/static/app.js'), l('app/static/app.js.map'), + l('app/static/app.css'), l('app/static/app.css.map')]) + assert files == self.get_files() + + def test_graft_global_exclude(self): + """Exclude all *.map files in the project.""" + l = make_local_path + self.make_manifest( + """ + graft app/static + global-exclude *.map + """) + files = default_files | set([ + l('app/static/app.js'), l('app/static/app.css')]) + assert files == self.get_files() + + def test_global_include(self): + """Include all *.rst, *.js, and *.css files in the whole tree.""" + l = make_local_path + self.make_manifest( + """ + global-include *.rst *.js *.css + """) + files = default_files | set([ + '.hidden.rst', 'testing.rst', l('app/c.rst'), + l('app/static/app.js'), l('app/static/app.css')]) + assert files == self.get_files() + + def test_graft_prune(self): + """Include all files in app/, except for the whole app/static/ dir.""" + l = make_local_path + self.make_manifest( + """ + graft app + prune app/static + """) + files = default_files | set([ + l('app/a.txt'), l('app/b.txt'), l('app/c.rst')]) + assert files == self.get_files() -- cgit v1.2.1 From 498f86b8979b644234e8cfebfa990385504e926f Mon Sep 17 00:00:00 2001 From: Tim Heap Date: Tue, 16 Aug 2016 16:36:58 +1000 Subject: Copy FileList tests from distutils --- setuptools/tests/test_manifest.py | 283 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 274 insertions(+), 9 deletions(-) diff --git a/setuptools/tests/test_manifest.py b/setuptools/tests/test_manifest.py index 
abee5128..6e67ca61 100644 --- a/setuptools/tests/test_manifest.py +++ b/setuptools/tests/test_manifest.py @@ -6,8 +6,10 @@ import os import shutil import sys import tempfile +from distutils import log +from distutils.errors import DistutilsTemplateError -from setuptools.command.egg_info import egg_info +from setuptools.command.egg_info import FileList, egg_info from setuptools.dist import Distribution from setuptools.extern import six from setuptools.tests.textwrap import DALS @@ -63,10 +65,23 @@ default_files = frozenset(map(make_local_path, [ ])) -class TestManifestTest: +class TempDirTestCase(object): def setup_method(self, method): self.temp_dir = tempfile.mkdtemp() + self.old_cwd = os.getcwd() + os.chdir(self.temp_dir) + + def teardown_method(self, method): + os.chdir(self.old_cwd) + shutil.rmtree(self.temp_dir) + + +class TestManifestTest(TempDirTestCase): + + def setup_method(self, method): + super(TestManifestTest, self).setup_method(method) + f = open(os.path.join(self.temp_dir, 'setup.py'), 'w') f.write(SETUP_PY) f.close() @@ -104,13 +119,6 @@ class TestManifestTest: for fname in ['app.js', 'app.js.map', 'app.css', 'app.css.map']: touch(os.path.join(static, fname)) - self.old_cwd = os.getcwd() - os.chdir(self.temp_dir) - - def teardown_method(self, method): - os.chdir(self.old_cwd) - shutil.rmtree(self.temp_dir) - def make_manifest(self, contents): """Write a MANIFEST.in.""" with open(os.path.join(self.temp_dir, 'MANIFEST.in'), 'w') as f: @@ -208,3 +216,260 @@ class TestManifestTest: files = default_files | set([ l('app/a.txt'), l('app/b.txt'), l('app/c.rst')]) assert files == self.get_files() + + +class TestFileListTest(TempDirTestCase): + """ + A copy of the relevant bits of distutils/tests/test_filelist.py, + to ensure setuptools' version of FileList keeps parity with distutils. 
+ """ + + def setup_method(self, method): + super(TestFileListTest, self).setup_method(method) + self.threshold = log.set_threshold(log.FATAL) + self._old_log = log.Log._log + log.Log._log = self._log + self.logs = [] + + def teardown_method(self, method): + log.set_threshold(self.threshold) + log.Log._log = self._old_log + super(TestFileListTest, self).teardown_method(method) + + def _log(self, level, msg, args): + if level not in (log.DEBUG, log.INFO, log.WARN, log.ERROR, log.FATAL): + raise ValueError('%s wrong log level' % str(level)) + self.logs.append((level, msg, args)) + + def get_logs(self, *levels): + def _format(msg, args): + if len(args) == 0: + return msg + return msg % args + return [_format(msg, args) for level, msg, args + in self.logs if level in levels] + + def clear_logs(self): + self.logs = [] + + def assertNoWarnings(self): + assert self.get_logs(log.WARN) == [] + self.clear_logs() + + def assertWarnings(self): + assert len(self.get_logs(log.WARN)) > 0 + self.clear_logs() + + def make_files(self, files): + for file in files: + file = os.path.join(self.temp_dir, file) + dirname, basename = os.path.split(file) + if not os.path.exists(dirname): + os.makedirs(dirname) + open(file, 'w').close() + + def test_process_template_line(self): + # testing all MANIFEST.in template patterns + file_list = FileList() + l = make_local_path + + # simulated file list + self.make_files([ + 'foo.tmp', 'ok', 'xo', 'four.txt', + 'buildout.cfg', + # filelist does not filter out VCS directories, + # it's sdist that does + l('.hg/last-message.txt'), + l('global/one.txt'), + l('global/two.txt'), + l('global/files.x'), + l('global/here.tmp'), + l('f/o/f.oo'), + l('dir/graft-one'), + l('dir/dir2/graft2'), + l('dir3/ok'), + l('dir3/sub/ok.txt'), + ]) + + MANIFEST_IN = DALS("""\ + include ok + include xo + exclude xo + include foo.tmp + include buildout.cfg + global-include *.x + global-include *.txt + global-exclude *.tmp + recursive-include f *.oo + recursive-exclude global *.x + graft dir + prune dir3 + """) + + for line in MANIFEST_IN.split('\n'): + if not line: + continue + file_list.process_template_line(line) + + wanted = [ + 'buildout.cfg', + 'four.txt', + 'ok', + l('.hg/last-message.txt'), + l('dir/graft-one'), + l('dir/dir2/graft2'), + l('f/o/f.oo'), + l('global/one.txt'), + l('global/two.txt'), + ] + file_list.sort() + + assert file_list.files == wanted + + def test_exclude_pattern(self): + # return False if no match + file_list = FileList() + assert not file_list.exclude_pattern('*.py') + + # return True if files match + file_list = FileList() + file_list.files = ['a.py', 'b.py'] + assert file_list.exclude_pattern('*.py') + + # test excludes + file_list = FileList() + file_list.files = ['a.py', 'a.txt'] + file_list.exclude_pattern('*.py') + assert file_list.files == ['a.txt'] + + def test_include_pattern(self): + # return False if no match + file_list = FileList() + file_list.set_allfiles([]) + assert not file_list.include_pattern('*.py') + + # return True if files match + file_list = FileList() + file_list.set_allfiles(['a.py', 'b.txt']) + assert file_list.include_pattern('*.py') + + # test * matches all files + file_list = FileList() + assert file_list.allfiles is None + file_list.set_allfiles(['a.py', 'b.txt']) + file_list.include_pattern('*') + assert file_list.allfiles == ['a.py', 'b.txt'] + + def test_process_template(self): + l = make_local_path + # invalid lines + file_list = FileList() + for action in ('include', 'exclude', 'global-include', + 'global-exclude', 
'recursive-include', + 'recursive-exclude', 'graft', 'prune', 'blarg'): + try: + file_list.process_template_line(action) + except DistutilsTemplateError: + pass + except Exception: + assert False, "Incorrect error thrown" + else: + assert False, "Should have thrown an error" + + # include + file_list = FileList() + file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')]) + + file_list.process_template_line('include *.py') + assert file_list.files == ['a.py'] + self.assertNoWarnings() + + file_list.process_template_line('include *.rb') + assert file_list.files == ['a.py'] + self.assertWarnings() + + # exclude + file_list = FileList() + file_list.files = ['a.py', 'b.txt', l('d/c.py')] + + file_list.process_template_line('exclude *.py') + assert file_list.files == ['b.txt', l('d/c.py')] + self.assertNoWarnings() + + file_list.process_template_line('exclude *.rb') + assert file_list.files == ['b.txt', l('d/c.py')] + self.assertWarnings() + + # global-include + file_list = FileList() + file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')]) + + file_list.process_template_line('global-include *.py') + assert file_list.files == ['a.py', l('d/c.py')] + self.assertNoWarnings() + + file_list.process_template_line('global-include *.rb') + assert file_list.files == ['a.py', l('d/c.py')] + self.assertWarnings() + + # global-exclude + file_list = FileList() + file_list.files = ['a.py', 'b.txt', l('d/c.py')] + + file_list.process_template_line('global-exclude *.py') + assert file_list.files == ['b.txt'] + self.assertNoWarnings() + + file_list.process_template_line('global-exclude *.rb') + assert file_list.files == ['b.txt'] + self.assertWarnings() + + # recursive-include + file_list = FileList() + file_list.set_allfiles(['a.py', l('d/b.py'), l('d/c.txt'), + l('d/d/e.py')]) + + file_list.process_template_line('recursive-include d *.py') + assert file_list.files == [l('d/b.py'), l('d/d/e.py')] + self.assertNoWarnings() + + file_list.process_template_line('recursive-include e *.py') + assert file_list.files == [l('d/b.py'), l('d/d/e.py')] + self.assertWarnings() + + # recursive-exclude + file_list = FileList() + file_list.files = ['a.py', l('d/b.py'), l('d/c.txt'), l('d/d/e.py')] + + file_list.process_template_line('recursive-exclude d *.py') + assert file_list.files == ['a.py', l('d/c.txt')] + self.assertNoWarnings() + + file_list.process_template_line('recursive-exclude e *.py') + assert file_list.files == ['a.py', l('d/c.txt')] + self.assertWarnings() + + # graft + file_list = FileList() + file_list.set_allfiles(['a.py', l('d/b.py'), l('d/d/e.py'), + l('f/f.py')]) + + file_list.process_template_line('graft d') + assert file_list.files == [l('d/b.py'), l('d/d/e.py')] + self.assertNoWarnings() + + file_list.process_template_line('graft e') + assert file_list.files == [l('d/b.py'), l('d/d/e.py')] + self.assertWarnings() + + # prune + file_list = FileList() + file_list.files = ['a.py', l('d/b.py'), l('d/d/e.py'), l('f/f.py')] + + file_list.process_template_line('prune d') + assert file_list.files == ['a.py', l('f/f.py')] + self.assertNoWarnings() + + file_list.process_template_line('prune e') + assert file_list.files == ['a.py', l('f/f.py')] + self.assertWarnings() -- cgit v1.2.1 From d806e4cf59b252eb2120483347c4f3772f4ca386 Mon Sep 17 00:00:00 2001 From: insiv Date: Wed, 17 Aug 2016 22:07:48 +0700 Subject: Remove whitespace around parameter = sign. 
--- pkg_resources/__init__.py | 2 +- pkg_resources/tests/test_resources.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py index 2a053b50..8d967c07 100644 --- a/pkg_resources/__init__.py +++ b/pkg_resources/__init__.py @@ -1913,7 +1913,7 @@ class EggMetadata(ZipProvider): self.module_path = importer.archive self._setup_prefix() -_declare_state('dict', _distribution_finders = {}) +_declare_state('dict', _distribution_finders={}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index e0dbb652..e4827fa5 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -114,7 +114,7 @@ class TestDistro: def testDistroMetadata(self): d = Distribution( "/some/path", project_name="FooPkg", py_version="2.4", platform="win32", - metadata = Metadata( + metadata=Metadata( ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") ) ) -- cgit v1.2.1 From 37f1dd197243d18667b1d7fee489013d53b3bd06 Mon Sep 17 00:00:00 2001 From: insiv Date: Wed, 17 Aug 2016 22:24:43 +0700 Subject: Add missing whitespace around operators. --- pkg_resources/__init__.py | 56 +++++++++++++++++------------------ pkg_resources/tests/test_resources.py | 12 ++++---- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py index 2a053b50..a4704c72 100644 --- a/pkg_resources/__init__.py +++ b/pkg_resources/__init__.py @@ -155,7 +155,7 @@ class _SetuptoolsVersionMixin(object): # pad for numeric comparison yield part.zfill(8) else: - yield '*'+part + yield '*' + part # ensure that alpha/beta/candidate are before final yield '*final' @@ -217,13 +217,13 @@ def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): - state[k] = g['_sget_'+v](g[k]) + state[k] = g['_sget_' + v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): - g['_sset_'+_state_vars[k]](k, g[k], v) + g['_sset_' + _state_vars[k]](k, g[k], v) return state def _sget_dict(val): @@ -314,7 +314,7 @@ __all__ = [ class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): - return self.__class__.__name__+repr(self.args) + return self.__class__.__name__ + repr(self.args) class VersionConflict(ResolutionError): @@ -477,7 +477,7 @@ def compatible_platforms(provided, required): XXX Needs compatibility checks for Linux and other unixy OSes. """ - if provided is None or required is None or provided==required: + if provided is None or required is None or provided == required: # easy case return True @@ -732,7 +732,7 @@ class WorkingSet(object): for key in self.entry_keys[item]: if key not in seen: - seen[key]=1 + seen[key] = 1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): @@ -1033,7 +1033,7 @@ class Environment(object): is returned. """ return (self.python is None or dist.py_version is None - or dist.py_version==self.python) \ + or dist.py_version == self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): @@ -1238,7 +1238,7 @@ class ResourceManager: extract, as it tracks the generated names for possible cleanup later. 
""" extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name+'-tmp', *names) + target_path = os.path.join(extract_path, archive_name + '-tmp', *names) try: _bypass_ensure_directory(target_path) except: @@ -1344,7 +1344,7 @@ def get_default_cache(): except KeyError: pass - if os.name!='nt': + if os.name != 'nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! @@ -1492,7 +1492,7 @@ class NullProvider: return [] def run_script(self, script_name, namespace): - script = 'scripts/'+script_name + script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n', '\n') @@ -1553,7 +1553,7 @@ class EggProvider(NullProvider): # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None - while path!=old: + while path != old: if _is_unpacked_egg(path): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') @@ -1679,7 +1679,7 @@ class ZipProvider(EggProvider): def __init__(self, module): EggProvider.__init__(self, module) - self.zip_pre = self.loader.archive+os.sep + self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath @@ -1693,9 +1693,9 @@ class ZipProvider(EggProvider): def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. # pseudo-fs path - fspath = self.zip_pre+zip_path - if fspath.startswith(self.egg_root+os.sep): - return fspath[len(self.egg_root)+1:].split(os.sep) + fspath = self.zip_pre + zip_path + if fspath.startswith(self.egg_root + os.sep): + return fspath[len(self.egg_root) + 1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @@ -1766,7 +1766,7 @@ class ZipProvider(EggProvider): # so proceed. 
return real_path # Windows, del old file and retry - elif os.name=='nt': + elif os.name == 'nt': unlink(real_path) rename(tmpnam, real_path) return real_path @@ -1786,7 +1786,7 @@ class ZipProvider(EggProvider): if not os.path.isfile(file_path): return False stat = os.stat(file_path) - if stat.st_size!=size or stat.st_mtime!=timestamp: + if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) @@ -1855,10 +1855,10 @@ class FileMetadata(EmptyProvider): self.path = path def has_metadata(self, name): - return name=='PKG-INFO' and os.path.isfile(self.path) + return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): - if name=='PKG-INFO': + if name == 'PKG-INFO': with io.open(self.path, encoding='utf-8') as f: try: metadata = f.read() @@ -1905,7 +1905,7 @@ class EggMetadata(ZipProvider): def __init__(self, importer): """Create a metadata provider from a zipimporter""" - self.zip_pre = importer.archive+os.sep + self.zip_pre = importer.archive + os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) @@ -2117,7 +2117,7 @@ def file_ns_handler(importer, path_item, packageName, module): subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: - if _normalize_cached(item)==normalized: + if _normalize_cached(item) == normalized: break else: # Only return the path if it's not already there @@ -2294,7 +2294,7 @@ class EntryPoint(object): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name]=ep + this[ep.name] = ep return this @classmethod @@ -2356,7 +2356,7 @@ class Distribution(object): @classmethod def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None]*4 + project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: cls = _distributionImpl[ext.lower()] @@ -2478,9 +2478,9 @@ class Distribution(object): extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn - reqs=[] + reqs = [] elif not evaluate_marker(marker): - reqs=[] + reqs = [] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm @@ -2611,7 +2611,7 @@ class Distribution(object): nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) - npath= [(p and _normalize_cached(p) or p) for p in path] + npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: @@ -2642,7 +2642,7 @@ class Distribution(object): # p is the spot where we found or inserted loc; now remove duplicates while True: try: - np = npath.index(nloc, p+1) + np = npath.index(nloc, p + 1) except ValueError: break else: @@ -2981,7 +2981,7 @@ def _initialize_master_working_set(): dist.activate(replace=False) del dist add_activation_listener(lambda dist: dist.activate(replace=True), existing=False) - working_set.entries=[] + working_set.entries = [] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index e0dbb652..3649a30a 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -164,7 +164,7 @@ class TestDistro: ad.add(Baz) # Activation list now includes 
resolved dependency - assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) ==[Foo,Baz] + assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) == [Foo,Baz] # Requests for conflicting versions produce VersionConflict with pytest.raises(VersionConflict) as vc: ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad) @@ -426,7 +426,7 @@ class TestEntryPoints: m = EntryPoint.parse_map({'xyz':self.submap_str}) self.checkSubMap(m['xyz']) assert list(m.keys()) == ['xyz'] - m = EntryPoint.parse_map("[xyz]\n"+self.submap_str) + m = EntryPoint.parse_map("[xyz]\n" + self.submap_str) self.checkSubMap(m['xyz']) assert list(m.keys()) == ['xyz'] with pytest.raises(ValueError): @@ -644,7 +644,7 @@ class TestParsing: def testVersionOrdering(self): def c(s1,s2): p1, p2 = parse_version(s1),parse_version(s2) - assert p1 Date: Wed, 17 Aug 2016 22:47:37 +0700 Subject: Fix spacing after comment hash. --- pkg_resources/tests/test_resources.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index e0dbb652..854bc327 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -153,7 +153,7 @@ class TestDistro: list(map(ws.add, targets)) with pytest.raises(VersionConflict): ws.resolve(parse_requirements("Foo==0.9"), ad) - ws = WorkingSet([]) # reset + ws = WorkingSet([]) # reset # Request an extra that causes an unresolved dependency for "Baz" with pytest.raises(pkg_resources.DistributionNotFound): -- cgit v1.2.1 From bbfaa7817d92bc2e04e4b4fa89d2b9a783f053cd Mon Sep 17 00:00:00 2001 From: insiv Date: Wed, 17 Aug 2016 22:51:31 +0700 Subject: Fix comparison with None. --- pkg_resources/tests/test_resources.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index e0dbb652..963308b2 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -103,7 +103,7 @@ class TestDistro: d = Distribution("/some/path") assert d.py_version == sys.version[:3] - assert d.platform == None + assert d.platform is None def testDistroParse(self): d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg") -- cgit v1.2.1 From 84aec138dc4311c3f23ff78e9916e00b457a1896 Mon Sep 17 00:00:00 2001 From: insiv Date: Wed, 17 Aug 2016 23:05:38 +0700 Subject: Upgrade pyparsing to version 2.1.8 --- pkg_resources/_vendor/pyparsing.py | 3358 +++++++++++++++++++++++++++--------- pkg_resources/_vendor/vendored.txt | 2 +- 2 files changed, 2565 insertions(+), 795 deletions(-) diff --git a/pkg_resources/_vendor/pyparsing.py b/pkg_resources/_vendor/pyparsing.py index 2284cadc..89cffc10 100644 --- a/pkg_resources/_vendor/pyparsing.py +++ b/pkg_resources/_vendor/pyparsing.py @@ -48,7 +48,7 @@ The program outputs the following:: The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. -The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an +The parsed results returned from L{I{ParserElement.parseString}} can be accessed as a nested list, a dictionary, or an object with named attributes. 
The pyparsing module handles some of the problems that are typically vexing when writing text parsers: @@ -57,8 +57,8 @@ The pyparsing module handles some of the problems that are typically vexing when - embedded comments """ -__version__ = "2.0.6" -__versionTime__ = "9 Nov 2015 19:03" +__version__ = "2.1.8" +__versionTime__ = "14 Aug 2016 08:43 UTC" __author__ = "Paul McGuire " import string @@ -70,8 +70,22 @@ import re import sre_constants import collections import pprint -import functools -import itertools +import traceback +import types +from datetime import datetime + +try: + from _thread import RLock +except ImportError: + from threading import RLock + +try: + from collections import OrderedDict as _OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict as _OrderedDict + except ImportError: + _OrderedDict = None #~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) @@ -81,21 +95,23 @@ __all__ = [ 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase', +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno', +'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', +'tokenMap', 'pyparsing_common', ] -PY_3 = sys.version.startswith('3') +system_version = tuple(sys.version_info)[:3] +PY_3 = system_version[0] == 3 if PY_3: _MAX_INT = sys.maxsize basestring = str @@ -123,18 +139,11 @@ else: return str(obj) except UnicodeEncodeError: - # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) - # state that "The return value must be a string object". However, does a - # unicode object (being a subclass of basestring) count as a "string - # object"? - # If so, then return a unicode object: - return unicode(obj) - # Else encode it... but how? There are many choices... :) - # Replace unprintables with escape codes? - #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') - # Replace unprintables with question marks? - #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') - # ... 
+ # Else encode it + ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') + xmlcharref = Regex('&#\d+;') + xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) + return xmlcharref.transformString(ret) # build list of single arg builtins, tolerant of Python version, that can be used as parse actions singleArgBuiltins = [] @@ -160,7 +169,7 @@ def _xml_escape(data): class _Constants(object): pass -alphas = string.ascii_lowercase + string.ascii_uppercase +alphas = string.ascii_uppercase + string.ascii_lowercase nums = "0123456789" hexnums = nums + "ABCDEFabcdef" alphanums = alphas + nums @@ -180,6 +189,15 @@ class ParseBaseException(Exception): self.msg = msg self.pstr = pstr self.parserElement = elem + self.args = (pstr, loc, msg) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) def __getattr__( self, aname ): """supported attributes by name are: @@ -212,15 +230,26 @@ class ParseBaseException(Exception): markerString, line_str[line_column:])) return line_str.strip() def __dir__(self): - return "loc msg pstr parserElement lineno col line " \ - "markInputline __str__ __repr__".split() + return "lineno col line".split() + dir(type(self)) class ParseException(ParseBaseException): - """exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text + """ + Exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + + Example:: + try: + Word(nums).setName("integer").parseString("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.col)) + + prints:: + Expected integer (at char 0), (line:1, col:1) + column: 1 """ pass @@ -230,12 +259,10 @@ class ParseFatalException(ParseBaseException): pass class ParseSyntaxException(ParseFatalException): - """just like C{L{ParseFatalException}}, but thrown internally when an - C{L{ErrorStop}} ('-' operator) indicates that parsing is to stop immediately because - an unbacktrackable syntax error has been found""" - def __init__(self, pe): - super(ParseSyntaxException, self).__init__( - pe.pstr, pe.loc, pe.msg, pe.parserElement) + """just like L{ParseFatalException}, but thrown internally when an + L{ErrorStop} ('-' operator) indicates that parsing is to stop + immediately because an unbacktrackable syntax error has been found""" + pass #~ class ReparseException(ParseBaseException): #~ """Experimental class - parse actions can raise this exception to cause @@ -251,7 +278,7 @@ class ParseSyntaxException(ParseFatalException): #~ self.reparseLoc = restartLoc class RecursiveGrammarException(Exception): - """exception thrown by C{validate()} if the grammar could be improperly recursive""" + """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" def __init__( self, parseElementList ): self.parseElementTrace = parseElementList @@ -269,12 +296,44 @@ class _ParseResultsWithOffset(object): self.tup = (self.tup[0],i) class ParseResults(object): - 
"""Structured parse results, to provide multiple means of access to the parsed data: + """ + Structured parse results, to provide multiple means of access to the parsed data: - as a list (C{len(results)}) - by list index (C{results[0], results[1]}, etc.) - - by attribute (C{results.}) - """ - def __new__(cls, toklist, name=None, asList=True, modal=True ): + - by attribute (C{results.} - see L{ParserElement.setResultsName}) + + Example:: + integer = Word(nums) + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + # equivalent form: + # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + + def test(s, fn=repr): + print("%s -> %s" % (s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + prints:: + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + def __new__(cls, toklist=None, name=None, asList=True, modal=True ): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) @@ -283,12 +342,16 @@ class ParseResults(object): # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible - def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ): + def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): if self.__doinit: self.__doinit = False self.__name = None self.__parent = None self.__accumNames = {} + self.__asList = asList + self.__modal = modal + if toklist is None: + toklist = [] if isinstance(toklist, list): self.__toklist = toklist[:] elif isinstance(toklist, _generatorType): @@ -331,7 +394,7 @@ class ParseResults(object): if isinstance(v,_ParseResultsWithOffset): self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] sub = v[0] - elif isinstance(k,int): + elif isinstance(k,(int,slice)): self.__toklist[k] = v sub = v else: @@ -354,11 +417,6 @@ class ParseResults(object): removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary - #~ for name in self.__tokdict: - #~ occurrences = self.__tokdict[name] - #~ for j in removed: - #~ for k, (value, position) in enumerate(occurrences): - #~ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) for name,occurrences in self.__tokdict.items(): for j in removed: for k, (value, position) in enumerate(occurrences): @@ -370,39 +428,52 @@ class ParseResults(object): return k in self.__tokdict def __len__( self ): return len( self.__toklist ) - def __bool__(self): return len( self.__toklist ) > 0 + def __bool__(self): return ( not not self.__toklist ) __nonzero__ = __bool__ def __iter__( self ): return iter( self.__toklist ) def __reversed__( self ): return iter( self.__toklist[::-1] ) - def iterkeys( self ): - """Returns all named result keys.""" + def _iterkeys( self ): if hasattr(self.__tokdict, "iterkeys"): return self.__tokdict.iterkeys() else: return iter(self.__tokdict) - def itervalues( self ): - """Returns all named result values.""" - return (self[k] for k in self.iterkeys()) + def _itervalues( self ): + return (self[k] for k in self._iterkeys()) - def 
iteritems( self ): - return ((k, self[k]) for k in self.iterkeys()) + def _iteritems( self ): + return ((k, self[k]) for k in self._iterkeys()) if PY_3: - keys = iterkeys - values = itervalues - items = iteritems + keys = _iterkeys + """Returns an iterator of all named result keys (Python 3.x only).""" + + values = _itervalues + """Returns an iterator of all named result values (Python 3.x only).""" + + items = _iteritems + """Returns an iterator of all named result key-value tuples (Python 3.x only).""" + else: + iterkeys = _iterkeys + """Returns an iterator of all named result keys (Python 2.x only).""" + + itervalues = _itervalues + """Returns an iterator of all named result values (Python 2.x only).""" + + iteritems = _iteritems + """Returns an iterator of all named result key-value tuples (Python 2.x only).""" + def keys( self ): - """Returns all named result keys.""" + """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.iterkeys()) def values( self ): - """Returns all named result values.""" + """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) def items( self ): - """Returns all named result keys and values as a list of tuples.""" + """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) def haskeys( self ): @@ -411,14 +482,39 @@ class ParseResults(object): return bool(self.__tokdict) def pop( self, *args, **kwargs): - """Removes and returns item at specified index (default=last). - Supports both list and dict semantics for pop(). If passed no - argument or an integer argument, it will use list semantics - and pop tokens from the list of parsed tokens. If passed a - non-integer argument (most likely a string), it will use dict - semantics and pop the corresponding value from any defined - results names. A second default return value argument is - supported, just as in dict.pop().""" + """ + Removes and returns item at specified index (default=C{last}). + Supports both C{list} and C{dict} semantics for C{pop()}. If passed no + argument or an integer argument, it will use C{list} semantics + and pop tokens from the list of parsed tokens. If passed a + non-integer argument (most likely a string), it will use C{dict} + semantics and pop the corresponding value from any defined + results names. A second default return value argument is + supported, just as in C{dict.pop()}. 
+ + Example:: + def remove_first(tokens): + tokens.pop(0) + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parseString("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.addParseAction(remove_LABEL) + print(patt.parseString("AAB 123 321").dump()) + prints:: + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ if not args: args = [-1] for k,v in kwargs.items(): @@ -438,39 +534,83 @@ class ParseResults(object): return defaultvalue def get(self, key, defaultValue=None): - """Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified.""" + """ + Returns named result matching the given key, or if there is no + such name, then returns the given C{defaultValue} or C{None} if no + C{defaultValue} is specified. + + Similar to C{dict.get()}. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ if key in self: return self[key] else: return defaultValue def insert( self, index, insStr ): - """Inserts new element at location index in the list of parsed tokens.""" + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to C{list.insert()}. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] + """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary - #~ for name in self.__tokdict: - #~ occurrences = self.__tokdict[name] - #~ for k, (value, position) in enumerate(occurrences): - #~ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) for name,occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) def append( self, item ): - """Add single element to end of ParseResults list of elements.""" + """ + Add single element to end of ParseResults list of elements. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] + """ self.__toklist.append(item) def extend( self, itemseq ): - """Add sequence of elements to end of ParseResults list of elements.""" + """ + Add sequence of elements to end of ParseResults list of elements. 
+ + Example:: + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ if isinstance(itemseq, ParseResults): self += itemseq else: self.__toklist.extend(itemseq) def clear( self ): - """Clear all elements and results names.""" + """ + Clear all elements and results names. + """ del self.__toklist[:] self.__tokdict.clear() @@ -511,7 +651,11 @@ class ParseResults(object): def __radd__(self, other): if isinstance(other,int) and other == 0: + # useful for merging many ParseResults using sum() builtin return self.copy() + else: + # this may raise a TypeError - so be it + return other + self def __repr__( self ): return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) @@ -531,18 +675,60 @@ class ParseResults(object): return out def asList( self ): - """Returns the parse results as a nested list of matching tokens, all converted to strings.""" + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + + Example:: + patt = OneOrMore(Word(alphas)) + result = patt.parseString("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] + + # Use asList() to create an actual list + result_list = result.asList() + print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] + """ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] def asDict( self ): - """Returns the named parse results as dictionary.""" + """ + Returns the named parse results as a nested dictionary. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.asDict() + print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ if PY_3: - return dict( self.items() ) + item_fn = self.items else: - return dict( self.iteritems() ) + item_fn = self.iteritems + + def toItem(obj): + if isinstance(obj, ParseResults): + if obj.haskeys(): + return obj.asDict() + else: + return [toItem(v) for v in obj] + else: + return obj + + return dict((k,toItem(v)) for k,v in item_fn()) def copy( self ): - """Returns a new copy of a C{ParseResults} object.""" + """ + Returns a new copy of a C{ParseResults} object. + """ ret = ParseResults( self.__toklist ) ret.__tokdict = self.__tokdict.copy() ret.__parent = self.__parent @@ -551,7 +737,9 @@ class ParseResults(object): return ret def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.""" + """ + (Deprecated) Returns the parse results as XML. 
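The recursive toItem() conversion above means that named Group results now come back from asDict() as nested plain dicts rather than ParseResults objects. A small sketch, assuming standard pyparsing names (the date grammar is illustrative):

    from pyparsing import Word, Group, nums

    integer = Word(nums)
    date = Group(integer("year") + '/' + integer("month") + '/' + integer("day"))("date")

    result = date.parseString("1999/12/31")
    print(result.asDict())
    # -> {'date': {'year': '1999', 'month': '12', 'day': '31'}}
    # (previously the inner value would have remained a ParseResults object)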
Tags are created for tokens and lists that have defined results names. + """ nl = "\n" out = [] namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() @@ -617,7 +805,27 @@ class ParseResults(object): return None def getName(self): - """Returns the results name for this token expression.""" + """ + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. + + Example:: + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parseString("22 111-22-3333 #221B") + for item in result: + print(item.getName(), ':', item[0]) + prints:: + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ if self.__name: return self.__name elif self.__parent: @@ -633,40 +841,72 @@ class ParseResults(object): else: return None - def dump(self,indent='',depth=0): - """Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data.""" + def dump(self, indent='', depth=0, full=True): + """ + Diagnostic method for listing out the contents of a C{ParseResults}. + Accepts an optional C{indent} argument so that this string can be embedded + in a nested display of other data. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(result.dump()) + prints:: + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ out = [] NL = '\n' out.append( indent+_ustr(self.asList()) ) - if self.haskeys(): - items = sorted(self.items()) - for k,v in items: - if out: - out.append(NL) - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v: - out.append( v.dump(indent,depth+1) ) + if full: + if self.haskeys(): + items = sorted(self.items()) + for k,v in items: + if out: + out.append(NL) + out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) + if isinstance(v,ParseResults): + if v: + out.append( v.dump(indent,depth+1) ) + else: + out.append(_ustr(v)) else: out.append(_ustr(v)) - else: - out.append(_ustr(v)) - elif any(isinstance(vv,ParseResults) for vv in self): - v = self - for i,vv in enumerate(v): - if isinstance(vv,ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + elif any(isinstance(vv,ParseResults) for vv in self): + v = self + for i,vv in enumerate(v): + if isinstance(vv,ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + else: + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) return "".join(out) def pprint(self, *args, **kwargs): - """Pretty-printer for parsed results as a list, using the C{pprint} module. - Accepts additional positional or keyword args as defined for the - C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})""" + """ + Pretty-printer for parsed results as a list, using the C{pprint} module. 
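The new full argument to dump() is not exercised in the examples above; a brief sketch of the difference, assuming a standard pyparsing install:

    from pyparsing import Word, nums

    integer = Word(nums)
    date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
    result = date_str.parseString("12/31/1999")

    print(result.dump())            # list form plus the nested "- day: ..." names listing
    print(result.dump(full=False))  # -> ['12', '/', '31', '/', '1999']  (list form only)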
+ Accepts additional positional or keyword args as defined for the + C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) + + Example:: + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimitedList(term))) + result = func.parseString("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + prints:: + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ pprint.pprint(self.asList(), *args, **kwargs) # add support for pickle protocol @@ -690,8 +930,11 @@ class ParseResults(object): else: self.__parent = None + def __getnewargs__(self): + return self.__toklist, self.__name, self.__asList, self.__modal + def __dir__(self): - return dir(super(ParseResults,self)) + list(self.keys()) + return (dir(type(self)) + list(self.keys())) collections.MutableMapping.register(ParseResults) @@ -771,6 +1014,31 @@ def _trim_arity(func, maxargs=2): return lambda s,l,t: func(t) limit = [0] foundArity = [False] + + # traceback return data structure changed in Py3.5 - normalize back to plain tuples + if system_version[:2] >= (3,5): + def extract_stack(limit=0): + # special handling for Python 3.5.0 - extra deep call stack by 1 + offset = -3 if system_version == (3,5,0) else -2 + frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + return [(frame_summary.filename, frame_summary.lineno)] + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [(frame_summary.filename, frame_summary.lineno)] + else: + extract_stack = traceback.extract_stack + extract_tb = traceback.extract_tb + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 6 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ this_line = extract_stack(limit=2)[-1] + pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + def wrapper(*args): while 1: try: @@ -778,12 +1046,33 @@ def _trim_arity(func, maxargs=2): foundArity[0] = True return ret except TypeError: - if limit[0] <= maxargs and not foundArity[0]: + # re-raise TypeErrors if they did not come from our arity testing + if foundArity[0]: + raise + else: + try: + tb = sys.exc_info()[-1] + if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: + raise + finally: + del tb + + if limit[0] <= maxargs: limit[0] += 1 continue raise + + # copy func name to wrapper for sensible debug output + func_name = "" + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + wrapper.__name__ = func_name + return wrapper - + class ParserElement(object): """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" @@ -791,7 +1080,16 @@ class ParserElement(object): @staticmethod def setDefaultWhitespaceChars( chars ): - """Overrides the default whitespace chars + r""" + Overrides the default whitespace chars + + Example:: + # default whitespace chars are space, and newline + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.setDefaultWhitespaceChars(" \t") + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] """ ParserElement.DEFAULT_WHITE_CHARS = chars @@ -799,8 +1097,22 @@ class ParserElement(object): def inlineLiteralsUsing(cls): """ Set class to be used for inclusion of string literals into a parser. + + Example:: + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inlineLiteralsUsing(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] """ - ParserElement.literalStringClass = cls + ParserElement._literalStringClass = cls def __init__( self, savelist=False ): self.parseAction = list() @@ -826,8 +1138,21 @@ class ParserElement(object): self.callDuringTry = False def copy( self ): - """Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element.""" + """ + Make a copy of this C{ParserElement}. Useful for defining different parse actions + for the same parsing pattern, using copies of the original parse element. 
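The _trim_arity() wrapper earlier in this hunk is what lets parse actions be written with any of the supported signatures; a rough illustration under standard pyparsing (the lambdas are illustrative):

    from pyparsing import Word, nums

    integer = Word(nums)

    # all three signatures are accepted; the wrapper probes for the arity the callable supports
    integer.copy().setParseAction(lambda toks: int(toks[0]))
    integer.copy().setParseAction(lambda loc, toks: int(toks[0]))
    integer.copy().setParseAction(lambda s, loc, toks: int(toks[0]))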
+ + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + prints:: + [5120, 100, 655360, 268435456] + Equivalent form of C{expr.copy()} is just C{expr()}:: + integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + """ cpy = copy.copy( self ) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] @@ -836,7 +1161,13 @@ class ParserElement(object): return cpy def setName( self, name ): - """Define name for this expression, for use in debugging.""" + """ + Define name for this expression, makes debugging and exception messages clearer. + + Example:: + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) + Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ self.name = name self.errmsg = "Expected " + self.name if hasattr(self,"exception"): @@ -844,15 +1175,24 @@ class ParserElement(object): return self def setResultsName( self, name, listAllMatches=False ): - """Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + NOTE: this returns a *copy* of the original C{ParserElement} object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + C{expr("name")} in place of C{expr.setResultsName("name")} - + see L{I{__call__}<__call__>}. + + Example:: + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ newself = self.copy() if name.endswith("*"): @@ -881,42 +1221,76 @@ class ParserElement(object): return self def setParseAction( self, *fns, **kwargs ): - """Define action to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See L{I{parseString}} for more information - on parsing strings containing C{}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ + """ + Define action to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, + C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. + Otherwise, fn does not need to return any value. + + Optional keyword arguments: + - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{parseString}} for more information + on parsing strings containing C{}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + + Example:: + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + # use parse action to convert to ints at parse time + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + date_str = integer + '/' + integer + '/' + integer + + # note that integer fields are now ints, not strings + date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] + """ self.parseAction = list(map(_trim_arity, list(fns))) self.callDuringTry = kwargs.get("callDuringTry", False) return self def addParseAction( self, *fns, **kwargs ): - """Add parse action to expression's list of parse actions. See L{I{setParseAction}}.""" + """ + Add parse action to expression's list of parse actions. See L{I{setParseAction}}. + + See examples in L{I{copy}}. + """ self.parseAction += list(map(_trim_arity, list(fns))) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self def addCondition(self, *fns, **kwargs): """Add a boolean predicate function to expression's list of parse actions. See - L{I{setParseAction}}. Optional keyword argument C{message} can - be used to define a custom message to be used in the raised exception.""" - msg = kwargs.get("message") or "failed user-defined condition" + L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, + functions passed to C{addCondition} need to return boolean success/fail of the condition. 
+ + Optional keyword arguments: + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) + """ + msg = kwargs.get("message", "failed user-defined condition") + exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException for fn in fns: def pa(s,l,t): if not bool(_trim_arity(fn)(s,l,t)): - raise ParseException(s,l,msg) - return t + raise exc_type(s,l,msg) self.parseAction.append(pa) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self @@ -1043,43 +1417,132 @@ class ParserElement(object): return self._parse( instring, loc, doActions=False )[0] except ParseFatalException: raise ParseException( instring, loc, self.errmsg, self) + + def canParseNext(self, instring, loc): + try: + self.tryParse(instring, loc) + except (ParseException, IndexError): + return False + else: + return True + + class _UnboundedCache(object): + def __init__(self): + cache = {} + self.not_in_cache = not_in_cache = object() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + + def clear(self): + cache.clear() + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + + if _OrderedDict is not None: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = _OrderedDict() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + if len(cache) > size: + cache.popitem(False) + + def clear(self): + cache.clear() + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + + else: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = {} + key_fifo = collections.deque([], size) + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + if len(cache) > size: + cache.pop(key_fifo.popleft(), None) + key_fifo.append(key) + + def clear(self): + cache.clear() + key_fifo.clear() + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - lookup = (self,instring,loc,callPreParse,doActions) - if lookup in ParserElement._exprArgCache: - value = ParserElement._exprArgCache[ lookup ] - if isinstance(value, Exception): - raise value - return 
(value[0],value[1].copy()) - else: - try: - value = self._parseNoCache( instring, loc, doActions, callPreParse ) - ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) - return value - except ParseBaseException as pe: - pe.__traceback__ = None - ParserElement._exprArgCache[ lookup ] = pe - raise + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy())) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if isinstance(value, Exception): + raise value + return (value[0], value[1].copy()) _parse = _parseNoCache - # argument cache for optimizing repeated calls when backtracking through recursive expressions - _exprArgCache = {} @staticmethod def resetCache(): - ParserElement._exprArgCache.clear() + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) _packratEnabled = False @staticmethod - def enablePackrat(): + def enablePackrat(cache_size_limit=128): """Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. - + + Parameters: + - cache_size_limit - (default=C{128}) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your @@ -1088,32 +1551,45 @@ class ParserElement(object): C{enablePackrat} before calling C{psyco.full()}. If you do not do this, Python will crash. For best results, call C{enablePackrat()} immediately after importing pyparsing. + + Example:: + import pyparsing + pyparsing.ParserElement.enablePackrat() """ if not ParserElement._packratEnabled: ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = ParserElement._UnboundedCache() + else: + ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) ParserElement._parse = ParserElement._parseCache def parseString( self, instring, parseAll=False ): - """Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{L{StringEnd()}}). - - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. 
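The packrat cache is now size-limited by default; a minimal sketch of the new enablePackrat() signature introduced above (the cache size chosen here is illustrative):

    import pyparsing

    # bound the FIFO memoization cache; pass None for an unbounded cache,
    # or 0 to effectively disable caching while keeping packrat enabled
    pyparsing.ParserElement.enablePackrat(cache_size_limit=256)

    # hit/miss counters can be inspected when tuning the cache size
    print(pyparsing.ParserElement.packrat_cache_stats)   # -> [hits, misses]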
- If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explicitly expand the tabs in your input string before calling - C{parseString} + """ + Execute the parse expression with the given string. + This is the main interface to the client code, once the complete + expression has been built. + + If you want the grammar to require that the entire input string be + successfully parsed, then set C{parseAll} to True (equivalent to ending + the grammar with C{L{StringEnd()}}). + + Note: C{parseString} implicitly calls C{expandtabs()} on the input string, + in order to report proper column numbers in parse actions. + If the input string contains tabs and + the grammar uses parse actions that use the C{loc} argument to index into the + string being parsed, you can ensure you have a consistent view of the input + string by: + - calling C{parseWithTabs} on your grammar before calling C{parseString} + (see L{I{parseWithTabs}}) + - define your parse action using the full C{(s,loc,toks)} signature, and + reference the input string using the parse action's C{s} argument + - explictly expand the tabs in your input string before calling + C{parseString} + + Example:: + Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] + Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text """ ParserElement.resetCache() if not self.streamlined: @@ -1139,14 +1615,35 @@ class ParserElement(object): return tokens def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): - """Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. If - C{overlap} is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}} for more information on parsing - strings with embedded tabs.""" + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + C{maxMatches} argument, to clip scanning after 'n' matches are found. If + C{overlap} is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See L{I{parseString}} for more information on parsing + strings with embedded tabs. + + Example:: + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens,start,end in Word(alphas).scanString(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ if not self.streamlined: self.streamline() for e in self.ignoreExprs: @@ -1189,12 +1686,22 @@ class ParserElement(object): raise exc def transformString( self, instring ): - """Extension to C{L{scanString}}, to modify matching text with modified tokens that may - be returned from a parse action. 
To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. C{transformString()} returns the resulting transformed string.""" + """ + Extension to C{L{scanString}}, to modify matching text with modified tokens that may + be returned from a parse action. To use C{transformString}, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking C{transformString()} on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. C{transformString()} returns the resulting transformed string. + + Example:: + wd = Word(alphas) + wd.setParseAction(lambda toks: toks[0].title()) + + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) + Prints:: + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ out = [] lastE = 0 # force preservation of s, to minimize unwanted transformation of string, and to @@ -1222,9 +1729,18 @@ class ParserElement(object): raise exc def searchString( self, instring, maxMatches=_MAX_INT ): - """Another extension to C{L{scanString}}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. + """ + Another extension to C{L{scanString}}, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + C{maxMatches} argument, to clip searching after 'n' matches are found. + + Example:: + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) + prints:: + ['More', 'Iron', 'Lead', 'Gold', 'I'] """ try: return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) @@ -1235,10 +1751,34 @@ class ParserElement(object): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc + def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): + """ + Generator method to split a string using the given expression as a separator. + May be called with optional C{maxsplit} argument, to limit the number of splits; + and the optional C{includeSeparators} argument (default=C{False}), if the separating + matching text should be included in the split results. 
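The includeSeparators flag of the new split() generator is not shown in its example; a short sketch, assuming standard pyparsing names (the input string is illustrative):

    from pyparsing import oneOf

    sep = oneOf(", ;")
    print(list(sep.split("a, b;c", includeSeparators=True)))
    # -> ['a', ',', ' b', ';', 'c']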
+ + Example:: + punc = oneOf(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + prints:: + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + splits = 0 + last = 0 + for t,s,e in self.scanString(instring, maxMatches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + def __add__(self, other ): - """Implementation of + operator - returns C{L{And}}""" + """ + Implementation of + operator - returns C{L{And}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1246,9 +1786,11 @@ class ParserElement(object): return And( [ self, other ] ) def __radd__(self, other ): - """Implementation of + operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of + operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1256,9 +1798,11 @@ class ParserElement(object): return other + self def __sub__(self, other): - """Implementation of - operator, returns C{L{And}} with error stop""" + """ + Implementation of - operator, returns C{L{And}} with error stop + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1266,9 +1810,11 @@ class ParserElement(object): return And( [ self, And._ErrorStop(), other ] ) def __rsub__(self, other ): - """Implementation of - operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of - operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1276,24 +1822,24 @@ class ParserElement(object): return other - self def __mul__(self,other): - """Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent + """ + Implementation of * operator, allows use of C{expr * 3} in place of + C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer + tuple, similar to C{{min,max}} multipliers in regular expressions. 
Tuples + may also include C{None} as in: + - C{expr*(n,None)} or C{expr*(n,)} is equivalent to C{expr*n + L{ZeroOrMore}(expr)} (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} + - C{expr*(None,n)} is equivalent to C{expr*(0,n)} (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} - + - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} + - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} + + Note that C{expr*(None,n)} does not raise an exception if + more than n exprs exist in the input stream; that is, + C{expr*(None,n)} does not enforce a maximum number of expr + occurrences. If this behavior is desired, then write + C{expr*(None,n) + ~expr} """ if isinstance(other,int): minElements, optElements = other,0 @@ -1347,9 +1893,11 @@ class ParserElement(object): return self.__mul__(other) def __or__(self, other ): - """Implementation of | operator - returns C{L{MatchFirst}}""" + """ + Implementation of | operator - returns C{L{MatchFirst}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1357,9 +1905,11 @@ class ParserElement(object): return MatchFirst( [ self, other ] ) def __ror__(self, other ): - """Implementation of | operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of | operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1367,9 +1917,11 @@ class ParserElement(object): return other | self def __xor__(self, other ): - """Implementation of ^ operator - returns C{L{Or}}""" + """ + Implementation of ^ operator - returns C{L{Or}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1377,9 +1929,11 @@ class ParserElement(object): return Or( [ self, other ] ) def __rxor__(self, other ): - """Implementation of ^ operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of ^ operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1387,9 +1941,11 @@ class ParserElement(object): return other ^ self def __and__(self, other ): - """Implementation of & operator - returns C{L{Each}}""" + """ + Implementation of & 
operator - returns C{L{Each}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1397,9 +1953,11 @@ class ParserElement(object): return Each( [ self, other ] ) def __rand__(self, other ): - """Implementation of & operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of & operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1407,41 +1965,49 @@ class ParserElement(object): return other & self def __invert__( self ): - """Implementation of ~ operator - returns C{L{NotAny}}""" + """ + Implementation of ~ operator - returns C{L{NotAny}} + """ return NotAny( self ) def __call__(self, name=None): - """Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}:: - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - could be written as:: - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - - If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be - passed as C{True}. + """ + Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}. + + If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be + passed as C{True}. - If C{name} is omitted, same as calling C{L{copy}}. - """ + If C{name} is omitted, same as calling C{L{copy}}. + + Example:: + # these are equivalent + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ if name is not None: return self.setResultsName(name) else: return self.copy() def suppress( self ): - """Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. + """ + Suppresses the output of this C{ParserElement}; useful to keep punctuation from + cluttering up returned output. """ return Suppress( self ) def leaveWhitespace( self ): - """Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + Disables the skipping of whitespace before matching the characters in the + C{ParserElement}'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. """ self.skipWhitespace = False return self def setWhitespaceChars( self, chars ): - """Overrides the default whitespace chars + """ + Overrides the default whitespace chars """ self.skipWhitespace = True self.whiteChars = chars @@ -1449,26 +2015,41 @@ class ParserElement(object): return self def parseWithTabs( self ): - """Overrides default behavior to expand C{}s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match C{} characters.""" + """ + Overrides default behavior to expand C{}s to spaces before parsing the input string. 
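suppress() itself has no example above; a small sketch of using it to keep punctuation out of the results, under standard pyparsing (the date grammar is illustrative):

    from pyparsing import Word, Suppress, nums

    integer = Word(nums)
    slash = Suppress('/')                 # equivalent to Literal('/').suppress()
    date_str = integer("year") + slash + integer("month") + slash + integer("day")

    print(date_str.parseString("1999/12/31").asList())   # -> ['1999', '12', '31']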
+ Must be called before C{parseString} when the input grammar contains elements that + match C{} characters. + """ self.keepTabs = True return self def ignore( self, other ): - """Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + patt = OneOrMore(Word(alphas)) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] + + patt.ignore(cStyleComment) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, basestring): + other = Suppress(other) + if isinstance( other, Suppress ): if other not in self.ignoreExprs: - self.ignoreExprs.append( other.copy() ) + self.ignoreExprs.append(other) else: self.ignoreExprs.append( Suppress( other.copy() ) ) return self def setDebugActions( self, startAction, successAction, exceptionAction ): - """Enable display of debugging messages while doing pattern matching.""" + """ + Enable display of debugging messages while doing pattern matching. + """ self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) @@ -1476,8 +2057,39 @@ class ParserElement(object): return self def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching. - Set C{flag} to True to enable, False to disable.""" + """ + Enable display of debugging messages while doing pattern matching. + Set C{flag} to True to enable, False to disable. + + Example:: + wd = Word(alphas).setName("alphaword") + integer = Word(nums).setName("numword") + term = wd | integer + + # turn on debugging for wd + wd.setDebug() + + OneOrMore(term).parseString("abc 123 xyz 890") + + prints:: + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions. Prior to attempting + to match the C{wd} expression, the debugging message C{"Match at loc (,)"} + is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} + message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. + """ if flag: self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) else: @@ -1499,20 +2111,22 @@ class ParserElement(object): pass def validate( self, validateTrace=[] ): - """Check defined expressions for valid structure, check for infinite recursive definitions.""" + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ self.checkRecursion( [] ) def parseFile( self, file_or_filename, parseAll=False ): - """Execute the parse expression on the given file or filename. 
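ignore() now accepts a bare string and wraps it in Suppress() internally; a brief sketch (the '--' separator is illustrative, and previously a ParserElement such as Suppress('--') was needed here):

    from pyparsing import OneOrMore, Word, alphas

    patt = OneOrMore(Word(alphas))
    patt.ignore('--')                     # string is wrapped in Suppress() internally
    print(patt.parseString("spam -- eggs").asList())   # -> ['spam', 'eggs']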
- If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. """ try: file_contents = file_or_filename.read() except AttributeError: - f = open(file_or_filename, "r") - file_contents = f.read() - f.close() + with open(file_or_filename, "r") as f: + file_contents = f.read() try: return self.parseString(file_contents, parseAll) except ParseBaseException as exc: @@ -1524,13 +2138,9 @@ class ParserElement(object): def __eq__(self,other): if isinstance(other, ParserElement): - return self is other or self.__dict__ == other.__dict__ + return self is other or vars(self) == vars(other) elif isinstance(other, basestring): - try: - self.parseString(_ustr(other), parseAll=True) - return True - except ParseBaseException: - return False + return self.matches(other) else: return super(ParserElement,self)==other @@ -1546,40 +2156,161 @@ class ParserElement(object): def __rne__(self,other): return not (self == other) - def runTests(self, tests, parseAll=False): - """Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. + def matches(self, testString, parseAll=True): + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser.0 - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default=False) - flag to pass to C{L{parseString}} when running tests + Parameters: + - testString - to test against this expression for a match + - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests + + Example:: + expr = Word(nums) + assert expr.matches("100") + """ + try: + self.parseString(_ustr(testString), parseAll=parseAll) + return True + except ParseBaseException: + return False + + def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
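With the __eq__ change above, comparing an expression to a string is now a whole-string match test via matches(); a minimal sketch:

    from pyparsing import Word, nums

    integer = Word(nums)
    print(integer == "123")    # -> True   (same as integer.matches("123"))
    print(integer == "12a")    # -> False  (the full-string parse fails on the trailing 'a')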
+ + Parameters: + - tests - a list of separate test strings, or a multiline string of test strings + - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests + - comment - (default=C{'#'}) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - printResults - (default=C{True}) prints test output to stdout + - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if C{failureTests} is True), and the results contain a list of lines of each + test's output + + Example:: + number_expr = pyparsing_common.number.copy() + + result = number_expr.runTests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = number_expr.runTests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' + 3.14.159 + ''', failureTests=True) + print("Success" if result[0] else "Failed!") + prints:: + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success """ if isinstance(tests, basestring): - tests = map(str.strip, tests.splitlines()) + tests = list(map(str.strip, tests.rstrip().splitlines())) + if isinstance(comment, basestring): + comment = Literal(comment) + allResults = [] + comments = [] + success = True for t in tests: - out = [t] + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(t) + continue + if not t: + continue + out = ['\n'.join(comments), t] + comments = [] try: - out.append(self.parseString(t, parseAll=parseAll).dump()) - except ParseException as pe: + result = self.parseString(t, parseAll=parseAll) + out.append(result.dump(full=fullDump)) + success = success and not failureTests + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^') + out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) else: - out.append(' '*pe.loc + '^') - out.append(str(pe)) - out.append('') - print('\n'.join(out)) + out.append(' '*pe.loc + '^' + fatal) + out.append("FAIL: " + str(pe)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: " + str(exc)) + success = success and failureTests + result = exc + + if printResults: + if fullDump: + out.append('') + print('\n'.join(out)) + + allResults.append((t, result)) + + return success, allResults class Token(ParserElement): - """Abstract C{ParserElement} subclass, for defining atomic matching patterns.""" + """ + Abstract C{ParserElement} subclass, for defining atomic matching patterns. 
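runTests() now returns a (success, results) tuple as described above; a short sketch of consuming the per-test results programmatically (printResults=False keeps stdout quiet; the test strings are illustrative):

    from pyparsing import Word, nums

    integer = Word(nums)
    success, report = integer.runTests('''
        # two simple integer tests
        100
        42
        ''', printResults=False)

    print(success)     # -> True
    for test_string, parsed in report:
        print(test_string, '->', parsed.asList())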
+ """ def __init__( self ): super(Token,self).__init__( savelist=False ) class Empty(Token): - """An empty token, will always match.""" + """ + An empty token, will always match. + """ def __init__( self ): super(Empty,self).__init__() self.name = "Empty" @@ -1588,7 +2319,9 @@ class Empty(Token): class NoMatch(Token): - """A token that will never match.""" + """ + A token that will never match. + """ def __init__( self ): super(NoMatch,self).__init__() self.name = "NoMatch" @@ -1601,7 +2334,19 @@ class NoMatch(Token): class Literal(Token): - """Token to exactly match a specified string.""" + """ + Token to exactly match a specified string. + + Example:: + Literal('blah').parseString('blah') # -> ['blah'] + Literal('blah').parseString('blahfooblah') # -> ['blah'] + Literal('blah').parseString('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use L{CaselessLiteral}. + + For keyword matching (force word break before and after the matched string), + use L{Keyword} or L{CaselessKeyword}. + """ def __init__( self, matchString ): super(Literal,self).__init__() self.match = matchString @@ -1627,17 +2372,24 @@ class Literal(Token): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) _L = Literal -ParserElement.literalStringClass = Literal +ParserElement._literalStringClass = Literal class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{L{Literal}}:: - Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}. - Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} - Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive - matching, default is C{False}. + """ + Token to exactly match a specified string as a keyword, that is, it must be + immediately followed by a non-keyword character. Compare with C{L{Literal}}: + - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. + - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} + Accepts two optional constructor arguments in addition to the keyword string: + - C{identChars} is a string of characters that would be valid identifier characters, + defaulting to all alphanumerics + "_" and "$" + - C{caseless} allows case-insensitive matching, default is C{False}. + + Example:: + Keyword("start").parseString("start") # -> ['start'] + Keyword("start").parseString("starting") # -> Exception + + For case-insensitive matching, use L{CaselessKeyword}. """ DEFAULT_KEYWORD_CHARS = alphanums+"_$" @@ -1686,9 +2438,15 @@ class Keyword(Token): Keyword.DEFAULT_KEYWORD_CHARS = chars class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for L{CaselessKeyword}.) 
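The identChars argument to Keyword is not shown in the examples above; a sketch of how it changes what counts as a word break (treating '-' as an identifier character here is illustrative):

    from pyparsing import Keyword

    # by default '-' is not a keyword character, so 'end' matches in 'end-if'
    print(Keyword("end").parseString("end-if"))        # -> ['end']

    # adding '-' to identChars makes 'end-if' read as a longer identifier, so the match fails
    kw = Keyword("end", identChars=Keyword.DEFAULT_KEYWORD_CHARS + "-")
    kw.parseString("end-if")                           # raises ParseException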
""" def __init__( self, matchString ): super(CaselessLiteral,self).__init__( matchString.upper() ) @@ -1703,6 +2461,14 @@ class CaselessLiteral(Literal): raise ParseException(instring, loc, self.errmsg, self) class CaselessKeyword(Keyword): + """ + Caseless version of L{Keyword}. + + Example:: + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] + + (Contrast with example for L{CaselessLiteral}.) + """ def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) @@ -1713,16 +2479,51 @@ class CaselessKeyword(Keyword): raise ParseException(instring, loc, self.errmsg, self) class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. An optional - C{exclude} parameter can list characters that might be found in - the input C{bodyChars} string; useful to define a word of all printables - except for one or two characters, for instance. + """ + Token for matching words composed of allowed character sets. + Defined with string containing all allowed initial characters, + an optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. An optional + C{excludeChars} parameter can list characters that might be found in + the input C{bodyChars} string; useful to define a word of all printables + except for one or two characters, for instance. + + L{srange} is useful for defining custom character set strings for defining + C{Word} expressions, using range notation from regular expression character sets. + + A common mistake is to use C{Word} to match a specific literal string, as in + C{Word("Address")}. Remember that C{Word} uses the string argument to define + I{sets} of matchable characters. This expression would match "Add", "AAA", + "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. + To match an exact literal string, use L{Literal} or L{Keyword}. + + pyparsing includes helper strings for building Words: + - L{alphas} + - L{nums} + - L{alphanums} + - L{hexnums} + - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) + - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) 
+ - L{printables} (any non-whitespace character) + + Example:: + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums+'-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, excludeChars=",") """ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): super(Word,self).__init__() @@ -1837,8 +2638,17 @@ class Word(Token): class Regex(Token): - """Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. + """ + Token for matching strings that match a given regular expression. + Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. + If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as + named parse results. + + Example:: + realnum = Regex(r"[+-]?\d+\.\d*") + date = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)') + # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") """ compiledREtype = type(re.compile("[A-Z]")) def __init__( self, pattern, flags=0): @@ -1846,7 +2656,7 @@ class Regex(Token): super(Regex,self).__init__() if isinstance(pattern, basestring): - if len(pattern) == 0: + if not pattern: warnings.warn("null string passed to Regex; use Empty() instead", SyntaxWarning, stacklevel=2) @@ -1901,23 +2711,36 @@ class Regex(Token): class QuotedString(Token): - """Token for matching strings that are delimited by quoting characters. + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + - quoteChar - string of one or more characters defining the quote delimiting string + - escChar - character to escape quotes, typically backslash (default=C{None}) + - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) + - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) + - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) + - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) + - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) 
to actual whitespace (default=C{True}) + + Example:: + qs = QuotedString('"') + print(qs.searchString('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', endQuoteChar='}}') + print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', escQuote='""') + print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + prints:: + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None): - """ - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=None) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - """ + def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): super(QuotedString,self).__init__() # remove white space from quote chars - wont work anyway quoteChar = quoteChar.strip() - if len(quoteChar) == 0: + if not quoteChar: warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() @@ -1925,7 +2748,7 @@ class QuotedString(Token): endQuoteChar = quoteChar else: endQuoteChar = endQuoteChar.strip() - if len(endQuoteChar) == 0: + if not endQuoteChar: warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() @@ -1937,6 +2760,7 @@ class QuotedString(Token): self.escChar = escChar self.escQuote = escQuote self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes if multiline: self.flags = re.MULTILINE | re.DOTALL @@ -1990,6 +2814,17 @@ class QuotedString(Token): ret = ret[self.quoteCharLen:-self.endQuoteCharLen] if isinstance(ret,basestring): + # replace escaped whitespace + if '\\' in ret and self.convertWhitespaceEscapes: + ws_map = { + r'\t' : '\t', + r'\n' : '\n', + r'\f' : '\f', + r'\r' : '\r', + } + for wslit,wschar in ws_map.items(): + ret = ret.replace(wslit, wschar) + # replace escaped characters if self.escChar: ret = re.sub(self.escCharReplacePattern,"\g<1>",ret) @@ -2013,11 +2848,20 @@ class QuotedString(Token): class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given set. - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. + """ + Token for matching words composed of characters I{not} in a given set (will + include whitespace in matched characters if not listed in the provided exclusion set - see example). + Defined with string containing all disallowed characters, and an optional + minimum, maximum, and/or exact length. 
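The C{convertWhitespaceEscapes} flag introduced in this hunk can be exercised as follows; a small sketch of the documented behaviour, with made-up sample text::

    from pyparsing import QuotedString

    src = r'before "line1\tcol2\nline2" after'

    # by default, escaped whitespace inside the quotes is converted to real tab/newline characters
    print(QuotedString('"').searchString(src))

    # pass convertWhitespaceEscapes=False to keep the literal backslash sequences
    print(QuotedString('"', convertWhitespaceEscapes=False).searchString(src))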
The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. + + Example:: + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + prints:: + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ def __init__( self, notChars, min=1, max=0, exact=0 ): super(CharsNotIn,self).__init__() @@ -2075,11 +2919,13 @@ class CharsNotIn(Token): return self.strRepr class White(Token): - """Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{L{Word}} class.""" + """ + Special matching class for matching whitespace. Normally, whitespace is ignored + by pyparsing grammars. This class is included when some whitespace structures + are significant. Define with a string containing the whitespace characters to be + matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, + as defined for the C{L{Word}} class. + """ whiteStrs = { " " : "", "\t": "", @@ -2131,7 +2977,9 @@ class _PositionToken(Token): self.mayIndexError = False class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for tabular report scraping.""" + """ + Token to advance to a specific column of input text; useful for tabular report scraping. + """ def __init__( self, colno ): super(GoToColumn,self).__init__() self.col = colno @@ -2154,7 +3002,9 @@ class GoToColumn(_PositionToken): return newloc, ret class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within the parse string""" + """ + Matches if current position is at the beginning of a line within the parse string + """ def __init__( self ): super(LineStart,self).__init__() self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) @@ -2174,7 +3024,9 @@ class LineStart(_PositionToken): return loc, [] class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the parse string""" + """ + Matches if current position is at the end of a line within the parse string + """ def __init__( self ): super(LineEnd,self).__init__() self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) @@ -2192,7 +3044,9 @@ class LineEnd(_PositionToken): raise ParseException(instring, loc, self.errmsg, self) class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse string""" + """ + Matches if current position is at the beginning of the parse string + """ def __init__( self ): super(StringStart,self).__init__() self.errmsg = "Expected start of text" @@ -2205,7 +3059,9 @@ class StringStart(_PositionToken): return loc, [] class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string""" + """ + Matches if current position is at the end of the parse string + """ def __init__( self ): super(StringEnd,self).__init__() self.errmsg = "Expected end of text" @@ -2221,11 +3077,12 @@ class StringEnd(_PositionToken): raise ParseException(instring, loc, self.errmsg, self) class WordStart(_PositionToken): - """Matches if the current 
position is at the beginning of a Word, and - is not preceded by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. + """ + Matches if the current position is at the beginning of a Word, and + is not preceded by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of + the string being parsed, or at the beginning of a line. """ def __init__(self, wordChars = printables): super(WordStart,self).__init__() @@ -2240,11 +3097,12 @@ class WordStart(_PositionToken): return loc, [] class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. + """ + Matches if the current position is at the end of a Word, and + is not followed by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of + the string being parsed, or at the end of a line. """ def __init__(self, wordChars = printables): super(WordEnd,self).__init__() @@ -2262,18 +3120,21 @@ class WordEnd(_PositionToken): class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.""" + """ + Abstract subclass of ParserElement, for combining and post-processing parsed tokens. + """ def __init__( self, exprs, savelist = False ): super(ParseExpression,self).__init__(savelist) if isinstance( exprs, _generatorType ): exprs = list(exprs) if isinstance( exprs, basestring ): - self.exprs = [ Literal( exprs ) ] - elif isinstance( exprs, collections.Sequence ): + self.exprs = [ ParserElement._literalStringClass( exprs ) ] + elif isinstance( exprs, collections.Iterable ): + exprs = list(exprs) # if sequence of strings provided, wrap with Literal if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(Literal, exprs) + exprs = map(ParserElement._literalStringClass, exprs) self.exprs = list(exprs) else: try: @@ -2351,7 +3212,7 @@ class ParseExpression(ParserElement): self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError - self.errmsg = "Expected " + str(self) + self.errmsg = "Expected " + _ustr(self) return self @@ -2371,9 +3232,19 @@ class ParseExpression(ParserElement): return ret class And(ParseExpression): - """Requires all given C{ParseExpression}s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the C{'+'} operator. + """ + Requires all given C{ParseExpression}s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the C{'+'} operator. + May also be constructed using the C{'-'} operator, which will suppress backtracking. 
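The C{'-'} operator mentioned above suppresses backtracking by reporting C{ParseSyntaxException} at the point of failure; a minimal sketch, with illustrative names, assuming the behaviour described in this docstring::

    from pyparsing import Keyword, Word, alphas, nums, ParseSyntaxException

    ident = Word(alphas)
    integer = Word(nums)

    # '-' marks a point of no return: once 'let' is matched, the remaining elements
    # must follow, and a failure is raised as ParseSyntaxException instead of being
    # backtracked past by an enclosing alternative
    assignment = Keyword("let") - ident + "=" + integer

    try:
        assignment.parseString("let 123 = 456")
    except ParseSyntaxException as pse:
        print("could not continue after 'let':", pse)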
+ + Example:: + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"),name_expr("name"),integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") """ class _ErrorStop(Empty): @@ -2405,9 +3276,9 @@ class And(ParseExpression): raise except ParseBaseException as pe: pe.__traceback__ = None - raise ParseSyntaxException(pe) + raise ParseSyntaxException._from_exception(pe) except IndexError: - raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) ) + raise ParseSyntaxException(instring, len(instring), self.errmsg, self) else: loc, exprtokens = e._parse( instring, loc, doActions ) if exprtokens or exprtokens.haskeys(): @@ -2416,7 +3287,7 @@ class And(ParseExpression): def __iadd__(self, other ): if isinstance( other, basestring ): - other = Literal( other ) + other = ParserElement._literalStringClass( other ) return self.append( other ) #And( [ self, other ] ) def checkRecursion( self, parseElementList ): @@ -2437,9 +3308,18 @@ class And(ParseExpression): class Or(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the expression that matches the longest string will be used. - May be constructed using the C{'^'} operator. + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the expression that matches the longest string will be used. + May be constructed using the C{'^'} operator. + + Example:: + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) + prints:: + [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(Or,self).__init__(exprs, savelist) @@ -2488,7 +3368,7 @@ class Or(ParseExpression): def __ixor__(self, other ): if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) return self.append( other ) #Or( [ self, other ] ) def __str__( self ): @@ -2507,9 +3387,21 @@ class Or(ParseExpression): class MatchFirst(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the C{'|'} operator. + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the first one listed is the one that will match. + May be constructed using the C{'|'} operator. + + Example:: + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(MatchFirst,self).__init__(exprs, savelist) @@ -2544,7 +3436,7 @@ class MatchFirst(ParseExpression): def __ior__(self, other ): if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) return self.append( other ) #MatchFirst( [ self, other ] ) def __str__( self ): @@ -2563,9 +3455,58 @@ class MatchFirst(ParseExpression): class Each(ParseExpression): - """Requires all given C{ParseExpression}s to be found, but in any order. - Expressions may be separated by whitespace. - May be constructed using the C{'&'} operator. + """ + Requires all given C{ParseExpression}s to be found, but in any order. + Expressions may be separated by whitespace. + May be constructed using the C{'&'} operator. + + Example:: + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) + + shape_spec.runTests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + prints:: + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 """ def __init__( self, exprs, savelist = True ): super(Each,self).__init__(exprs, savelist) @@ -2619,17 +3560,7 @@ class Each(ParseExpression): loc,results = e._parse(instring,loc,doActions) resultlist.append(results) - finalResults = ParseResults([]) - for r in resultlist: - dups = {} - for k in r.keys(): - if k in finalResults: - tmp = ParseResults(finalResults[k]) - tmp += ParseResults(r[k]) - dups[k] = tmp - finalResults += ParseResults(r) - for k,v in dups.items(): - finalResults[k] = v + finalResults = sum(resultlist, ParseResults([])) return loc, finalResults def __str__( self ): @@ -2648,11 +3579,16 @@ class Each(ParseExpression): class ParseElementEnhance(ParserElement): - """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.""" + """ + Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. 
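The practical difference between the C{'^'} (longest alternative) and C{'|'} (first alternative) forms described above, side by side in a small illustrative sketch::

    from pyparsing import Word, Combine, nums

    real = Combine(Word(nums) + '.' + Word(nums))
    integer = Word(nums)

    # Or ('^') tries all alternatives and keeps the longest match
    number_longest = integer ^ real
    print(number_longest.parseString("3.1416"))   # -> ['3.1416']

    # MatchFirst ('|') stops at the first alternative that matches
    number_first = integer | real
    print(number_first.parseString("3.1416"))     # -> ['3']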
+ """ def __init__( self, expr, savelist=False ): super(ParseElementEnhance,self).__init__(savelist) if isinstance( expr, basestring ): - expr = Literal(expr) + if issubclass(ParserElement._literalStringClass, Token): + expr = ParserElement._literalStringClass(expr) + else: + expr = ParserElement._literalStringClass(Literal(expr)) self.expr = expr self.strRepr = None if expr is not None: @@ -2720,10 +3656,22 @@ class ParseElementEnhance(ParserElement): class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. C{FollowedBy} - does *not* advance the parsing position within the input string, it only + """ + Lookahead matching of the given parse expression. C{FollowedBy} + does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list.""" + position. C{FollowedBy} always returns a null token list. + + Example:: + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() + prints:: + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ def __init__( self, expr ): super(FollowedBy,self).__init__(expr) self.mayReturnEmpty = True @@ -2734,11 +3682,16 @@ class FollowedBy(ParseElementEnhance): class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. C{NotAny} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression does *not* match at the current - position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator.""" + """ + Lookahead to disallow matching with the given parse expression. C{NotAny} + does I{not} advance the parsing position within the input string, it only + verifies that the specified parse expression does I{not} match at the current + position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} + always returns a null token list. May be constructed using the '~' operator. 
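A typical use of C{NotAny} via the C{'~'} operator is keeping keywords from being read as ordinary identifiers; the following is only a sketch, with an illustrative keyword set::

    from pyparsing import CaselessKeyword, Word, alphas, OneOrMore

    AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
    keyword = AND | OR | NOT

    # an identifier is any word that is *not* one of the keywords
    ident = ~keyword + Word(alphas)

    print(OneOrMore(keyword | ident).parseString("x AND y OR NOT z"))
    # -> ['x', 'AND', 'y', 'OR', 'NOT', 'z']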
+ + Example:: + + """ def __init__( self, expr ): super(NotAny,self).__init__(expr) #~ self.leaveWhitespace() @@ -2747,11 +3700,7 @@ class NotAny(ParseElementEnhance): self.errmsg = "Found unwanted token, "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): - try: - self.expr.tryParse( instring, loc ) - except (ParseException,IndexError): - pass - else: + if self.expr.canParseNext(instring, loc): raise ParseException(instring, loc, self.errmsg, self) return loc, [] @@ -2764,80 +3713,114 @@ class NotAny(ParseElementEnhance): return self.strRepr - -class ZeroOrMore(ParseElementEnhance): - """Optional repetition of zero or more of the given expression.""" - def __init__( self, expr ): - super(ZeroOrMore,self).__init__(expr) - self.mayReturnEmpty = True +class _MultipleMatch(ParseElementEnhance): + def __init__( self, expr, stopOn=None): + super(_MultipleMatch, self).__init__(expr) + ender = stopOn + if isinstance(ender, basestring): + ender = ParserElement._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None def parseImpl( self, instring, loc, doActions=True ): - tokens = [] + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) + hasIgnoreExprs = (not not self.ignoreExprs) while 1: + if check_ender: + try_not_ender(instring, loc) if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) + preloc = self_skip_ignorables( instring, loc ) else: preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) + loc, tmptokens = self_expr_parse( instring, preloc, doActions ) if tmptokens or tmptokens.haskeys(): tokens += tmptokens except (ParseException,IndexError): pass return loc, tokens + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. + + Parameters: + - expr - expression that must match one or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stopOn attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parseString(text).pprint() + """ def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." + self.strRepr = "{" + _ustr(self.expr) + "}..." 
return self.strRepr def setResultsName( self, name, listAllMatches=False ): - ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches) + ret = super(OneOrMore,self).setResultsName(name,listAllMatches) ret.saveAsList = True return ret +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) -class OneOrMore(ParseElementEnhance): - """Repetition of one or more of the given expression.""" + Example: similar to L{OneOrMore} + """ + def __init__( self, expr, stopOn=None): + super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + self.mayReturnEmpty = True + def parseImpl( self, instring, loc, doActions=True ): - # must be at least one - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) try: - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens + return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) except (ParseException,IndexError): - pass - - return loc, tokens + return loc, [] def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." + self.strRepr = "[" + _ustr(self.expr) + "]..." return self.strRepr - def setResultsName( self, name, listAllMatches=False ): - ret = super(OneOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - class _NullToken(object): def __bool__(self): return False @@ -2847,9 +3830,39 @@ class _NullToken(object): _optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): - """Optional matching of the given expression. - A default return string can also be specified, if the optional expression - is not found. + """ + Optional matching of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - default (optional) - value to be returned if the optional expression is not found. + + Example:: + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) + zip.runTests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + prints:: + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) """ def __init__( self, expr, default=_optionalNotMatched ): super(Optional,self).__init__( expr, savelist=False ) @@ -2879,13 +3892,60 @@ class Optional(ParseElementEnhance): return self.strRepr - class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched expression is found. - If C{include} is set to true, the matched expression is also parsed (the skipped text - and matched expression are returned as a 2-element list). The C{ignore} - argument is used to define grammars (typically quoted strings and comments) that - might contain false matches. + """ + Token for skipping over all undefined text until the matched expression is found. 
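The C{stopOn} sentinel threaded through C{_MultipleMatch} above is also handy for explicitly terminated lists; a small sketch with an illustrative 'END' keyword, assuming the documented stop-on behaviour::

    from pyparsing import Keyword, OneOrMore, Word, alphas

    END = Keyword("END")

    # without stopOn, Word(alphas) would also consume the 'END' sentinel as data
    terminated = OneOrMore(Word(alphas), stopOn=END) + END
    print(terminated.parseString("alpha beta gamma END"))
    # -> ['alpha', 'beta', 'gamma', 'END']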
+ + Parameters: + - expr - target expression marking the end of the data to be skipped + - include - (default=C{False}) if True, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element list). + - ignore - (default=C{None}) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - failOn - (default=C{None}) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the SkipTo is not a match + + Example:: + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quotedString) + string_data.setParseAction(tokenMap(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.searchString(report): + print tkt.dump() + prints:: + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor """ def __init__( self, other, include=False, ignore=None, failOn=None ): super( SkipTo, self ).__init__( other ) @@ -2894,77 +3954,85 @@ class SkipTo(ParseElementEnhance): self.mayIndexError = False self.includeMatch = include self.asList = False - if failOn is not None and isinstance(failOn, basestring): - self.failOn = Literal(failOn) + if isinstance(failOn, basestring): + self.failOn = ParserElement._literalStringClass(failOn) else: self.failOn = failOn self.errmsg = "No match found for "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): - startLoc = loc + startloc = loc instrlen = len(instring) expr = self.expr - failParse = False - while loc <= instrlen: - try: - if self.failOn: + expr_parse = self.expr._parse + self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None + self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: try: - self.failOn.tryParse(instring, loc) + tmploc = self_ignoreExpr_tryParse(instring, tmploc) except ParseBaseException: - pass - else: - failParse = True - raise ParseException(instring, loc, "Found expression " + str(self.failOn)) - failParse = False - if self.ignoreExpr is not None: - while 1: - try: - loc = 
self.ignoreExpr.tryParse(instring,loc) - # print("found ignoreExpr, advance to", loc) - except ParseBaseException: - break - expr._parse( instring, loc, doActions=False, callPreParse=False ) - skipText = instring[startLoc:loc] - if self.includeMatch: - loc,mat = expr._parse(instring,loc,doActions,callPreParse=False) - if mat: - skipRes = ParseResults( skipText ) - skipRes += mat - return loc, [ skipRes ] - else: - return loc, [ skipText ] - else: - return loc, [ skipText ] - except (ParseException,IndexError): - if failParse: - raise - else: - loc += 1 - raise ParseException(instring, loc, self.errmsg, self) + break + + try: + expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) + skipresult += mat + + return loc, skipresult class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. - - Note: take care when assigning to C{Forward} not to overlook precedence of operators. - Specifically, '|' has a lower precedence than '<<', so that:: - fwdExpr << a | b | c - will actually be evaluated as:: - (fwdExpr << a) | b | c - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the C{Forward}:: - fwdExpr << (a | b | c) - Converting to use the '<<=' operator instead will avoid this problem. + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. + + Note: take care when assigning to C{Forward} not to overlook precedence of operators. + Specifically, '|' has a lower precedence than '<<', so that:: + fwdExpr << a | b | c + will actually be evaluated as:: + (fwdExpr << a) | b | c + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the C{Forward}:: + fwdExpr << (a | b | c) + Converting to use the '<<=' operator instead will avoid this problem. + + See L{ParseResults.pprint} for an example of a recursive parser created using + C{Forward}. """ def __init__( self, other=None ): super(Forward,self).__init__( other, savelist=False ) def __lshift__( self, other ): if isinstance( other, basestring ): - other = ParserElement.literalStringClass(other) + other = ParserElement._literalStringClass(other) self.expr = other - self.mayReturnEmpty = other.mayReturnEmpty self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty @@ -2998,7 +4066,9 @@ class Forward(ParseElementEnhance): def __str__( self ): if hasattr(self,"name"): return self.name + return self.__class__.__name__ + ": ..." 
+ # stubbed out for now - creates awful memory and perf issues self._revertClass = self.__class__ self.__class__ = _ForwardNoRecurse try: @@ -3023,26 +4093,29 @@ class _ForwardNoRecurse(Forward): return "..." class TokenConverter(ParseElementEnhance): - """Abstract subclass of C{ParseExpression}, for converting parsed results.""" + """ + Abstract subclass of C{ParseExpression}, for converting parsed results. + """ def __init__( self, expr, savelist=False ): super(TokenConverter,self).__init__( expr )#, savelist ) self.saveAsList = False -class Upcase(TokenConverter): - """Converter to upper case all matching tokens.""" - def __init__(self, *args): - super(Upcase,self).__init__(*args) - warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead", - DeprecationWarning,stacklevel=2) - - def postParse( self, instring, loc, tokenlist ): - return list(map( str.upper, tokenlist )) - - class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. + """ + Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the input string; + this can be disabled by specifying C{'adjacent=False'} in the constructor. + + Example:: + real = Word(nums) + '.' + Word(nums) + print(real.parseString('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parseString('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) """ def __init__( self, expr, joinString="", adjacent=True ): super(Combine,self).__init__( expr ) @@ -3072,7 +4145,19 @@ class Combine(TokenConverter): return retToks class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.""" + """ + Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. + + Example:: + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Optional(delimitedList(term)) + print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Optional(delimitedList(term))) + print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] + """ def __init__( self, expr ): super(Group,self).__init__( expr ) self.saveAsList = True @@ -3081,9 +4166,40 @@ class Group(TokenConverter): return [ tokenlist ] class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. - Useful for tabular report scraping when the first column can be used as a item key. + """ + Converter to return a repetitive expression as a list, but also as a dictionary. + Each element can also be referenced using the first token in the expression as its key. + Useful for tabular report scraping when the first column can be used as a item key. 
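C{Forward}, C{Group}, and C{Suppress} from this region combine naturally into recursive grammars; a minimal, illustrative nested-list sketch::

    from pyparsing import Forward, Group, Suppress, Word, ZeroOrMore, nums

    LPAR, RPAR = map(Suppress, "()")
    expr = Forward()
    atom = Word(nums) | Group(LPAR + ZeroOrMore(expr) + RPAR)
    expr <<= atom

    print(expr.parseString("(1 (2 3) 4)"))
    # -> [['1', ['2', '3'], '4']]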
+ + Example:: + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + # print attributes as plain groups + print(OneOrMore(attr_expr).parseString(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names + result = Dict(OneOrMore(Group(attr_expr))).parseString(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.asDict()) + prints:: + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + See more examples at L{ParseResults} of accessing fields by results name. """ def __init__( self, expr ): super(Dict,self).__init__( expr ) @@ -3115,7 +4231,24 @@ class Dict(TokenConverter): class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.""" + """ + Converter for ignoring the results of a parsed expression. + + Example:: + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + ZeroOrMore(',' + wd) + print(wd_list1.parseString(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) + print(wd_list2.parseString(source)) + prints:: + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + (See also L{delimitedList}.) + """ def postParse( self, instring, loc, tokenlist ): return [] @@ -3124,7 +4257,9 @@ class Suppress(TokenConverter): class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once.""" + """ + Wrapper for parse actions, to ensure they are only called once. + """ def __init__(self, methodCall): self.callable = _trim_arity(methodCall) self.called = False @@ -3138,20 +4273,36 @@ class OnlyOnce(object): self.called = False def traceParseAction(f): - """Decorator for debugging parse actions.""" + """ + Decorator for debugging parse actions. + + Example:: + wd = Word(alphas) + + @traceParseAction + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens))) + + wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) + print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + prints:: + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + <3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) ) + sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write( "< ['aa', 'bb', 'cc'] + delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." 
if combine: @@ -3177,11 +4333,15 @@ def delimitedList( expr, delim=",", combine=False ): return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) def countedArray( expr, intExpr=None ): - """Helper to define a counted list of expressions. - This helper defines a pattern of the form:: - integer expr expr expr... - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. + """ + Helper to define a counted list of expressions. + This helper defines a pattern of the form:: + integer expr expr expr... + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. + + Example:: + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() def countFieldParseAction(s,l,t): @@ -3194,7 +4354,7 @@ def countedArray( expr, intExpr=None ): intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return ( intExpr + arrayExpr ) + return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') def _flatten(L): ret = [] @@ -3206,16 +4366,17 @@ def _flatten(L): return ret def matchPreviousLiteral(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: - first = Word(nums) - second = matchPreviousLiteral(first) - matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches a - previous literal, will also match the leading C{"1:1"} in C{"1:10"}. - If this is not desired, use C{matchPreviousExpr}. - Do *not* use with packrat parsing enabled. + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousLiteral(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. Because this matches a + previous literal, will also match the leading C{"1:1"} in C{"1:10"}. + If this is not desired, use C{matchPreviousExpr}. + Do I{not} use with packrat parsing enabled. """ rep = Forward() def copyTokenToRepeater(s,l,t): @@ -3225,24 +4386,26 @@ def matchPreviousLiteral(expr): else: # flatten t tokens tflat = _flatten(t.asList()) - rep << And( [ Literal(tt) for tt in tflat ] ) + rep << And(Literal(tt) for tt in tflat) else: rep << Empty() expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) return rep def matchPreviousExpr(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches by - expressions, will *not* match the leading C{"1:1"} in C{"1:10"}; - the expressions are evaluated first, and then compared, so - C{"1"} is compared with C{"10"}. - Do *not* use with packrat parsing enabled. + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. 
For example:: + first = Word(nums) + second = matchPreviousExpr(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. Because this matches by + expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; + the expressions are evaluated first, and then compared, so + C{"1"} is compared with C{"10"}. + Do I{not} use with packrat parsing enabled. """ rep = Forward() e2 = expr.copy() @@ -3255,6 +4418,7 @@ def matchPreviousExpr(expr): raise ParseException("",0,"") rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) return rep def _escapeRegexRangeChars(s): @@ -3266,16 +4430,27 @@ def _escapeRegexRangeChars(s): return _ustr(s) def oneOf( strs, caseless=False, useRegex=True ): - """Helper to quickly define a set of alternative Literals, and makes sure to do - longest-first testing when there is a conflict, regardless of the input order, - but returns a C{L{MatchFirst}} for best performance. - - Parameters: - - strs - a string of space-delimited literals, or a list of string literals - - caseless - (default=False) - treat all literals as caseless - - useRegex - (default=True) - as an optimization, will generate a Regex + """ + Helper to quickly define a set of alternative Literals, and makes sure to do + longest-first testing when there is a conflict, regardless of the input order, + but returns a C{L{MatchFirst}} for best performance. + + Parameters: + - strs - a string of space-delimited literals, or a collection of string literals + - caseless - (default=C{False}) - treat all literals as caseless + - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) + + Example:: + comp_oper = oneOf("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + prints:: + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ if caseless: isequal = ( lambda a,b: a.upper() == b.upper() ) @@ -3289,12 +4464,10 @@ def oneOf( strs, caseless=False, useRegex=True ): symbols = [] if isinstance(strs,basestring): symbols = strs.split() - elif isinstance(strs, collections.Sequence): - symbols = list(strs[:]) - elif isinstance(strs, _generatorType): + elif isinstance(strs, collections.Iterable): symbols = list(strs) else: - warnings.warn("Invalid argument to oneOf, expected string or list", + warnings.warn("Invalid argument to oneOf, expected string or iterable", SyntaxWarning, stacklevel=2) if not symbols: return NoMatch() @@ -3318,41 +4491,76 @@ def oneOf( strs, caseless=False, useRegex=True ): #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) try: if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ) + return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) else: - return Regex( "|".join(re.escape(sym) for sym in symbols) ) + return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) except: warnings.warn("Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) # last resort, just use MatchFirst - return MatchFirst( [ parseElementClass(sym) for sym in symbols ] ) + 
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) def dictOf( key, value ): - """Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. + """ + Helper to easily and clearly define a dictionary by specifying the respective patterns + for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens + in the proper order. The key pattern can include delimiting markers or punctuation, + as long as they are suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the C{Dict} results can include named token + fields. + + Example:: + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + print(OneOrMore(attr_expr).parseString(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) + + # similar to Dict, but simpler call format + result = dictOf(attr_label, attr_value).parseString(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.asDict()) + prints:: + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} """ return Dict( ZeroOrMore( Group ( key + value ) ) ) def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given expression. Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not - require the inspect module to chase up the call stack. By default, returns a - string containing the original parsed text. + """ + Helper to return the original, untokenized text for a given expression. Useful to + restore the parsed fields of an HTML start tag into the raw tag text itself, or to + revert separate tokens with intervening whitespace back to the original matching + input text. By default, returns astring containing the original parsed text. - If the optional C{asString} argument is passed as C{False}, then the return value is a - C{L{ParseResults}} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. So if - the expression passed to C{L{originalTextFor}} contains expressions with defined - results names, you must set C{asString} to C{False} if you want to preserve those - results name values.""" + If the optional C{asString} argument is passed as C{False}, then the return value is a + C{L{ParseResults}} containing any results names that were originally matched, and a + single token containing the original matched text from the input string. 
So if + the expression passed to C{L{originalTextFor}} contains expressions with defined + results names, you must set C{asString} to C{False} if you want to preserve those + results name values. + + Example:: + src = "this is test bold text normal text " + for tag in ("b","i"): + opener,closer = makeHTMLTags(tag) + patt = originalTextFor(opener + SkipTo(closer) + closer) + print(patt.searchString(src)[0]) + prints:: + [' bold text '] + ['text'] + """ locMarker = Empty().setParseAction(lambda s,loc,t: loc) endlocMarker = locMarker.copy() endlocMarker.callPreparse = False @@ -3361,27 +4569,37 @@ def originalTextFor(expr, asString=True): extractText = lambda s,l,t: s[t._original_start:t._original_end] else: def extractText(s,l,t): - del t[:] - t.insert(0, s[t._original_start:t._original_end]) - del t["_original_start"] - del t["_original_end"] + t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] matchExpr.setParseAction(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs return matchExpr def ungroup(expr): - """Helper to undo pyparsing's default grouping of And expressions, even - if all but one are non-empty.""" + """ + Helper to undo pyparsing's default grouping of And expressions, even + if all but one are non-empty. + """ return TokenConverter(expr).setParseAction(lambda t:t[0]) def locatedExpr(expr): - """Helper to decorate a returned token with its starting and ending locations in the input string. - This helper adds the following results names: - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains C{} characters, you may want to call - C{L{ParserElement.parseWithTabs}} + """ + Helper to decorate a returned token with its starting and ending locations in the input string. + This helper adds the following results names: + - locn_start = location where matched expression begins + - locn_end = location where matched expression ends + - value = the actual parsed results + + Be careful if the input text contains C{} characters, you may want to call + C{L{ParserElement.parseWithTabs}} + + Example:: + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + prints:: + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] """ locator = Empty().setParseAction(lambda s,l,t: l) return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) @@ -3402,21 +4620,22 @@ _charRange = Group(_singleChar + Suppress("-") + _singleChar) _reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" def srange(s): - r"""Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be:: - a single character - an escaped character with a leading backslash (such as \- or \]) - an escaped hex character with a leading '\x' (\x21, which is a '!' character) - (\0x## is also supported for backwards compatibility) - an escaped octal character with a leading '\0' (\041, which is a '!' 
character) - a range of any of the above, separated by a dash ('a-z', etc.) - any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.) + r""" + Helper to easily define string ranges for use in Word construction. Borrows + syntax from regexp '[]' string range definitions:: + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + The input string must be enclosed in []'s, and the returned string is the expanded + character set joined into a single string. + The values enclosed in the []'s may be: + - a single character + - an escaped character with a leading backslash (such as C{\-} or C{\]}) + - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) + (C{\0x##} is also supported for backwards compatibility) + - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) + - a range of any of the above, separated by a dash (C{'a-z'}, etc.) + - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) """ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) try: @@ -3425,8 +4644,9 @@ def srange(s): return "" def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at a specific - column in the input text. + """ + Helper method for defining parse actions that require matching at a specific + column in the input text. """ def verifyCol(strg,locn,toks): if col(locn,strg) != n: @@ -3434,57 +4654,83 @@ def matchOnlyAtCol(n): return verifyCol def replaceWith(replStr): - """Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{L{transformString}()}. """ - #def _replFunc(*args): - # return [replStr] - #return _replFunc - return functools.partial(next, itertools.repeat([replStr])) + Helper method for common parse actions that simply return a literal value. Especially + useful when used with C{L{transformString}()}. + + Example:: + num = Word(nums).setParseAction(lambda toks: int(toks[0])) + na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) + term = na | num + + OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s,l,t: [replStr] def removeQuotes(s,l,t): - """Helper parse action for removing quotation marks from parsed quoted strings. - To use, add this parse action to quoted string using:: - quotedString.setParseAction( removeQuotes ) """ - return t[0][1:-1] + Helper parse action for removing quotation marks from parsed quoted strings. 
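C{replaceWith} is most often paired with C{transformString}; a short illustrative sketch (the 'NULL' marker and keyword spellings are made up)::

    from pyparsing import CaselessKeyword, replaceWith

    # normalize a few spellings of "not applicable" to a single marker
    na = (CaselessKeyword("N/A") | CaselessKeyword("NA")).setParseAction(replaceWith("NULL"))
    print(na.transformString("value1 NA value2 n/a value3"))
    # -> 'value1 NULL value2 NULL value3'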
-def upcaseTokens(s,l,t): - """Helper parse action to convert tokens to upper case.""" - return [ tt.upper() for tt in map(_ustr,t) ] + Example:: + # by default, quotation marks are included in parsed results + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] -def downcaseTokens(s,l,t): - """Helper parse action to convert tokens to lower case.""" - return [ tt.lower() for tt in map(_ustr,t) ] + # use removeQuotes to strip quotation marks from parsed results + quotedString.setParseAction(removeQuotes) + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] + """ + return t[0][1:-1] + +def tokenMap(func, *args): + """ + Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional + args are passed, they are forwarded to the given function as additional arguments after + the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the + parsed data to an integer using base 16. + + Example (compare the last to example in L{ParserElement.transformString}:: + hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) + hex_ints.runTests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).setParseAction(tokenMap(str.upper)) + OneOrMore(upperword).runTests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).setParseAction(tokenMap(str.title)) + OneOrMore(wd).setParseAction(' '.join).runTests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + prints:: + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + def pa(s,l,t): + return [func(tokn, *args) for tokn in t] -def keepOriginalText(s,startLoc,t): - """DEPRECATED - use new helper method C{L{originalTextFor}}. 
- Helper parse action to preserve original parsed text, - overriding any nested parse actions.""" - try: - endloc = getTokensEndLoc() - except ParseException: - raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action") - del t[:] - t += ParseResults(s[startLoc:endloc]) - return t - -def getTokensEndLoc(): - """Method to be called from within a parse action to determine the end - location of the parsed tokens.""" - import inspect - fstack = inspect.stack() try: - # search up the stack (through intervening argument normalizers) for correct calling routine - for f in fstack[2:]: - if f[3] == "_parseNoCache": - endloc = f[0].f_locals["loc"] - return endloc - else: - raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") - finally: - del fstack + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + pa.__name__ = func_name + return pa + +upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) +"""Helper parse action to convert tokens to upper case.""" + +downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) +"""Helper parse action to convert tokens to lower case.""" + def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr,basestring): @@ -3508,40 +4754,90 @@ def _makeTags(tagStr, xml): Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") closeTag = Combine(_L("") - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % tagStr) + openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) + closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % resname) openTag.tag = resname closeTag.tag = resname return openTag, closeTag def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, given a tag name""" + """ + Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches + tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. + + Example:: + text = 'More info at the pyparsing wiki page' + # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple + a,a_end = makeHTMLTags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.searchString(text): + # attributes in the tag (like "href" shown here) are also accessible as named results + print(link.link_text, '->', link.href) + prints:: + pyparsing -> http://pyparsing.wikispaces.com + """ return _makeTags( tagStr, False ) def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, given a tag name""" + """ + Helper to construct opening and closing tag expressions for XML, given a tag name. Matches + tags only in the given upper/lower case. + + Example: similar to L{makeHTMLTags} + """ return _makeTags( tagStr, True ) def withAttribute(*args,**attrDict): - """Helper to create a validating parse action to be used with start tags created - with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. 
Use C{withAttribute} to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - C{<TD>} or C{<DIV>
}. - - Call C{withAttribute} with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in C{(align="right")}, or - - as an explicit dict with C{**} operator, when an attribute name is also a Python + """ + Helper to create a validating parse action to be used with start tags created + with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag + with a required attribute value, to avoid false matches on common tags such as + C{<TD>} or C{<DIV>
}. + + Call C{withAttribute} with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in C{(align="right")}, or + - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. - If just testing for C{class} (with or without a namespace), use C{L{withClass}}. - - To verify that the attribute exists, but without specifying a value, pass - C{withAttribute.ANY_VALUE} as the value. - """ + If just testing for C{class} (with or without a namespace), use C{L{withClass}}. + + To verify that the attribute exists, but without specifying a value, pass + C{withAttribute.ANY_VALUE} as the value. + + Example:: + html = ''' +
<div> + Some text + <div type="grid">1 4 0 1 0</div> + <div type="graph">1,3 2,3 1,1</div> + <div>this has no type</div> + </div>
+ + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ if args: attrs = args[:] else: @@ -3558,9 +4854,37 @@ def withAttribute(*args,**attrDict): withAttribute.ANY_VALUE = object() def withClass(classname, namespace=''): - """Simplified version of C{L{withAttribute}} when matching on a div class - made - difficult because C{class} is a reserved word in Python. - """ + """ + Simplified version of C{L{withAttribute}} when matching on a div class - made + difficult because C{class} is a reserved word in Python. + + Example:: + html = ''' +
<div> + Some text + <div class="grid">1 4 0 1 0</div> + <div class="graph">1,3 2,3 1,1</div> + <div>this &lt;div&gt; has no class</div> + </div>
+ + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ classattr = "%s:class" % namespace if namespace else "class" return withAttribute(**{classattr : classname}) @@ -3569,40 +4893,69 @@ opAssoc.LEFT = object() opAssoc.RIGHT = object() def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. - - Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - - lpar - expression for matching left-parentheses (default=Suppress('(')) - - rpar - expression for matching right-parentheses (default=Suppress(')')) + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. + + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
+ - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signedInteger + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr - thisExpr = Forward()#.setName("expr%d" % i) + thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) @@ -3636,37 +4989,77 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): raise ValueError("operator must indicate right or left associativity") if pa: matchExpr.setParseAction( pa ) - thisExpr <<= ( matchExpr | lastExpr ) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret + operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" -dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes") -sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes") -quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()) +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). 
- - Parameters: - - opener - opening character for a nested list (default="("); can also be a pyparsing expression - - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - - content - expression for items within the nested lists (default=None) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. - - Use the C{ignoreExpr} argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. - The default is L{quotedString}, but if no expressions are to be ignored, - then pass C{None} for this argument. + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. 
+ + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") @@ -3697,23 +5090,86 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) else: ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. - Parameters: - - blockStatementExpr - expression defining syntax of statement that + Parameters: + - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - - indentStack - list created by caller to manage indentation stack + - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the + - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements - (default=True) - - A valid block must contain at least one C{blockStatement}. + (default=C{True}) + + A valid block must contain at least one C{blockStatement}. 
+ + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return @@ -3738,9 +5194,9 @@ def indentedBlock(blockStatementExpr, indentStack, indent=True): indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = Empty() + Empty().setParseAction(checkSubIndent) - PEER = Empty().setParseAction(checkPeerIndent) - UNDENT = Empty().setParseAction(checkUnindent) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: smExpr = Group( Optional(NL) + #~ FollowedBy(blockStatementExpr) + @@ -3749,57 +5205,371 @@ def indentedBlock(blockStatementExpr, indentStack, indent=True): smExpr = Group( Optional(NL) + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr + return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:")) -commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline() -_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "')) -replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment") +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... 
*/}" -htmlComment = Regex(r"") -restOfLine = Regex(r".*").leaveWhitespace() -dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment") -cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?").setName("HTML comment") +"Comment of the form C{}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... (to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... (to end of line)}" + _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional( Word(" \t") + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers}, L{reals}, L{scientific notation}) + - common L{programming identifiers} + - network addresses (L{MAC}, L{IPv4}, L{IPv6}) + - ISO8601 L{dates} and L{datetime} + - L{UUID} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an 
int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that parses a hexadecimal integer, returns an int""" + + signedInteger = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signedInteger().setParseAction(convertToFloat) + '/' + signedInteger().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signedInteger + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a float""" + sciReal = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sciReal | real | signedInteger).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """ + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = 'More info at the
pyparsing wiki page' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) if __name__ == "__main__": - selectToken = CaselessLiteral( "select" ) - fromToken = CaselessLiteral( "from" ) - - ident = Word( alphas, alphanums + "_$" ) - columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - columnNameList = Group( delimitedList( columnName ) ).setName("columns") - tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - tableNameList = Group( delimitedList( tableName ) ).setName("tables") - simpleSQL = ( selectToken + \ - ( '*' | columnNameList ).setResultsName( "columns" ) + \ - fromToken + \ - tableNameList.setResultsName( "tables" ) ) - - simpleSQL.runTests("""\ - SELECT * from XYZZY, ABC - select * from SYS.XYZZY - Select A from Sys.dual - Select AA,BB,CC from Sys.dual - Select A, B, C from Sys.dual - Select A, B, C from Sys.dual - Xelect A, B, C from Sys.dual - Select A, B, C frox Sys.dual - Select - Select ^^^ frox Sys.dual - Select A, B, C from Sys.dual, Table2""") + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/pkg_resources/_vendor/vendored.txt b/pkg_resources/_vendor/vendored.txt index 46532c0a..a30a409d 100644 --- a/pkg_resources/_vendor/vendored.txt +++ b/pkg_resources/_vendor/vendored.txt @@ -1,3 +1,3 @@ packaging==16.7 -pyparsing==2.0.6 +pyparsing==2.1.8 six==1.10.0 -- cgit v1.2.1 From a527fa46a1bb7829af90a3bea544709995c4f1ea Mon Sep 17 00:00:00 2001 From: stepshal Date: Thu, 18 Aug 2016 08:42:56 +0700 Subject: Add missing whitespace after comma. 
--- pkg_resources/__init__.py | 34 ++++++++--------- pkg_resources/tests/test_resources.py | 70 +++++++++++++++++------------------ 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py index 58214292..d94e6339 100644 --- a/pkg_resources/__init__.py +++ b/pkg_resources/__init__.py @@ -752,8 +752,8 @@ class WorkingSet(object): if entry is None: entry = dist.location - keys = self.entry_keys.setdefault(entry,[]) - keys2 = self.entry_keys.setdefault(dist.location,[]) + keys = self.entry_keys.setdefault(entry, []) + keys2 = self.entry_keys.setdefault(dist.location, []) if not replace and dist.key in self.by_key: # ignore hidden distros return @@ -1353,7 +1353,7 @@ def get_default_cache(): # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), - (('HOMEDRIVE','HOMEPATH'), app_data), + (('HOMEDRIVE', 'HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME @@ -1392,7 +1392,7 @@ def safe_version(version): # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: - version = version.replace(' ','.') + version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) @@ -1410,7 +1410,7 @@ def to_filename(name): Any '-' characters are currently replaced with '_'. """ - return name.replace('-','_') + return name.replace('-', '_') def invalid_marker(text): @@ -1508,7 +1508,7 @@ class NullProvider: cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) - script_code = compile(script_text, script_filename,'exec') + script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) def _has(self, path): @@ -1965,7 +1965,7 @@ def find_on_path(importer, path_item, only=False): if _is_unpacked_egg(path_item): yield Distribution.from_filename( path_item, metadata=PathMetadata( - path_item, os.path.join(path_item,'EGG-INFO') + path_item, os.path.join(path_item, 'EGG-INFO') ) ) else: @@ -2037,7 +2037,7 @@ def _handle_ns(packageName, path_item): module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) - elif not hasattr(module,'__path__'): + elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) @@ -2089,8 +2089,8 @@ def declare_namespace(packageName): # Track what packages are namespaces, so when new path items are added, # they can be updated - _namespace_packages.setdefault(parent,[]).append(packageName) - _namespace_packages.setdefault(packageName,[]) + _namespace_packages.setdefault(parent, []).append(packageName) + _namespace_packages.setdefault(packageName, []) for path_item in path: # Ensure all the parent's path items are reflected in the child, @@ -2104,7 +2104,7 @@ def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: - for package in _namespace_packages.get(parent,()): + for package in _namespace_packages.get(parent, ()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) @@ -2482,7 +2482,7 @@ class Distribution(object): elif not evaluate_marker(marker): reqs = [] extra = safe_extra(extra) or None - dm.setdefault(extra,[]).extend(parse_requirements(reqs)) + dm.setdefault(extra, 
[]).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): @@ -2578,7 +2578,7 @@ class Distribution(object): self._get_metadata('entry_points.txt'), self ) if group is not None: - return ep_map.get(group,{}) + return ep_map.get(group, {}) return ep_map def get_entry_info(self, group, name): @@ -2682,7 +2682,7 @@ class Distribution(object): return False return True - def clone(self,**kw): + def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): @@ -2769,7 +2769,7 @@ _distributionImpl = { } -def issue_warning(*args,**kw): +def issue_warning(*args, **kw): level = 1 g = globals() try: @@ -2916,12 +2916,12 @@ def split_sections(s): # wrap up last segment yield section, content -def _mkstemp(*args,**kw): +def _mkstemp(*args, **kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open - return tempfile.mkstemp(*args,**kw) + return tempfile.mkstemp(*args, **kw) finally: # and then put it back os.open = old_open diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index 4e7652e3..1d663b83 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -51,15 +51,15 @@ class TestDistro: assert list(ad) == ['foopkg'] # Distributions sort by version - assert [dist.version for dist in ad['FooPkg']] == ['1.4','1.3-1','1.2'] + assert [dist.version for dist in ad['FooPkg']] == ['1.4', '1.3-1', '1.2'] # Removing a distribution leaves sequence alone ad.remove(ad['FooPkg'][1]) - assert [dist.version for dist in ad['FooPkg']] == ['1.4','1.2'] + assert [dist.version for dist in ad['FooPkg']] == ['1.4', '1.2'] # And inserting adds them in order ad.add(dist_from_fn("FooPkg-1.9.egg")) - assert [dist.version for dist in ad['FooPkg']] == ['1.9','1.4','1.2'] + assert [dist.version for dist in ad['FooPkg']] == ['1.9', '1.4', '1.2'] ws = WorkingSet([]) foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg") @@ -86,7 +86,7 @@ class TestDistro: ws.add(foo14) assert ad.best_match(req, ws).version == '1.4' - def checkFooPkg(self,d): + def checkFooPkg(self, d): assert d.project_name == "FooPkg" assert d.key == "foopkg" assert d.version == "1.3.post1" @@ -97,7 +97,7 @@ class TestDistro: def testDistroBasics(self): d = Distribution( "/some/path", - project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32" + project_name="FooPkg", version="1.3-1", py_version="2.4", platform="win32" ) self.checkFooPkg(d) @@ -115,7 +115,7 @@ class TestDistro: d = Distribution( "/some/path", project_name="FooPkg", py_version="2.4", platform="win32", metadata=Metadata( - ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n") + ('PKG-INFO', "Metadata-Version: 1.0\nVersion: 1.3-1\n") ) ) self.checkFooPkg(d) @@ -164,7 +164,7 @@ class TestDistro: ad.add(Baz) # Activation list now includes resolved dependency - assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) == [Foo,Baz] + assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) == [Foo, Baz] # Requests for conflicting versions produce VersionConflict with pytest.raises(VersionConflict) as vc: ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad) @@ -218,7 +218,7 @@ class TestDistro: quux = Distribution.from_filename("/foo_dir/quux-1.0.dist-info") ad.add(quux) res = list(ws.resolve(parse_requirements("Foo[baz]"), ad)) - assert res == [Foo,quux] + assert res == [Foo, quux] def test_marker_evaluation_with_multiple_extras(self): ad = 
pkg_resources.Environment([]) @@ -238,7 +238,7 @@ class TestDistro: fred = Distribution.from_filename("/foo_dir/fred-0.1.dist-info") ad.add(fred) res = list(ws.resolve(parse_requirements("Foo[baz,bar]"), ad)) - assert sorted(res) == [fred,quux,Foo] + assert sorted(res) == [fred, quux, Foo] def test_marker_evaluation_with_extras_loop(self): ad = pkg_resources.Environment([]) @@ -274,19 +274,19 @@ class TestDistro: docutils>=0.3 [fastcgi] fcgiapp>=0.1""") - self.checkRequires(d,"Twisted>=1.5") + self.checkRequires(d, "Twisted>=1.5") self.checkRequires( - d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] + d, "Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"] ) self.checkRequires( - d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] + d, "Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"] ) self.checkRequires( - d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), - ["docgen","fastcgi"] + d, "Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(), + ["docgen", "fastcgi"] ) self.checkRequires( - d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), + d, "Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(), ["fastcgi", "docgen"] ) with pytest.raises(pkg_resources.UnknownExtra): @@ -348,7 +348,7 @@ class TestEntryPoints: def setup_method(self, method): self.dist = Distribution.from_filename( - "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]'))) + "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt', '[x]'))) def testBasics(self): ep = EntryPoint( @@ -405,7 +405,7 @@ class TestEntryPoints: submap_expect = dict( feature1=EntryPoint('feature1', 'somemodule', ['somefunction']), - feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']), + feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1', 'extra2']), feature3=EntryPoint('feature3', 'this.module', extras=['something']) ) submap_str = """ @@ -423,7 +423,7 @@ class TestEntryPoints: EntryPoint.parse_group("x", ["foo=baz", "foo=bar"]) def testParseMap(self): - m = EntryPoint.parse_map({'xyz':self.submap_str}) + m = EntryPoint.parse_map({'xyz': self.submap_str}) self.checkSubMap(m['xyz']) assert list(m.keys()) == ['xyz'] m = EntryPoint.parse_map("[xyz]\n" + self.submap_str) @@ -480,7 +480,7 @@ class TestRequirements: hash(( "twisted", packaging.specifiers.SpecifierSet(">=1.2"), - frozenset(["foo","bar"]), + frozenset(["foo", "bar"]), None )) ) @@ -521,9 +521,9 @@ class TestParsing: assert list(parse_requirements('')) == [] def testYielding(self): - for inp,out in [ - ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']), - (['x\n\n','y'], ['x','y']), + for inp, out in [ + ([], []), ('x', ['x']), ([[]], []), (' x\n y', ['x', 'y']), + (['x\n\n', 'y'], ['x', 'y']), ]: assert list(pkg_resources.yield_lines(inp)) == out @@ -626,9 +626,9 @@ class TestParsing: req, = parse_requirements('foo >= 1.0, < 3') def testVersionEquality(self): - def c(s1,s2): - p1, p2 = parse_version(s1),parse_version(s2) - assert p1 == p2, (s1,s2,p1,p2) + def c(s1, s2): + p1, p2 = parse_version(s1), parse_version(s2) + assert p1 == p2, (s1, s2, p1, p2) c('1.2-rc1', '1.2rc1') c('0.4', '0.4.0') @@ -642,13 +642,13 @@ class TestParsing: c('1.2.a', '1.2a') def testVersionOrdering(self): - def c(s1,s2): - p1, p2 = parse_version(s1),parse_version(s2) - assert p1 < p2, (s1,s2,p1,p2) + def c(s1, s2): + p1, p2 = parse_version(s1), parse_version(s2) + assert p1 < p2, (s1, s2, p1, p2) - c('2.1','2.1.1') - c('2a1','2b0') - c('2a1','2.1') + c('2.1', '2.1.1') + 
c('2a1', '2b0') + c('2a1', '2.1') c('2.3a1', '2.3') c('2.1-1', '2.1-2') c('2.1-1', '2.1.1') @@ -660,8 +660,8 @@ class TestParsing: c('0.4', '4.0') c('0.0.4', '0.4.0') c('0post1', '0.4post1') - c('2.1.0-rc1','2.1.0') - c('2.1dev','2.1a0') + c('2.1.0-rc1', '2.1.0') + c('2.1dev', '2.1a0') torture = """ 0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1 @@ -669,9 +669,9 @@ class TestParsing: 0.77.2-1 0.77.1-1 0.77.0-1 """.split() - for p,v1 in enumerate(torture): + for p, v1 in enumerate(torture): for v2 in torture[p + 1:]: - c(v2,v1) + c(v2, v1) def testVersionBuildout(self): """ -- cgit v1.2.1 From 10bf8e72f80925d924abfa6d635ad738233ae79c Mon Sep 17 00:00:00 2001 From: stepshal Date: Thu, 18 Aug 2016 09:12:15 +0700 Subject: Fix spacing after comment hash. --- pkg_resources/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py index 58214292..17b69727 100644 --- a/pkg_resources/__init__.py +++ b/pkg_resources/__init__.py @@ -2976,7 +2976,7 @@ def _initialize_master_working_set(): # ensure that all distributions added to the working set in the future # (e.g. by calling ``require()``) will get activated as well, # with higher priority (replace=True). - dist = None # ensure dist is defined for del dist below + dist = None # ensure dist is defined for del dist below for dist in working_set: dist.activate(replace=False) del dist -- cgit v1.2.1 From 8d1eecaef52a1baf2b9fc6c772880d8c537b562f Mon Sep 17 00:00:00 2001 From: "J. Goutin" Date: Thu, 18 Aug 2016 13:42:07 +0200 Subject: Add numpy version check --- setuptools/msvc.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/setuptools/msvc.py b/setuptools/msvc.py index 360c1a68..bffaa6aa 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -6,6 +6,7 @@ import sys import platform import itertools import distutils.errors +from distutils.version import StrictVersion from setuptools.extern.six.moves import filterfalse @@ -228,9 +229,9 @@ def msvc14_gen_lib_options(*args, **kwargs): """ if "numpy.distutils" in sys.modules: import numpy as np - return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) - else: - return unpatched['msvc14_gen_lib_options'](*args, **kwargs) + if StrictVersion(np.__version__) < StrictVersion('1.11.2'): + return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) + return unpatched['msvc14_gen_lib_options'](*args, **kwargs) def _augment_exception(exc, version, arch=''): -- cgit v1.2.1 From 643f0ae4b4330f0cacec3654a7cb1e94a9316618 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 15:57:20 -0400 Subject: Update changelog --- CHANGES.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 9f098df9..b47465de 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -2,15 +2,16 @@ CHANGES ======= -v25.3.1 +v25.3.0 ------- * #739 Fix unquoted libpaths by fixing compatibility between `numpy.distutils` and `distutils._msvccompiler` for numpy < 1.11.2 (Fix issue #728, error also fixed in Numpy). -v25.3.0 -------- +* #731: Bump certifi. + +* Style updates. See #740, #741, #743, #744, #742, #747. -#731: Bump certifi. +* #735: include license file. v25.2.0 ------- -- cgit v1.2.1 From 787383732d45e6565f0e68e268a7157e3198cd8c Mon Sep 17 00:00:00 2001 From: "Jason R. 
Coombs" Date: Fri, 19 Aug 2016 15:57:32 -0400 Subject: =?UTF-8?q?Bump=20version:=2025.2.0=20=E2=86=92=2025.3.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- setup.cfg | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 92fd8354..6e543e94 100755 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 25.2.0 +current_version = 25.3.0 commit = True tag = True diff --git a/setup.py b/setup.py index 5fa237d5..4b3bf3fe 100755 --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ def pypi_link(pkg_filename): setup_params = dict( name="setuptools", - version="25.2.0", + version="25.3.0", description="Easily download, build, install, upgrade, and uninstall " "Python packages", author="Python Packaging Authority", -- cgit v1.2.1 From 7c32dac163b1d8f1256caf0a4e42ed19ff74d150 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 15:57:32 -0400 Subject: Added tag v25.3.0 for changeset 2371456ae99d --- .hgtags | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgtags b/.hgtags index 75a9fb08..ccc770e2 100644 --- a/.hgtags +++ b/.hgtags @@ -301,3 +301,4 @@ c350190e7bbf274e6728f14af7451b1fd3aaeba2 v25.1.2 76143bb477b50314ab6f4ccc4ced80ee43f0dc94 v25.1.5 2db4c66aeae47217aaf92099a9875e9e810c9cbb v25.1.6 2083d7c3fadcf15b3bc07f7532440efbcf8fd18d v25.2.0 +2371456ae99d11187c33deacf1308aded31081d9 v25.3.0 -- cgit v1.2.1 From fc6050ad4c1481be0a1aba1f056e76aa8be50039 Mon Sep 17 00:00:00 2001 From: Daniel Holth Date: Fri, 19 Aug 2016 15:58:00 -0400 Subject: changelog --- CHANGES.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 5a2b9928..3c4b118e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -8,6 +8,14 @@ v25.1.3 * #714 and #704: Revert fix as it breaks other components downstream that can't handle unicode. See #709, #710, and #712. +* Add Extension(py_limited_api=True). When set to a truthy value, + that extension gets a filename apropriate for code using Py_LIMITED_API. + When used correctly this allows a single compiled extension to work on + all future versions of CPython 3. + The py_limited_api argument only controls the filename. To be + compatible with multiple versions of Python 3, the C extension + will also need to set -DPy_LIMITED_API=... and be modified to use + only the functions in the limited API. v25.1.2 ------- -- cgit v1.2.1 From c6d604f053b12920d774324b9fa36211fa9e2eaf Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:01:42 -0400 Subject: Move changelog into a new release --- CHANGES.rst | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 10be5821..cd203952 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -2,6 +2,18 @@ CHANGES ======= +v25.4.0 +------- + +* Add Extension(py_limited_api=True). When set to a truthy value, + that extension gets a filename apropriate for code using Py_LIMITED_API. + When used correctly this allows a single compiled extension to work on + all future versions of CPython 3. + The py_limited_api argument only controls the filename. To be + compatible with multiple versions of Python 3, the C extension + will also need to set -DPy_LIMITED_API=... and be modified to use + only the functions in the limited API. + v25.3.0 ------- @@ -44,14 +56,6 @@ v25.1.3 * #714 and #704: Revert fix as it breaks other components downstream that can't handle unicode. See #709, #710, and #712. 
-* Add Extension(py_limited_api=True). When set to a truthy value, - that extension gets a filename apropriate for code using Py_LIMITED_API. - When used correctly this allows a single compiled extension to work on - all future versions of CPython 3. - The py_limited_api argument only controls the filename. To be - compatible with multiple versions of Python 3, the C extension - will also need to set -DPy_LIMITED_API=... and be modified to use - only the functions in the limited API. v25.1.2 ------- -- cgit v1.2.1 From c7ee084a4e94b2061f50b7a6633f814cf3caf615 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:02:23 -0400 Subject: Reorganize imports --- setuptools/command/build_ext.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index 7bb4d24c..81d0c32c 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -1,12 +1,12 @@ +import os +import sys +import itertools from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler from distutils.errors import DistutilsError from distutils import log -import os -import sys -import itertools from setuptools.extension import Library @@ -104,7 +104,7 @@ class build_ext(_build_ext): filename = _build_ext.get_ext_filename(self, fullname) if fullname in self.ext_map: ext = self.ext_map[fullname] - if (sys.version_info[0] != 2 + if (sys.version_info[0] != 2 and getattr(ext, 'py_limited_api') and get_abi3_suffix()): from distutils.sysconfig import get_config_var -- cgit v1.2.1 From a3a04186f0b0b8fb9206f2813a768d655a7acc04 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:02:39 -0400 Subject: Move import to top --- setuptools/command/build_ext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index 81d0c32c..df8f03fc 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -1,6 +1,7 @@ import os import sys import itertools +import imp from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler @@ -61,7 +62,6 @@ if_dl = lambda s: s if have_rtld else '' def get_abi3_suffix(): """Return the file extension for an abi3-compliant Extension()""" - import imp for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION): if '.abi3' in suffix: # Unix return suffix -- cgit v1.2.1 From e3b053192c96cc247c3e12dae78923631397dce7 Mon Sep 17 00:00:00 2001 From: "Jason R. 
Coombs" Date: Fri, 19 Aug 2016 16:05:04 -0400 Subject: Move import to the top --- setuptools/command/build_ext.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index df8f03fc..ca46b22a 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -5,7 +5,7 @@ import imp from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler -from distutils.sysconfig import customize_compiler +from distutils.sysconfig import customize_compiler, get_config_var from distutils.errors import DistutilsError from distutils import log @@ -17,10 +17,8 @@ try: except ImportError: _build_ext = _du_build_ext -from distutils.sysconfig import get_config_var - -get_config_var("LDSHARED") # make sure _config_vars is initialized -del get_config_var +# make sure _config_vars is initialized +get_config_var("LDSHARED") from distutils.sysconfig import _config_vars as _CONFIG_VARS @@ -107,7 +105,6 @@ class build_ext(_build_ext): if (sys.version_info[0] != 2 and getattr(ext, 'py_limited_api') and get_abi3_suffix()): - from distutils.sysconfig import get_config_var so_ext = get_config_var('SO') filename = filename[:-len(so_ext)] filename = filename + get_abi3_suffix() -- cgit v1.2.1 From 097e92abaa7b72815a5eb9c232293ef4bbc1b626 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:06:58 -0400 Subject: Extract variable for boolean expression for nicer indentation. --- setuptools/command/build_ext.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index ca46b22a..9caabfd5 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -10,6 +10,7 @@ from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library +from setuptools.extern import six try: # Attempt to use Cython for building extensions, if available @@ -102,9 +103,12 @@ class build_ext(_build_ext): filename = _build_ext.get_ext_filename(self, fullname) if fullname in self.ext_map: ext = self.ext_map[fullname] - if (sys.version_info[0] != 2 + use_abi3 = ( + six.PY3 and getattr(ext, 'py_limited_api') - and get_abi3_suffix()): + and get_abi3_suffix() + ) + if use_abi3: so_ext = get_config_var('SO') filename = filename[:-len(so_ext)] filename = filename + get_abi3_suffix() -- cgit v1.2.1 From 2cde914c7bc8c8e5a59f937317d47898b38426e4 Mon Sep 17 00:00:00 2001 From: "Jason R. 
Coombs" Date: Fri, 19 Aug 2016 16:09:06 -0400 Subject: Use six for python major version detection --- setuptools/tests/test_build_ext.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py index f2e1f59d..0edc92ec 100644 --- a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -1,7 +1,9 @@ import sys import distutils.command.build_ext as orig - from distutils.sysconfig import get_config_var + +from setuptools.extern import six + from setuptools.command.build_ext import build_ext, get_abi3_suffix from setuptools.dist import Distribution from setuptools.extension import Extension @@ -27,7 +29,7 @@ class TestBuildExt: of Python 3 if 'is_abi3' is truthy on Extension() """ print(get_abi3_suffix()) - + extension = Extension('spam.eggs', ['eggs.c'], py_limited_api=True) dist = Distribution(dict(ext_modules=[extension])) cmd = build_ext(dist) @@ -35,9 +37,9 @@ class TestBuildExt: assert 'spam.eggs' in cmd.ext_map res = cmd.get_ext_filename('spam.eggs') - if sys.version_info[0] == 2 or not get_abi3_suffix(): + if six.PY2 or not get_abi3_suffix(): assert res.endswith(get_config_var('SO')) elif sys.platform == 'win32': assert res.endswith('eggs.pyd') else: - assert 'abi3' in res \ No newline at end of file + assert 'abi3' in res -- cgit v1.2.1 From cea7420c9417f14f6c069dabf1df23055f681af1 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:10:05 -0400 Subject: =?UTF-8?q?Bump=20version:=2025.3.0=20=E2=86=92=2025.4.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- setup.cfg | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 6e543e94..b089c68d 100755 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 25.3.0 +current_version = 25.4.0 commit = True tag = True diff --git a/setup.py b/setup.py index 4b3bf3fe..cd4353d0 100755 --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ def pypi_link(pkg_filename): setup_params = dict( name="setuptools", - version="25.3.0", + version="25.4.0", description="Easily download, build, install, upgrade, and uninstall " "Python packages", author="Python Packaging Authority", -- cgit v1.2.1 From 8aae14c12ea79b6d293e40db02c027e1279a46a8 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:10:05 -0400 Subject: Added tag v25.4.0 for changeset f713f9faaaa3 --- .hgtags | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgtags b/.hgtags index ccc770e2..06083e1c 100644 --- a/.hgtags +++ b/.hgtags @@ -302,3 +302,4 @@ c350190e7bbf274e6728f14af7451b1fd3aaeba2 v25.1.2 2db4c66aeae47217aaf92099a9875e9e810c9cbb v25.1.6 2083d7c3fadcf15b3bc07f7532440efbcf8fd18d v25.2.0 2371456ae99d11187c33deacf1308aded31081d9 v25.3.0 +f713f9faaaa33c0e9a628dc9322ef8d1fbeb8319 v25.4.0 -- cgit v1.2.1 From a7931d03be180878538bae00c78d93304e5ca09b Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:26:18 -0400 Subject: Rewrite test, dynamically generating the .tar.gz file, with help from dstufft in #748. 
--- setuptools/tests/test_archive_util.py | 50 +++++++++++++---------------------- 1 file changed, 18 insertions(+), 32 deletions(-) diff --git a/setuptools/tests/test_archive_util.py b/setuptools/tests/test_archive_util.py index 3e679c11..b789e9ac 100644 --- a/setuptools/tests/test_archive_util.py +++ b/setuptools/tests/test_archive_util.py @@ -1,5 +1,8 @@ # coding: utf-8 +import tarfile +import io + from setuptools.extern import six import pytest @@ -12,41 +15,24 @@ def tarfile_with_unicode(tmpdir): """ Create a tarfile containing only a file whose name is a zero byte file called testimàˆge.png. - - TODO: Is it possible to generate this file programmatically? """ - data = ( - b'\x1f\x8b\x08\x00R\xfe\x9fW\x00\x03\xed\xd6AO\x13A\x14\x00' - b'\xe0\x11b\x8c\x8d\' z\xf02\x07=x\xd9\xce\xcc\xce\xec\xd4\xc3' - b'&\xa21\xb4X\\lI\xa3r0\xc3v\xc1\x8a\xec6\xcbbz3\x1cH\x80x\x93' - b'x\xeaQ\x12\x13\x0f$z\xd5\x13\x17\xa9U\x8e&\xfc\x06\x13\xff' - b'\x82\xb3\xa5\n\xb6J!\x16\x8c\xf0\xbed2\x9d\xd7\xe9v\xba\xb3' - b'\xafo\x8c\xe4\xa8\xaa\xa4=U\xf4\xc2\xa4\xf1 \xf2f\xa3\xd2\xcc' - b'\xfa\xcb)\xcf(\xfbS\xa8K\x08!\x16\xe78\xee\xa5%\x1a=a\xdb' - b'\xe3\x06FLL\x99\xe4R#\x9cbB%\xa5\x1c\xe1J\xb7\x16\xb0\x97' - b'\xb9\xd9H\x85z)\x8fT\xa8\xdc\xe0\xcf\xf3\xf4\xb4\xc9\xc9=\xae' - b'\xb3\xfdS\xf0\xcf\xfe?\xc1$.\xab\xe8\xa1m\xb4\xed~\x82\x11' - b'\xec\xea\xb1gS.\t%&e,\x8e\xa9\xdd1"S\tf\xe2\xfc\x8dt&{\xcf(zO' - b'lj\xe9]d\x8c\xec\n\x97\xfc\xc0\xe6\xdc\x12\x84\x9a2AS?\xc2\xfe' - b'\xe3\x92?m\xd3\xc4\xbf\xbe\x05\'Z\xfb\xbew\xff;:\xe5\xbf)dK\xfe' - b'\x0b*\x04\xc2\xa4\xfbKiw\xc2\xf3\x1f\x9d>\x7f\x06\xf5 4\xa2\\' - b'\xec\xe4\xf1]\xdc\x14\xc7\xd0Y\xdd\x98n\xefu\x8b\xc7\xdf\xf6w' - b'\xc9\xc1\xb1\xb1\\\xf3e\xfc\x89\xaan\xf9\x96)\xa7v\xe2\x17\xdc' - b'`\xc6(\x86Ay"\xa8\x18*\x8a\xc2\xd2\xc4\x9c~"\xf5\x9b\x95\xea' - b'\xeb\xc2\xf0\x95Z2][[t>\xd63\x9f\xb2K\x9b\x1b\xce\xed\xfa\x9d7' - b'\xb9W\x85\xdaH\xf6\xf3\x87z\xef\xd2\xe5\x17+\x03\xf3\x0b\xb9' - b'\xbe[}\xf3\xe7V\xab+h\xa8w\xbd\xecl\x86\xfd\xce\xf3\x9e/\xd7' - b'\x9e\xbe}\xb6|ih||uk\xeb>z\xb7v\xf1\xeb\xdf\xdf\xafcf\xa7\xfa' - b'\x1f\xde\xbf@\xc7\xfa\xcfEK\xfe[B\x10\xa8\xffGAW\xe9F\xfd\xefP' - b'\xfb\x894\x7f[\xfb\xcd\x14\xcef\xae\x0f\xe6tE/\xdc4\xdc\xd0' - b'\xd33\x02\xbf9\xcbJq}f\xe0t\xdf\'\x04~\x95\xeb0\x9c\x10\x8e' - b'\xd0a\xd7\xfeX\xa7\xfc\x8f\xf3\xe5\xd7\xfc\xe7\xc2\xe2P\xff' - b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - b'\x008\xa8\xef\xb1g\xe1Z\x00(\x00\x00' - ) + tarobj = io.BytesIO() + + with tarfile.open(fileobj=tarobj, mode="w:gz") as tgz: + data = b"" + + filename = "testimàˆge.png" + if six.PY2: + filename = filename.decode('utf-8') + + t = tarfile.TarInfo(filename) + t.size = len(data) + + tgz.addfile(t, io.BytesIO(data)) + target = tmpdir / 'unicode-pkg-1.0.tar.gz' with open(str(target), mode='wb') as tf: - tf.write(data) + tf.write(tarobj.getvalue()) return str(target) -- cgit v1.2.1 From c296634d44de4a9715d09e4773b73b2d233fbbc4 Mon Sep 17 00:00:00 2001 From: "Jason R. Coombs" Date: Fri, 19 Aug 2016 16:50:58 -0400 Subject: Pin to pytest < 3 until skip errors can be resolved. 
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index cd4353d0..db619735 100755 --- a/setup.py +++ b/setup.py @@ -181,7 +181,7 @@ setup_params = dict( tests_require=[ 'setuptools[ssl]', 'pytest-flake8', - 'pytest>=2.8', + 'pytest>=2.8,<3.0dev', ] + (['mock'] if sys.version_info[:2] < (3, 3) else []), setup_requires=[ ] + pytest_runner + wheel, -- cgit v1.2.1 From 08511f5fdfe04a8ea6e3ae73e086e8a29a0a5cd1 Mon Sep 17 00:00:00 2001 From: stepshal Date: Sat, 20 Aug 2016 04:37:32 +0700 Subject: Fix quantity of blank lines after code object, class of function definition. --- pkg_resources/__init__.py | 55 +++++++++++++++++++++++++++++++ pkg_resources/extern/__init__.py | 2 ++ pkg_resources/tests/test_markers.py | 1 + pkg_resources/tests/test_pkg_resources.py | 8 ++++- pkg_resources/tests/test_resources.py | 4 +++ setuptools/command/build_ext.py | 2 ++ setuptools/tests/test_build_ext.py | 1 + setuptools/tests/test_manifest.py | 1 + 8 files changed, 73 insertions(+), 1 deletion(-) diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py index 87455a0d..033fc8aa 100644 --- a/pkg_resources/__init__.py +++ b/pkg_resources/__init__.py @@ -209,10 +209,12 @@ def parse_version(v): _state_vars = {} + def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) + def __getstate__(): state = {} g = globals() @@ -220,25 +222,31 @@ def __getstate__(): state[k] = g['_sget_' + v](g[k]) return state + def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_' + _state_vars[k]](k, g[k], v) return state + def _sget_dict(val): return val.copy() + def _sset_dict(key, ob, state): ob.clear() ob.update(state) + def _sget_object(val): return val.__getstate__() + def _sset_object(key, ob, state): ob.__setstate__(state) + _sget_none = _sset_none = lambda *args: None @@ -265,6 +273,7 @@ def get_supported_platform(): pass return plat + __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', @@ -311,8 +320,10 @@ __all__ = [ 'run_main', 'AvailableDistributions', ] + class ResolutionError(Exception): """Abstract base for dependency resolution errors""" + def __repr__(self): return self.__class__.__name__ + repr(self.args) @@ -391,6 +402,8 @@ class DistributionNotFound(ResolutionError): class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" + + _provider_factories = {} PY_MAJOR = sys.version[:3] @@ -400,6 +413,7 @@ SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 + def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` @@ -409,6 +423,7 @@ def register_loader_type(loader_type, provider_factory): """ _provider_factories[loader_type] = provider_factory + def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): @@ -421,6 +436,7 @@ def get_provider(moduleOrReq): loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) + def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] @@ -436,9 +452,11 @@ def _macosx_vers(_cache=[]): _cache.append(version.split('.')) return _cache[0] + def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) + def get_build_platform(): """Return this platform's string for platform-specific 
distributions @@ -464,6 +482,7 @@ def get_build_platform(): pass return plat + macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat @@ -524,9 +543,11 @@ def run_script(dist_spec, script_name): ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) + # backward compatibility run_main = run_script + def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, six.string_types): @@ -537,14 +558,17 @@ def get_distribution(dist): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist + def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) + def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) + def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) @@ -1332,6 +1356,7 @@ class ResourceManager: """ # XXX + def get_default_cache(): """Determine the default cache location @@ -1376,6 +1401,7 @@ def get_default_cache(): "Please set the PYTHON_EGG_CACHE environment variable" ) + def safe_name(name): """Convert an arbitrary string to a standard distribution name @@ -1538,6 +1564,7 @@ class NullProvider: "Can't perform this operation for loaders without 'get_data()'" ) + register_loader_type(object, NullProvider) @@ -1562,6 +1589,7 @@ class EggProvider(NullProvider): old = path path, base = os.path.split(path) + class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" @@ -1587,6 +1615,7 @@ class DefaultProvider(EggProvider): type(None)) register_loader_type(loader_cls, cls) + DefaultProvider._register() @@ -1601,6 +1630,7 @@ class EmptyProvider(NullProvider): def __init__(self): pass + empty_provider = EmptyProvider() @@ -1836,6 +1866,7 @@ class ZipProvider(EggProvider): def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) + register_loader_type(zipimport.zipimporter, ZipProvider) @@ -1913,8 +1944,10 @@ class EggMetadata(ZipProvider): self.module_path = importer.archive self._setup_prefix() + _declare_state('dict', _distribution_finders={}) + def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items @@ -1931,6 +1964,7 @@ def find_distributions(path_item, only=False): finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) + def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
@@ -1951,12 +1985,17 @@ def find_eggs_in_zip(importer, path_item, only=False): for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist + register_finder(zipimport.zipimporter, find_eggs_in_zip) + def find_nothing(importer, path_item, only=False): return () + + register_finder(object, find_nothing) + def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) @@ -1997,6 +2036,8 @@ def find_on_path(importer, path_item, only=False): for item in dists: yield item break + + register_finder(pkgutil.ImpImporter, find_on_path) if hasattr(importlib_machinery, 'FileFinder'): @@ -2023,6 +2064,7 @@ def register_namespace_handler(importer_type, namespace_handler): """ _namespace_handlers[importer_type] = namespace_handler + def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" @@ -2055,6 +2097,7 @@ def _rebuild_mod_path(orig_path, package_name, module): corresponding to their sys.path order """ sys_path = [_normalize_cached(p) for p in sys.path] + def position_in_sys_path(path): """ Return the ordinal of the path based on its position in sys.path @@ -2100,6 +2143,7 @@ def declare_namespace(packageName): finally: _imp.release_lock() + def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() @@ -2111,6 +2155,7 @@ def fixup_namespace_packages(path_item, parent=None): finally: _imp.release_lock() + def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" @@ -2123,6 +2168,7 @@ def file_ns_handler(importer, path_item, packageName, module): # Only return the path if it's not already there return subpath + register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) @@ -2133,6 +2179,7 @@ if hasattr(importlib_machinery, 'FileFinder'): def null_ns_handler(importer, path_item, packageName, module): return None + register_namespace_handler(object, null_ns_handler) @@ -2140,6 +2187,7 @@ def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) + def _normalize_cached(filename, _cache={}): try: return _cache[filename] @@ -2147,6 +2195,7 @@ def _normalize_cached(filename, _cache={}): _cache[filename] = result = normalize_path(filename) return result + def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. 
@@ -2155,6 +2204,7 @@ def _is_unpacked_egg(path): path.lower().endswith('.egg') ) + def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() @@ -2176,6 +2226,7 @@ def yield_lines(strs): for s in yield_lines(ss): yield s + MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" @@ -2783,6 +2834,7 @@ def issue_warning(*args, **kw): class RequirementParseError(ValueError): + def __str__(self): return ' '.join(self.args) @@ -2807,6 +2859,7 @@ def parse_requirements(strs): class Requirement(packaging.requirements.Requirement): + def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" try: @@ -2867,6 +2920,7 @@ def _get_mro(cls): return cls.__mro__[1:] return cls.__mro__ + def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): @@ -2916,6 +2970,7 @@ def split_sections(s): # wrap up last segment yield section, content + def _mkstemp(*args, **kw): old_open = os.open try: diff --git a/pkg_resources/extern/__init__.py b/pkg_resources/extern/__init__.py index 6758d36f..492f66f1 100644 --- a/pkg_resources/extern/__init__.py +++ b/pkg_resources/extern/__init__.py @@ -6,6 +6,7 @@ class VendorImporter: A PEP 302 meta path importer for finding optionally-vendored or otherwise naturally-installed packages from root_name. """ + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): self.root_name = root_name self.vendored_names = set(vendored_names) @@ -67,5 +68,6 @@ class VendorImporter: if self not in sys.meta_path: sys.meta_path.append(self) + names = 'packaging', 'pyparsing', 'six' VendorImporter(__name__, names).install() diff --git a/pkg_resources/tests/test_markers.py b/pkg_resources/tests/test_markers.py index 8d451de3..78810b6e 100644 --- a/pkg_resources/tests/test_markers.py +++ b/pkg_resources/tests/test_markers.py @@ -5,6 +5,7 @@ except ImportError: from pkg_resources import evaluate_marker + @mock.patch('platform.python_version', return_value='2.7.10') def test_ordering(python_version_mock): assert evaluate_marker("python_full_version > '2.7.3'") is True diff --git a/pkg_resources/tests/test_pkg_resources.py b/pkg_resources/tests/test_pkg_resources.py index 8b276ffc..361fe657 100644 --- a/pkg_resources/tests/test_pkg_resources.py +++ b/pkg_resources/tests/test_pkg_resources.py @@ -24,6 +24,7 @@ try: except NameError: unicode = str + def timestamp(dt): """ Return a timestamp for a local, naive datetime instance. @@ -34,13 +35,16 @@ def timestamp(dt): # Python 3.2 and earlier return time.mktime(dt.timetuple()) + class EggRemover(unicode): + def __call__(self): if self in sys.path: sys.path.remove(self) if os.path.exists(self): os.remove(self) + class TestZipProvider(object): finalizers = [] @@ -94,7 +98,9 @@ class TestZipProvider(object): assert f.read() == 'hello, world!' manager.cleanup_resources() + class TestResourceManager(object): + def test_get_cache_path(self): mgr = pkg_resources.ResourceManager() path = mgr.get_cache_path('foo') @@ -107,6 +113,7 @@ class TestIndependence: """ Tests to ensure that pkg_resources runs independently from setuptools. 
""" + def test_setuptools_not_imported(self): """ In a separate Python environment, import pkg_resources and assert @@ -122,7 +129,6 @@ class TestIndependence: subprocess.check_call(cmd) - class TestDeepVersionLookupDistutils(object): @pytest.fixture diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index 1d663b83..2ed56233 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -34,6 +34,7 @@ class Metadata(pkg_resources.EmptyProvider): dist_from_fn = pkg_resources.Distribution.from_filename + class TestDistro: def testCollection(self): @@ -294,6 +295,7 @@ class TestDistro: class TestWorkingSet: + def test_find_conflicting(self): ws = WorkingSet([]) Foo = Distribution.from_filename("/foo_dir/Foo-1.2.egg") @@ -380,6 +382,7 @@ class TestEntryPoints: assert ep.name == 'html+mako' reject_specs = "foo", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2" + @pytest.mark.parametrize("reject_spec", reject_specs) def test_reject_spec(self, reject_spec): with pytest.raises(ValueError): @@ -434,6 +437,7 @@ class TestEntryPoints: with pytest.raises(ValueError): EntryPoint.parse_map(self.submap_str) + class TestRequirements: def testBasics(self): diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index 9caabfd5..f994b626 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -59,6 +59,7 @@ elif os.name != 'nt': if_dl = lambda s: s if have_rtld else '' + def get_abi3_suffix(): """Return the file extension for an abi3-compliant Extension()""" for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION): @@ -67,6 +68,7 @@ def get_abi3_suffix(): elif suffix == '.pyd': # Windows return suffix + class build_ext(_build_ext): def run(self): diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py index 0edc92ec..ac002f44 100644 --- a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -8,6 +8,7 @@ from setuptools.command.build_ext import build_ext, get_abi3_suffix from setuptools.dist import Distribution from setuptools.extension import Extension + class TestBuildExt: def test_get_ext_filename(self): diff --git a/setuptools/tests/test_manifest.py b/setuptools/tests/test_manifest.py index 6e67ca61..6360270d 100644 --- a/setuptools/tests/test_manifest.py +++ b/setuptools/tests/test_manifest.py @@ -51,6 +51,7 @@ def quiet(): def touch(filename): open(filename, 'w').close() + # The set of files always in the manifest, including all files in the # .egg-info directory default_files = frozenset(map(make_local_path, [ -- cgit v1.2.1 From e3ffde678c36ca778476574194cdc6d436d82263 Mon Sep 17 00:00:00 2001 From: stepshal Date: Sat, 20 Aug 2016 08:15:52 +0700 Subject: Remove trailing whitespace. 
(#751) --- setuptools/msvc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setuptools/msvc.py b/setuptools/msvc.py index bffaa6aa..26e399cc 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -223,7 +223,7 @@ def msvc14_get_vc_env(plat_spec): def msvc14_gen_lib_options(*args, **kwargs): """ - Patched "distutils._msvccompiler.gen_lib_options" for fix + Patched "distutils._msvccompiler.gen_lib_options" for fix compatibility between "numpy.distutils" and "distutils._msvccompiler" (for Numpy < 1.11.2) """ -- cgit v1.2.1 From 06df852e7cda567b6f8ab6831486285f0e2989a4 Mon Sep 17 00:00:00 2001 From: Gabi Davar Date: Sat, 20 Aug 2016 15:10:57 +0300 Subject: fix tests on windows (#754) - have tox pass env variables used by setuptools on windows. --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 8c09e7df..7ce7ce41 100644 --- a/tox.ini +++ b/tox.ini @@ -2,4 +2,5 @@ envlist = py26,py27,py33,py34,py35,pypy,pypy3 [testenv] +passenv=APPDATA USERPROFILE HOMEDRIVE HOMEPATH windir commands=python setup.py test -- cgit v1.2.1
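
The py_limited_api flag introduced in the patches above is only a filename switch: the compiled module still has to restrict itself to the limited C API. A minimal setup.py sketch using the new keyword is shown below; the project name, module name, source file, and the Py_LIMITED_API version hex are illustrative placeholders, not files or values taken from these patches.

    from setuptools import setup, Extension

    # Opt the C code into the stable ABI and ask setuptools (25.4.0+, i.e. a
    # version containing the patches above) for an abi3-style filename.
    # 0x03020000 targets the Python 3.2+ limited API; choose the oldest
    # Python 3 you intend to support.
    ext = Extension(
        'spam',
        sources=['spammodule.c'],
        define_macros=[('Py_LIMITED_API', '0x03020000')],
        py_limited_api=True,  # affects only the built filename, e.g. spam.abi3.so
    )

    setup(
        name='spam',
        version='1.0',
        ext_modules=[ext],
    )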
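
The test added in these patches doubles as a recipe for checking what filename a given interpreter will produce. A rough interactive sketch follows, assuming a setuptools that includes these changes; the printed suffix is indicative only and depends on platform and Python version.

    from setuptools import Extension
    from setuptools.dist import Distribution
    from setuptools.command.build_ext import build_ext

    # As in the test: finalize_options() populates cmd.ext_map so that
    # get_ext_filename() can see the py_limited_api flag on the extension.
    ext = Extension('spam.eggs', ['eggs.c'], py_limited_api=True)
    cmd = build_ext(Distribution(dict(ext_modules=[ext])))
    cmd.finalize_options()
    print(cmd.get_ext_filename('spam.eggs'))
    # e.g. 'spam/eggs.abi3.so' on Linux, 'spam\\eggs.pyd' on Windows,
    # or the usual version-specific suffix on Python 2.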