author     Jon Dufresne <jon.dufresne@gmail.com>          2019-10-17 17:43:30 -0700
committer  Paul McGuire <ptmcg@users.noreply.github.com>  2019-10-17 19:43:30 -0500
commit     fd8252e8b762677dc3d47fc28dd68685fef61f6a (patch)
tree       f589157e3990485020d1413601bcf58886372366 /examples
parent     4e1a557ed6886ac67eb08122b5081c6464b699a3 (diff)
download   pyparsing-git-fd8252e8b762677dc3d47fc28dd68685fef61f6a.tar.gz
Py3 cleanup: Remove use of closing() with urlopen() (#145)
In Python 3, urlopen() can always be used as a context manager. Wrapping
with closing() is not necessary.

https://docs.python.org/3/library/urllib.request.html#urllib.request.urlopen

> This function always returns an object which can work as a context
> manager …
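Every file in this change applies the same before/after pattern: drop the contextlib.closing() wrapper and use the response returned by urlopen() directly as a context manager. A minimal sketch of the pattern (the URL below is a placeholder for illustration, not taken from the examples):

    from urllib.request import urlopen

    url = "https://example.com/"  # placeholder URL, assumption for illustration

    # Before: Python 2-era code wrapped the response so it would be closed reliably
    #   from contextlib import closing
    #   with closing(urlopen(url)) as page:
    #       html = page.read().decode("UTF-8")

    # After: in Python 3 the response object returned by urlopen() is itself a
    # context manager, so it can be used directly in a with statement
    with urlopen(url) as page:
        html = page.read().decode("UTF-8")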
Diffstat (limited to 'examples')
-rw-r--r--  examples/getNTPserversNew.py    3
-rw-r--r--  examples/htmlStripper.py        6
-rw-r--r--  examples/partial_gene_match.py  5
-rw-r--r--  examples/urlExtractor.py        5
-rw-r--r--  examples/urlExtractorNew.py     6
5 files changed, 11 insertions, 14 deletions
diff --git a/examples/getNTPserversNew.py b/examples/getNTPserversNew.py
index c86e756..ab392dc 100644
--- a/examples/getNTPserversNew.py
+++ b/examples/getNTPserversNew.py
@@ -8,7 +8,6 @@
#
import pyparsing as pp
ppc = pp.pyparsing_common
-from contextlib import closing
try:
    import urllib.request
@@ -27,7 +26,7 @@ timeServerPattern = (tdStart + hostname("hostname") + tdEnd
# get list of time servers
nistTimeServerURL = "https://tf.nist.gov/tf-cgi/servers.cgi#"
-with closing(urlopen(nistTimeServerURL)) as serverListPage:
+with urlopen(nistTimeServerURL) as serverListPage:
    serverListHTML = serverListPage.read().decode("UTF-8")
addrs = {}
diff --git a/examples/htmlStripper.py b/examples/htmlStripper.py
index 18f3395..eb35c70 100644
--- a/examples/htmlStripper.py
+++ b/examples/htmlStripper.py
@@ -6,8 +6,8 @@
#
# Copyright (c) 2006, 2016, Paul McGuire
#
-from contextlib import closing
-import urllib.request, urllib.parse, urllib.error
+import urllib.parse, urllib.error
+from urllib.request import urlopen
from pyparsing import (makeHTMLTags, commonHTMLEntity, replaceHTMLEntity,
    htmlComment, anyOpenTag, anyCloseTag, LineEnd, OneOrMore, replaceWith)
@@ -17,7 +17,7 @@ commonHTMLEntity.setParseAction(replaceHTMLEntity)
# get some HTML
targetURL = "https://wiki.python.org/moin/PythonDecoratorLibrary"
-with closing(urllib.request.urlopen( targetURL )) as targetPage:
+with urlopen( targetURL ) as targetPage:
    targetHTML = targetPage.read().decode("UTF-8")
# first pass, strip out tags and translate entities
diff --git a/examples/partial_gene_match.py b/examples/partial_gene_match.py
index 3d48f9d..e4c59af 100644
--- a/examples/partial_gene_match.py
+++ b/examples/partial_gene_match.py
@@ -4,12 +4,11 @@
#
import pyparsing as pp
-import urllib.request
-from contextlib import closing
+from urllib.request import urlopen
# read in a bunch of genomic data
data_url = "http://toxodb.org/common/downloads/release-6.0/Tgondii/TgondiiApicoplastORFsNAs_ToxoDB-6.0.fasta"
-with closing(urllib.request.urlopen(data_url)) as datafile:
+with urlopen(data_url) as datafile:
    fastasrc = datafile.read().decode()
# define parser to extract gene definitions
diff --git a/examples/urlExtractor.py b/examples/urlExtractor.py
index fbc2fa6..70835da 100644
--- a/examples/urlExtractor.py
+++ b/examples/urlExtractor.py
@@ -1,8 +1,7 @@
# URL extractor
# Copyright 2004, Paul McGuire
from pyparsing import makeHTMLTags, pyparsing_common as ppc
-import urllib.request
-from contextlib import closing
+from urllib.request import urlopen
import pprint
linkOpenTag, linkCloseTag = makeHTMLTags('a')
@@ -14,7 +13,7 @@ linkBody.addParseAction(lambda toks: ' '.join(toks[0].strip().split()))
link = linkOpenTag + linkBody("body") + linkCloseTag.suppress()
# Go get some HTML with some links in it.
-with closing(urllib.request.urlopen("https://www.cnn.com/")) as serverListPage:
+with urlopen("https://www.cnn.com/") as serverListPage:
    htmlText = serverListPage.read().decode("UTF-8")
# scanString is a generator that loops through the input htmlText, and for each
diff --git a/examples/urlExtractorNew.py b/examples/urlExtractorNew.py
index d876eea..795322a 100644
--- a/examples/urlExtractorNew.py
+++ b/examples/urlExtractorNew.py
@@ -1,8 +1,8 @@
# URL extractor
# Copyright 2004, Paul McGuire
from pyparsing import makeHTMLTags
-from contextlib import closing
-import urllib.request, urllib.parse, urllib.error
+import urllib.parse, urllib.error
+from urllib.request import urlopen
import pprint
# Define the pyparsing grammar for a URL, that is:
@@ -15,7 +15,7 @@ linkOpenTag, linkCloseTag = makeHTMLTags("a")
link = linkOpenTag + linkOpenTag.tag_body("body") + linkCloseTag.suppress()
# Go get some HTML with some links in it.
-with closing(urllib.request.urlopen("https://www.cnn.com/")) as serverListPage:
+with urlopen("https://www.cnn.com/") as serverListPage:
    htmlText = serverListPage.read()
# scanString is a generator that loops through the input htmlText, and for each