# URL extractor
# Copyright 2004, Paul McGuire
from collections import Counter
import pprint
from urllib.request import urlopen
from pyparsing import makeHTMLTags, pyparsing_common as ppc
# Define the pyparsing grammar for a URL, that is:
# URLlink ::= <a href= URL>linkText</a>
# URL ::= doubleQuotedString | alphanumericWordPath
# Note that whitespace may appear just about anywhere in the link. Note also
# that it is not necessary to explicitly show this in the pyparsing grammar; by default,
# pyparsing skips over whitespace between tokens.
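# Note (assumption about the pyparsing helpers used below): makeHTMLTags("a") builds
# expressions for the opening and closing <a> tags, matching the tag's attributes
# (such as href) in any order; the opening-tag expression also carries a tag_body
# attribute that matches everything up to the matching closing tag.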
linkOpenTag, linkCloseTag = makeHTMLTags("a")
link = linkOpenTag + linkOpenTag.tag_body("body") + linkCloseTag.suppress()
# Add a parse action to expand relative URLs
def expand_relative_url(t):
    url = t.href
    if url.startswith("//"):
        url = "https:" + url
    elif url.startswith(("/", "?", "#")):
        url = "https://www.cnn.com" + url
    # Put modified URL back into input tokens
    t["href"] = url
link.add_parse_action(expand_relative_url)
# Go get some HTML with some links in it.
with urlopen("https://www.cnn.com/") as serverListPage:
    htmlText = serverListPage.read().decode()
# scanString is a generator that loops through the input htmlText, and for each
# match yields the tokens and start and end locations (for this application, we are
# not interested in the start and end values).
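# toks.body holds the text between the tags; the href attribute is reachable both as
# toks.href and via the opening-tag tokens as toks.startA.href.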
for toks, strt, end in link.scanString(htmlText):
    print(toks.startA.href, "->", toks.body)
# Create dictionary with a dict comprehension, assembled from each pair of tokens returned
# from a matched URL.
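# Because the dict is keyed by link text, repeated link texts collapse to the last
# URL seen for that text.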
links = {toks.body: toks.href for toks, _, _ in link.scanString(htmlText)}
pprint.pprint(links)
# Parse the urls in the links using pyparsing_common.url, and tally up all
# the different domains in a Counter.
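# pyparsing_common.url assigns results names to the pieces of the URL (the host field
# is used below; dump() prints all of the named fields for inspection).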
domains = Counter()
for url in links.values():
    print(url)
    parsed = ppc.url.parseString(url)
    # print parsed fields for each new url
    if parsed.host not in domains:
        print(parsed.dump())
        print()
    # update domain counter
    domains[parsed.host] += 1
# Print out a little table of all the domains in the urls
max_domain_len = max(len(d) for d in domains)
print()
print("{:{}s} {}".format("Domain", max_domain_len, "Count"))
print("{:=<{}} {:=<5}".format("", max_domain_len, ""))
for domain, count in domains.most_common():
    print("{:{}s} {:5d}".format(domain, max_domain_len, count))