author     Ned Batchelder <ned@nedbatchelder.com>    2020-11-28 13:02:50 -0500
committer  Ned Batchelder <ned@nedbatchelder.com>    2020-11-28 16:13:11 -0500
commit     12eca0fc391bb2f89138df3d6dbaabf78ff32d86 (patch)
tree       c6af8c4c3ce1b8e3ef4b88079394e1adc61ef240 /ci/download_appveyor.py
parent     41d8aef04f172bcd78a27d7a2f138a8a712063bd (diff)
download   python-coveragepy-git-gh-actions.tar.gz
Remove unneeded CI and kitting support (gh-actions)
We don't use AppVeyor or Travis anymore, or make manylinux or local wheels.
Diffstat (limited to 'ci/download_appveyor.py')
-rw-r--r--  ci/download_appveyor.py | 95
1 file changed, 0 insertions(+), 95 deletions(-)
diff --git a/ci/download_appveyor.py b/ci/download_appveyor.py
deleted file mode 100644
index a3d81496..00000000
--- a/ci/download_appveyor.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Use the Appveyor API to download Windows artifacts."""
-
-import os
-import os.path
-import sys
-import zipfile
-
-import requests
-
-
-def make_auth_headers():
- """Make the authentication headers needed to use the Appveyor API."""
- with open("ci/appveyor.token") as f:
- token = f.read().strip()
-
- headers = {
- 'Authorization': 'Bearer {}'.format(token),
- }
- return headers
-
-
-def make_url(url, **kwargs):
- """Build an Appveyor API url."""
- return "https://ci.appveyor.com/api" + url.format(**kwargs)
-
-
-def get_project_build(account_project):
- """Get the details of the latest Appveyor build."""
- url = make_url("/projects/{account_project}", account_project=account_project)
- response = requests.get(url, headers=make_auth_headers())
- return response.json()
-
-
-def download_latest_artifacts(account_project):
- """Download all the artifacts from the latest build."""
- build = get_project_build(account_project)
- jobs = build['build']['jobs']
- print("Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
- for job in jobs:
- name = job['name'].partition(':')[2].split(',')[0].strip()
- print(" {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
-
- url = make_url("/buildjobs/{jobid}/artifacts", jobid=job['jobId'])
- response = requests.get(url, headers=make_auth_headers())
- artifacts = response.json()
-
- for artifact in artifacts:
- is_zip = artifact['type'] == "Zip"
- filename = artifact['fileName']
- print(" {}, {} bytes".format(filename, artifact['size']))
-
- url = make_url(
- "/buildjobs/{jobid}/artifacts/{filename}",
- jobid=job['jobId'],
- filename=filename
- )
- download_url(url, filename, make_auth_headers())
-
- if is_zip:
- unpack_zipfile(filename)
- os.remove(filename)
-
-
-def ensure_dirs(filename):
- """Make sure the directories exist for `filename`."""
- dirname, _ = os.path.split(filename)
- if dirname and not os.path.exists(dirname):
- os.makedirs(dirname)
-
-
-def download_url(url, filename, headers):
- """Download a file from `url` to `filename`."""
- ensure_dirs(filename)
- response = requests.get(url, headers=headers, stream=True)
- if response.status_code == 200:
- with open(filename, 'wb') as f:
- for chunk in response.iter_content(16*1024):
- f.write(chunk)
-
-
-def unpack_zipfile(filename):
- """Unpack a zipfile, using the names in the zip."""
- with open(filename, 'rb') as fzip:
- z = zipfile.ZipFile(fzip)
- for name in z.namelist():
- print(" extracting {}".format(name))
- ensure_dirs(name)
- z.extract(name)
-
-
-if __name__ == "__main__":
- download_latest_artifacts(sys.argv[1])
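For reference: the removed script read an AppVeyor API token from ci/appveyor.token and took the AppVeyor account/project slug as its only command-line argument, so a typical invocation (the slug shown is illustrative, not confirmed by this commit) looked like:

    python ci/download_appveyor.py nedbat/coveragepy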