diff --git a/README_PYPI.rst b/README_PYPI.rst index 4bf31c22..4d1ef993 100644 --- a/README_PYPI.rst +++ b/README_PYPI.rst @@ -1,11 +1,12 @@ -A helper library for writing `Alfred 2 and 3`_ workflows. +A helper library for writing `Alfred 2, 3 and 4`_ workflows. -Supports macOS 10.6+ and Python 2.6 and 2.7 (Alfred 3 is 10.9+/2.7 only). +Supports macOS 10.7+ and Python 2.7 (Alfred 3 is 10.9+/2.7 only). Alfred-Workflow is designed to take the grunt work out of writing a workflow. -It gives you the tools to create a fast and featureful Alfred workflow from an API, application or library in minutes. +It gives you the tools to create a fast and featureful Alfred workflow from an +API, application or library in minutes. http://www.deanishe.net/alfred-workflow/ @@ -109,8 +110,9 @@ Download the ``alfred-workflow-X.X.X.zip`` file from the `GitHub releases`_ page and extract the ZIP to the root directory of your workflow (where ``info.plist`` is). -Alternatively, you can download `the source code`_ from the `GitHub repository`_ -and copy the ``workflow`` subfolder to the root directory of your workflow. +Alternatively, you can download `the source code`_ from the +`GitHub repository`_ and copy the ``workflow`` subfolder to the root +directory of your workflow. Your workflow directory should look something like this (where ``yourscript.py`` contains your workflow code and ``info.plist`` is @@ -140,7 +142,7 @@ http://www.deanishe.net/alfred-workflow/. .. _v2 branch: https://github.com/deanishe/alfred-workflow/tree/v2 .. _requests: http://docs.python-requests.org/en/latest/ -.. _Alfred 2 and 3: http://www.alfredapp.com/ +.. _Alfred 2, 3 and 4: http://www.alfredapp.com/ .. _GitHub releases: https://github.com/deanishe/alfred-workflow/releases .. _the source code: https://github.com/deanishe/alfred-workflow/archive/master.zip .. 
_GitHub repository: https://github.com/deanishe/alfred-workflow diff --git a/alfred-workflow-1.36.zip b/alfred-workflow-1.36.zip deleted file mode 100644 index 89952cb5..00000000 Binary files a/alfred-workflow-1.36.zip and /dev/null differ diff --git a/alfred-workflow-1.37.zip b/alfred-workflow-1.37.zip new file mode 100644 index 00000000..b18da405 Binary files /dev/null and b/alfred-workflow-1.37.zip differ diff --git a/bin/testone b/bin/testone index af08299b..915ee479 100755 --- a/bin/testone +++ b/bin/testone @@ -10,11 +10,13 @@ testone ... Run test script(s) with coverage for one package. Usage: - testone ... + testone [-v|-V] ... testone -h Options: - -h Show this message and exit. + -v verbose output + -V very verbose output + -h show this message and exit Example: testone workflow.notify tests/test_notify.py @@ -44,13 +46,19 @@ function success() { printf "${green}$@${nc}\n" } - -while getopts ":h" opt; do +vopts= +while getopts ":hvV" opt; do case $opt in h) usage exit 0 ;; + v) + vopts="-v" + ;; + V) + vopts="-vv" + ;; \?) log "Invalid option: -$OPTARG" exit 1 @@ -71,7 +79,7 @@ package="$1" shift # Run tests -pytest --cov="${package}" "$@" +pytest $vopts --cov="${package}" "$@" # ret1=$? ret1=${PIPESTATUS[0]} diff --git a/docs/Alfred-Workflow.docset.zip b/docs/Alfred-Workflow.docset.zip index 5e77c156..76a20ddf 100644 Binary files a/docs/Alfred-Workflow.docset.zip and b/docs/Alfred-Workflow.docset.zip differ diff --git a/docs/api/updates.rst.inc b/docs/api/updates.rst.inc index eccc27b4..2310486d 100644 --- a/docs/api/updates.rst.inc +++ b/docs/api/updates.rst.inc @@ -34,10 +34,13 @@ on how to enable automatic updates in your workflow. Helpers ^^^^^^^ +.. autoclass:: Download + :members: + .. autoclass:: Version :members: -.. autofunction:: download_workflow +.. autofunction:: retrieve_download .. 
_GitHub releases: https://help.github.com/categories/85/articles diff --git a/docs/conf.py b/docs/conf.py index 3a42017d..0c0b37ae 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,6 +12,7 @@ # All configuration values have a default; values that are commented out # serve to show the default. +from datetime import date # import sphinx_bootstrap_theme # import sphinx_rtd_theme import sys @@ -58,7 +59,7 @@ # General information about the project. project = u'Alfred-Workflow' -copyright = u' 2013–2017 Dean Jackson' +copyright = u' 2013–{} Dean Jackson'.format(date.today().year) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/docs/guide/update.rst b/docs/guide/update.rst index c63f1526..255b85e2 100644 --- a/docs/guide/update.rst +++ b/docs/guide/update.rst @@ -47,28 +47,34 @@ Alfred's default installation mechanism. .. important:: - Releases marked as ``pre-release`` on GitHub will be ignored unless the + Releases marked ``Pre-release`` on GitHub will be ignored unless the ``workflow:prereleases`` :ref:`magic argument ` has been enabled or the ``prereleases`` key is set to ``True`` in the ``update_settings`` :class:`dict`. -Supporting Alfred 2 and Alfred 3 --------------------------------- +Supporting multiple Alfred versions +----------------------------------- -Workflows created *or edited* in Alfred 3 are fundamentally incompatible -with Alfred 2, even if no Alfred 3-only features are used. +Workflows created *or edited* in any version of Alfred are fundamentally +incompatible with earlier versions, even if no new features are used. If you want to make a new release of an existing workflow that breaks -compatibility with Alfred 2, it's important that you use the alternate -``.alfred3workflow`` file extension for your release binaries to prevent Alfred -2 installations trying to update themselves to death. 
- -You can have both an ``.alfredworkflow`` file and an ``.alfred3workflow`` file -in the same release. If Alfred-Workflow is running under Alfred 3, it will -prefer the ``.alfred3workflow`` if present. Under Alfred 2, or if the release -contains no ``.alfred3workflow`` file, Alfred-Workflow will use the -``.alfredworkflow`` file. +compatibility with older versions of Alfred, it's important that you use +an appropriate alternate versions-specific file extension to +hide the files from older versions. That means, use +``.alfred3workflow`` for workflows compatible with Alfred 3+, +``.alfred4workflow`` for workflows compatible with Alfred 4+, and so +on. + +Files with extensions that have a higher version number than the running +version of Alfred are ignored by the updater. ``.alfredworkflow`` files +are installed regardless of the Alfred version. + +You can have both an ``.alfredworkflow`` file and an ``.alfred4workflow`` file +in the same release. If Alfred-Workflow is running under Alfred 4, it will +prefer the ``.alfred4workflow`` if present. Under Alfred 3, or if the release +contains no ``.alfred4workflow`` file, the ``.alfredworkflow`` file is used. There may only be one file of each type, however, or the release will be considered invalid. @@ -80,11 +86,11 @@ Configuration To use self-updating, you must pass a :class:`dict` as the ``update_settings`` argument to :class:`Workflow`. It **must** have the key/value pair ``github_slug``, which is your username and the name of the -workflow's repo in the format ``username/reponame``. The version number of the currently -installed workflow must also be specified (see below). There are several ways -to specify the version number of your workflow: +workflow's repo in the format ``username/reponame``. The version number +of the currently installed workflow must also be specified (see below). There +are several ways to specify the version number of your workflow: -1. 
In Alfred 3, use the Workflow Version field in the workflow +1. In Alfred 3+, use the Workflow Version field in the workflow configuration sheet. This saves the version number in ``info.plist``. :class:`Workflow` will retrieve the version from the environment variables set by Alfred when it runs your workflow (or by @@ -181,9 +187,9 @@ number format and associated features. .. note:: - Alfred-Workflow will automatically check in the background if a newer - version of your workflow is available, but will *not* automatically inform - the user nor download and install the update. + Alfred-Workflow will automatically check in the background if a newer + version of your workflow is available, but will *not* automatically inform + the user nor download and install the update. Usage diff --git a/docs/index.rst b/docs/index.rst index d797628f..bacffe1b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,7 +8,7 @@ Welcome to Alfred-Workflow Go to :ref:`quickindex`. -Alfred-Workflow is a Python helper library for `Alfred 2 and 3`_ workflow +Alfred-Workflow is a Python helper library for `Alfred 2, 3 and 4`_ workflow authors, developed and hosted on `GitHub`_. Alfred workflows typically take user input, fetch data from the Web or @@ -16,7 +16,7 @@ elsewhere, filter them and display results to the user. Alfred-Workflow takes care of a lot of the details for you, allowing you to concentrate your efforts on your workflow's functionality. -Alfred-Workflow supports macOS 10.6+ (Python 2.6 and 2.7). +Alfred-Workflow supports macOS 10.7+ (Python 2.7). Features @@ -41,12 +41,12 @@ Features debugging and management of the workflow -Alfred 3-only features ----------------------- +Alfred 3+ features +------------------ - Set :ref:`workflows variables ` from code - Advanced modifiers -- Alfred 3-only updates (won't break Alfred 2 installs) +- Alfred version-aware updates (ignores incompatible updates) - :ref:`Automatic re-running of Script Filters `. 
@@ -65,7 +65,7 @@ box (changing ``API_KEY``): import sys from workflow import Workflow, ICON_WEB, web - # To use Alfred 3's feedback mechanism: + # To use Alfred 3+ feedback mechanism: # from workflow import Workflow3 API_KEY = 'your-pinboard-api-key' @@ -103,5 +103,4 @@ Pinboard item in Alfred to open it in your browser. .. _GitHub: https://github.com/deanishe/alfred-workflow/ .. _requests: http://docs.python-requests.org/en/latest/ -.. _Alfred 2 and 3: https://www.alfredapp.com/ - +.. _Alfred 2, 3 and 4: https://www.alfredapp.com/ diff --git a/extras/alabaster b/extras/alabaster index ac33f116..0568c65a 160000 --- a/extras/alabaster +++ b/extras/alabaster @@ -1 +1 @@ -Subproject commit ac33f116b986bf104e1bc61e9c19024cfd965e9a +Subproject commit 0568c65a6c34d7a51516d3e135b99d60542cbec6 diff --git a/requirements-docs.txt b/requirements-docs.txt index 810f266d..36511378 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,4 +1,4 @@ -e git+https://github.com/deanishe/alabaster/@a5ec3ee9827bcebe6112fb1b4319443a340a33c5#egg=alabaster -Sphinx==1.6.5 -sphinxcontrib-napoleon==0.6.1 -doc2dash==2.2.0 +doc2dash==2.3.0 +Sphinx==1.8.5 +sphinxcontrib-napoleon==0.7 diff --git a/requirements-test.txt b/requirements-test.txt index 21546f19..80df6f3b 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,8 +1,9 @@ -pyobjc-core==4.0.1 -pyobjc-framework-Cocoa==4.0.1 -pytest==3.3.0 -pytest-cov==2.5.1 -pytest-httpbin==0.3.0 -pytest-localserver==0.4.1 -tox==2.9.1 -twine==1.9.1 +pyobjc-framework-Cocoa==5.2 +pytest==4.4.1 +pytest-cov==2.7.1 +pytest-httpbin==1.0.0 +pytest-localserver==0.5.0 +tox==3.9.0 +twine==1.13.0 +flake8==3.7.7 +flake8-docstrings==1.3.0 diff --git a/run-tests.sh b/run-tests.sh index c8784193..a75157a2 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -2,6 +2,30 @@ rootdir="$( cd "$( dirname "$0" )"; pwd )" +usage() { + cat > /dev/stderr <] [...] + +Run test script(s) with coverage for one package. 
+ +Usage: + run-tests.sh [-v|-V] [-c ] [-l] [-t] [...] + run-tests.sh -h + +Options: + -c coverage report package + -l run linter + -t run tests (default) + -v verbose output + -V very verbose output + -h show this message and exit + +Example: + run-tests.sh -c workflow.notify tests/test_notify.py + +EOS +} + if [ -t 1 ]; then red='\033[0;31m' green='\033[0;32m' @@ -24,41 +48,108 @@ function success() { printf "${green}$@${nc}\n" } +coverpkg=workflow +vopts= +dolint=1 +dotest=0 +forcetest=1 +while getopts ":c:hltvV" opt; do + case $opt in + c) + coverpkg="$OPTARG" + ;; + l) + dolint=0 + ;; + t) + forcetest=0 + ;; + h) + usage + exit 0 + ;; + v) + vopts="-v" + ;; + V) + vopts="-vv" + ;; + \?) + log "Invalid option: -$OPTARG" + exit 1 + ;; + esac +done +shift $((OPTIND-1)) # Set test options and run tests #------------------------------------------------------------------------- -# More options are in tox.ini -export PYTEST_ADDOPTS="--cov-report=html" -pytest --cov=workflow tests +unset alfred_version alfred_workflow_version alfred_workflow_bundleid +unset alfred_workflow_name alfred_workflow_cache alfred_workflow_data -ret1=${PIPESTATUS[0]} +files=(tests) +if [[ $# -gt 0 ]]; then + files=$@ +fi -echo +if [[ "$dolint" -eq 0 ]]; then + dotest=1 +fi -case "$ret1" in - 0) success "TESTS OK" ;; - *) fail "TESTS FAILED" ;; -esac +if [[ "$forcetest" -eq 0 ]]; then + dotest=0 +fi -log "" +coverage erase +# command rm -fv .coverage.* + +if [[ $dotest -eq 0 ]]; then + # More options are in tox.ini + export PYTEST_ADDOPTS="--cov-report=html" + pytest $vopts --cov="$coverpkg" $files + ret1=${PIPESTATUS[0]} + echo + + case "$ret1" in + 0) success "TESTS OK" ;; + *) fail "TESTS FAILED" ;; + esac + if [[ "$ret1" -ne 0 ]]; then + exit $ret1 + fi + echo +fi + + +if [[ $dolint -eq 0 ]]; then + flake8 $files + ret2=${PIPESTATUS[0]} + + case "$ret2" in + 0) success "LINTING OK" ;; + *) fail "LINTING FAILED" ;; + esac +fi + +if [[ "$ret2" -ne 0 ]]; then + exit $ret2 +fi +if [[ "$dotest" 
-eq 1 ]]; then + exit 0 +fi # Test coverage coverage report --fail-under 100 --show-missing -ret2=${PIPESTATUS[0]} +ret3=${PIPESTATUS[0]} echo -case "$ret2" in +case "$ret3" in 0) success "COVERAGE OK" ;; *) fail "COVERAGE FAILED" ;; esac -coverage erase - -if [[ "$ret1" -ne 0 ]]; then - exit $ret1 -fi - -exit $ret2 +test -z "$TRAVIS" && coverage erase +exit $ret3 diff --git a/setup.py b/setup.py index d500b53c..9e66e4c3 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,10 @@ # Created on 2014-08-17 # +"""Alfred-Workflow library for building Alfred 3/4 workflows.""" + import os +from os.path import dirname, join import subprocess from setuptools import setup from setuptools.command.test import test as TestCommand @@ -16,29 +19,33 @@ def read(fname): """Return contents of file `fname` in this directory.""" - return open(os.path.join(os.path.dirname(__file__), fname)).read() + return open(join(dirname(__file__), fname)).read() class PyTestCommand(TestCommand): """Enable running tests with `python setup.py test`.""" def finalize_options(self): + """Implement TestCommand.""" TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): - subprocess.call(['/bin/bash', os.path.join(os.path.dirname(__file__), - 'run-tests.sh')]) + """Implement TestCommand.""" + subprocess.call( + ['/bin/bash', join(dirname(__file__), 'run-tests.sh')]) version = read('workflow/version') +long_description = read('README_PYPI.rst') + name = 'Alfred-Workflow' author = 'Dean Jackson' author_email = 'deanishe@deanishe.net' url = 'http://www.deanishe.net/alfred-workflow/' -description = 'Full-featured helper library for writing Alfred 2/3 workflows' -keywords = 'alfred workflow' +description = 'Full-featured helper library for writing Alfred 2/3/4 workflows' +keywords = 'alfred workflow alfred4' packages = ['workflow'] package_data = {'workflow': ['version', 'Notify.tgz']} classifiers = [ @@ -64,7 +71,7 @@ def run_tests(self): name=name, version=version, 
description=description, - long_description=read('README_PYPI.rst'), + long_description=long_description, keywords=keywords, author=author, author_email=author_email, diff --git a/tests/__init__.py b/tests/__init__.py index e69de29b..e0310a01 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1 @@ +"""Unit tests.""" diff --git a/tests/conftest.py b/tests/conftest.py index 7c7ef488..857f3c80 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -12,18 +12,141 @@ from __future__ import print_function, absolute_import +from contextlib import contextmanager import os from shutil import rmtree from tempfile import mkdtemp import pytest +from workflow.workflow import Workflow + + from .util import ( INFO_PLIST_TEST, INFO_PLIST_TEST3, InfoPlist, ) +BUNDLE_ID = 'net.deanishe.alfred-workflow' +WORKFLOW_NAME = 'Alfred-Workflow Test' +WORKFLOW_VERSION = '1.1.1' + +ENV_V2 = dict( + alfred_version='2.4', + alfred_version_build='277', + alfred_workflow_version=WORKFLOW_VERSION, + alfred_workflow_bundleid=BUNDLE_ID, + alfred_workflow_name=WORKFLOW_NAME, + alfred_workflow_cache=os.path.expanduser( + '~/Library/Caches/com.runningwithcrayons.Alfred-2/' + 'Workflow Data/' + BUNDLE_ID), + alfred_workflow_data=os.path.expanduser( + '~/Library/Application Support/Alfred 2/' + 'Workflow Data/' + BUNDLE_ID), + alfred_preferences=os.path.expanduser( + '~/Library/Application Support/Alfred 2/' + 'Alfred.alfredpreferences'), +) + +ENV_V3 = dict( + alfred_version='3.8.1', + alfred_version_build='961', + alfred_workflow_version=WORKFLOW_VERSION, + alfred_workflow_bundleid=BUNDLE_ID, + alfred_workflow_name=WORKFLOW_NAME, + alfred_workflow_cache=os.path.expanduser( + '~/Library/Caches/com.runningwithcrayons.Alfred-3/' + 'Workflow Data/' + BUNDLE_ID), + alfred_workflow_data=os.path.expanduser( + '~/Library/Application Support/Alfred 3/' + 'Workflow Data/' + BUNDLE_ID), + alfred_preferences=os.path.expanduser( + '~/Library/Application Support/Alfred 3/' + 
'Alfred.alfredpreferences'), +) + +ENV_V4 = dict( + alfred_version='4.0', + alfred_version_build='1061', + alfred_workflow_version=WORKFLOW_VERSION, + alfred_workflow_bundleid=BUNDLE_ID, + alfred_workflow_name=WORKFLOW_NAME, + alfred_workflow_cache=os.path.expanduser( + '~/Library/Caches/com.runningwithcrayons.Alfred/' + 'Workflow Data/' + BUNDLE_ID), + alfred_workflow_data=os.path.expanduser( + '~/Library/Application Support/Alfred/' + 'Workflow Data/' + BUNDLE_ID), + alfred_preferences=os.path.expanduser( + '~/Library/Application Support/Alfred/' + 'Alfred.alfredpreferences'), +) + +COMMON = dict( + alfred_debug='1', + alfred_preferences_localhash='adbd4f66bc3ae8493832af61a41ee609b20d8705', + alfred_theme='alfred.theme.yosemite', + alfred_theme_background='rgba(255,255,255,0.98)', + alfred_theme_subtext='3', + alfred_workflow_uid='user.workflow.B0AC54EC-601C-479A-9428-01F9FD732959', +) + + +@contextmanager +def env(**kwargs): + """Context manager to alter and restore system environment.""" + prev = os.environ.copy() + for k, v in kwargs.items(): + if v is None: + if k in os.environ: + del os.environ[k] + else: + os.environ[k] = v + + yield + + os.environ = prev + + +@pytest.fixture +def wf(alfred4, infopl): + """Provide a Workflow using Alfred 4 configuration.""" + wf = Workflow() + yield wf + wf.reset() + + +def setenv(*dicts): + """Update ``os.environ`` from ``dict``s.""" + for d in dicts: + os.environ.update(d) + + +def cleanenv(): + """Remove Alfred variables from ``os.environ``.""" + for k in os.environ.keys(): + if k.startswith('alfred_'): + del os.environ[k] + + +@pytest.fixture(scope='function') +def alfred3(): + """Context manager that sets Alfred 3 environment variables.""" + cleanenv() + setenv(COMMON, ENV_V3) + yield + cleanenv() + + +@pytest.fixture(scope='function') +def alfred4(): + """Context manager that sets Alfred 4 environment variables.""" + cleanenv() + setenv(COMMON, ENV_V4) + yield + cleanenv() + @pytest.fixture(scope='function') def 
tempdir(): @@ -34,19 +157,15 @@ def tempdir(): rmtree(path) -@pytest.fixture(scope='module') -def info2(): +@pytest.fixture() +def infopl2(): """Ensure ``info.plist`` exists in the working directory.""" - os.environ['alfred_version'] = '2.4' with InfoPlist(INFO_PLIST_TEST): yield - del os.environ['alfred_version'] -@pytest.fixture(scope='module') -def info3(): +@pytest.fixture() +def infopl(): """Ensure ``info.plist`` exists in the working directory.""" - os.environ['alfred_version'] = '3.2' with InfoPlist(INFO_PLIST_TEST3): yield - del os.environ['alfred_version'] diff --git a/tests/data/gh-releases-4plus.json b/tests/data/gh-releases-4plus.json new file mode 100644 index 00000000..ee3df3a4 --- /dev/null +++ b/tests/data/gh-releases-4plus.json @@ -0,0 +1,184 @@ +[ + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132521", + "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132521/assets", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132521/assets{?name,label}", + "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v9.0", + "id": 17132521, + "node_id": "MDc6UmVsZWFzZTE3MTMyNTIx", + "tag_name": "v9.0", + "target_commitish": "master", + "name": "Latest release (Alfred 4)", + "draft": false, + "author": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": 
"https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "prerelease": false, + "created_at": "2019-05-03T12:24:12Z", + "published_at": "2019-05-03T12:25:11Z", + "assets": [ + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/12368327", + "id": 12368327, + "node_id": "MDEyOlJlbGVhc2VBc3NldDEyMzY4MzI3", + "name": "Dummy-9.0.alfred4workflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 1, + "created_at": "2019-05-03T12:25:01Z", + "updated_at": "2019-05-03T12:25:02Z", + "browser_download_url": 
"https://github.com/deanishe/alfred-workflow-dummy/releases/download/v9.0/Dummy-9.0.alfred4workflow" + } + ], + "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v9.0", + "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v9.0", + "body": "" + }, + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525", + "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525/assets", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525/assets{?name,label}", + "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v6.0", + "id": 556525, + "node_id": "MDc6UmVsZWFzZTU1NjUyNQ==", + "tag_name": "v6.0", + "target_commitish": "master", + "name": "Latest valid release", + "draft": false, + "author": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "prerelease": false, + "created_at": "2014-09-14T19:24:55Z", + "published_at": "2014-09-14T19:27:09Z", + "assets": [ + { + "url": 
"https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/12368192", + "id": 12368192, + "node_id": "MDEyOlJlbGVhc2VBc3NldDEyMzY4MTky", + "name": "Dummy-6.0.alfred4workflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 0, + "created_at": "2019-05-03T12:14:14Z", + "updated_at": "2019-05-03T12:14:15Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfred4workflow" + }, + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247311", + "id": 247311, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMxMQ==", + "name": "Dummy-6.0.zip", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": 
"https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/zip", + "state": "uploaded", + "size": 36063, + "download_count": 2, + "created_at": "2014-09-23T18:59:00Z", + "updated_at": "2014-09-23T18:59:01Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.zip" + } + ], + "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v6.0", + "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v6.0", + "body": "" + } +] \ No newline at end of file diff --git a/tests/data/gh-releases-4plus.json.gz b/tests/data/gh-releases-4plus.json.gz new file mode 100644 index 00000000..12918d4c Binary files /dev/null and b/tests/data/gh-releases-4plus.json.gz differ diff --git a/tests/data/gh-releases-empty.json b/tests/data/gh-releases-empty.json deleted file mode 100644 index 41b42e67..00000000 --- a/tests/data/gh-releases-empty.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - -] diff --git a/tests/data/gh-releases-empty.json.gz b/tests/data/gh-releases-empty.json.gz deleted file mode 100644 index 6ac82cb7..00000000 Binary files a/tests/data/gh-releases-empty.json.gz and /dev/null differ diff --git a/tests/data/gh-releases.json b/tests/data/gh-releases.json index 
efdb12f8..d9cb81fd 100644 --- a/tests/data/gh-releases.json +++ b/tests/data/gh-releases.json @@ -1,18 +1,242 @@ [ + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132595", + "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132595/assets", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132595/assets{?name,label}", + "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v10.0-beta", + "id": 17132595, + "node_id": "MDc6UmVsZWFzZTE3MTMyNTk1", + "tag_name": "v10.0-beta", + "target_commitish": "master", + "name": "Latest release (pre-release)", + "draft": false, + "author": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "prerelease": true, + "created_at": "2019-05-03T12:27:30Z", + "published_at": "2019-05-03T12:28:36Z", + "assets": [ + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/12368368", + "id": 12368368, + "node_id": "MDEyOlJlbGVhc2VBc3NldDEyMzY4MzY4", + "name": 
"Dummy-10.0-beta.alfredworkflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 1, + "created_at": "2019-05-03T12:28:19Z", + "updated_at": "2019-05-03T12:28:20Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v10.0-beta/Dummy-10.0-beta.alfredworkflow" + } + ], + "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v10.0-beta", + "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v10.0-beta", + "body": "" + }, + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132521", + "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132521/assets", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/17132521/assets{?name,label}", + "html_url": 
"https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v9.0", + "id": 17132521, + "node_id": "MDc6UmVsZWFzZTE3MTMyNTIx", + "tag_name": "v9.0", + "target_commitish": "master", + "name": "Latest release (Alfred 4)", + "draft": false, + "author": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "prerelease": false, + "created_at": "2019-05-03T12:24:12Z", + "published_at": "2019-05-03T12:25:11Z", + "assets": [ + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/12368327", + "id": 12368327, + "node_id": "MDEyOlJlbGVhc2VBc3NldDEyMzY4MzI3", + "name": "Dummy-9.0.alfred4workflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": 
"https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 1, + "created_at": "2019-05-03T12:25:01Z", + "updated_at": "2019-05-03T12:25:02Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v9.0/Dummy-9.0.alfred4workflow" + } + ], + "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v9.0", + "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v9.0", + "body": "" + }, + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/14412055", + "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/14412055/assets", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/14412055/assets{?name,label}", + "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v8point0", + "id": 14412055, + "node_id": "MDc6UmVsZWFzZTE0NDEyMDU1", + "tag_name": "v8point0", + "target_commitish": "master", + "name": "Invalid tag (non-semantic)", + "draft": false, + "author": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": 
"https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "prerelease": false, + "created_at": "2018-12-07T16:03:23Z", + "published_at": "2018-12-07T16:04:30Z", + "assets": [ + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/10048629", + "id": 10048629, + "node_id": "MDEyOlJlbGVhc2VBc3NldDEwMDQ4NjI5", + "name": "Dummy-eight.alfredworkflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": 
"https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 2, + "created_at": "2018-12-07T16:04:24Z", + "updated_at": "2018-12-07T16:04:25Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v8point0/Dummy-eight.alfredworkflow" + } + ], + "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v8point0", + "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v8point0", + "body": "" + }, { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/617375", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/617375/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/617375/assets{?name}", - "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v7.1-beta", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/617375/assets{?name,label}", + "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v7.1.0-beta", "id": 617375, - "tag_name": "v7.1-beta", + "node_id": "MDc6UmVsZWFzZTYxNzM3NQ==", + "tag_name": "v7.1.0-beta", "target_commitish": "master", "name": "Invalid release (pre-release status)", "draft": false, "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -29,18 +253,20 @@ "site_admin": false }, "prerelease": true, - "created_at": "2014-10-10T10:58:30Z", + "created_at": "2014-10-10T10:58:14Z", "published_at": "2014-10-10T10:59:34Z", "assets": [ { "url": 
"https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/265007", "id": 265007, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI2NTAwNw==", "name": "Dummy-7.1-beta.alfredworkflow", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -59,22 +285,23 @@ "content_type": "application/octet-stream", "state": "uploaded", "size": 35726, - "download_count": 1, + "download_count": 7, "created_at": "2014-10-10T10:59:10Z", "updated_at": "2014-10-10T10:59:12Z", - "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v7.1-beta/Dummy-7.1-beta.alfredworkflow" + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v7.1.0-beta/Dummy-7.1-beta.alfredworkflow" } ], - "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v7.1-beta", - "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v7.1-beta", + "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v7.1.0-beta", + "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v7.1.0-beta", "body": "" }, { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556526", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556526/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556526/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556526/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v7.0", "id": 556526, + "node_id": 
"MDc6UmVsZWFzZTU1NjUyNg==", "tag_name": "v7.0", "target_commitish": "master", "name": "Invalid release (contains no files)", @@ -82,7 +309,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -101,9 +329,7 @@ "prerelease": false, "created_at": "2014-09-14T19:25:55Z", "published_at": "2014-09-14T19:27:25Z", - "assets": [ - - ], + "assets": [], "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v7.0", "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v7.0", "body": "" @@ -111,9 +337,10 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556525/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v6.0", "id": 556525, + "node_id": "MDc6UmVsZWFzZTU1NjUyNQ==", "tag_name": "v6.0", "target_commitish": "master", "name": "Latest valid release", @@ -121,7 +348,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -141,15 +369,85 @@ "created_at": "2014-09-14T19:24:55Z", "published_at": "2014-09-14T19:27:09Z", "assets": [ + { + "url": 
"https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/4823231", + "id": 4823231, + "node_id": "MDEyOlJlbGVhc2VBc3NldDQ4MjMyMzE=", + "name": "Dummy-6.0.alfred3workflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": "https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 1, + "created_at": "2017-09-14T12:22:03Z", + "updated_at": "2017-09-14T12:22:08Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfred3workflow" + }, + { + "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/12368192", + "id": 12368192, + "node_id": "MDEyOlJlbGVhc2VBc3NldDEyMzY4MTky", + "name": "Dummy-6.0.alfred4workflow", + "label": null, + "uploader": { + "login": "deanishe", + "id": 747913, + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/deanishe", + "html_url": 
"https://github.com/deanishe", + "followers_url": "https://api.github.com/users/deanishe/followers", + "following_url": "https://api.github.com/users/deanishe/following{/other_user}", + "gists_url": "https://api.github.com/users/deanishe/gists{/gist_id}", + "starred_url": "https://api.github.com/users/deanishe/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/deanishe/subscriptions", + "organizations_url": "https://api.github.com/users/deanishe/orgs", + "repos_url": "https://api.github.com/users/deanishe/repos", + "events_url": "https://api.github.com/users/deanishe/events{/privacy}", + "received_events_url": "https://api.github.com/users/deanishe/received_events", + "type": "User", + "site_admin": false + }, + "content_type": "application/octet-stream", + "state": "uploaded", + "size": 36063, + "download_count": 0, + "created_at": "2019-05-03T12:14:14Z", + "updated_at": "2019-05-03T12:14:15Z", + "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfred4workflow" + }, { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247310", "id": 247310, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMxMA==", "name": "Dummy-6.0.alfredworkflow", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -168,7 +466,7 @@ "content_type": "application/octet-stream", "state": "uploaded", "size": 36063, - "download_count": 319, + "download_count": 586, "created_at": "2014-09-23T18:59:00Z", "updated_at": "2014-09-23T18:59:01Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfredworkflow" @@ -176,12 +474,14 @@ { "url": 
"https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247311", "id": 247311, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMxMQ==", "name": "Dummy-6.0.zip", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -200,7 +500,7 @@ "content_type": "application/zip", "state": "uploaded", "size": 36063, - "download_count": 1, + "download_count": 2, "created_at": "2014-09-23T18:59:00Z", "updated_at": "2014-09-23T18:59:01Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.zip" @@ -213,9 +513,10 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556524", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556524/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556524/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556524/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v5.0", "id": 556524, + "node_id": "MDc6UmVsZWFzZTU1NjUyNA==", "tag_name": "v5.0", "target_commitish": "master", "name": "Invalid release (contains no files)", @@ -223,7 +524,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -242,9 +544,7 @@ "prerelease": false, "created_at": "2014-09-14T19:22:44Z", "published_at": 
"2014-09-14T19:26:30Z", - "assets": [ - - ], + "assets": [], "tarball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/tarball/v5.0", "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v5.0", "body": "" @@ -252,9 +552,10 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556356", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556356/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556356/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556356/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v4.0", "id": 556356, + "node_id": "MDc6UmVsZWFzZTU1NjM1Ng==", "tag_name": "v4.0", "target_commitish": "master", "name": "Invalid release (contains 2 .alfredworkflow files)", @@ -262,7 +563,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -285,12 +587,14 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247308", "id": 247308, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMwOA==", "name": "Dummy-4.0.alfredworkflow", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -309,7 +613,7 @@ "content_type": "application/octet-stream", "state": "uploaded", "size": 36063, - "download_count": 360, + 
"download_count": 694, "created_at": "2014-09-23T18:58:25Z", "updated_at": "2014-09-23T18:58:27Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v4.0/Dummy-4.0.alfredworkflow" @@ -317,12 +621,14 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247309", "id": 247309, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMwOQ==", "name": "Dummy-4.1.alfredworkflow", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -341,7 +647,7 @@ "content_type": "application/octet-stream", "state": "uploaded", "size": 36063, - "download_count": 0, + "download_count": 1, "created_at": "2014-09-23T18:58:26Z", "updated_at": "2014-09-23T18:58:27Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v4.0/Dummy-4.1.alfredworkflow" @@ -354,9 +660,10 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556354", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556354/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556354/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556354/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v3.0", "id": 556354, + "node_id": "MDc6UmVsZWFzZTU1NjM1NA==", "tag_name": "v3.0", "target_commitish": "master", "name": "Invalid release (no .alfredworkflow file)", @@ -364,7 +671,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + 
"avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -387,12 +695,14 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247305", "id": 247305, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMwNQ==", "name": "Dummy-3.0.zip", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -411,7 +721,7 @@ "content_type": "application/zip", "state": "uploaded", "size": 36063, - "download_count": 0, + "download_count": 1, "created_at": "2014-09-23T18:57:53Z", "updated_at": "2014-09-23T18:57:54Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v3.0/Dummy-3.0.zip" @@ -424,9 +734,10 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556352", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556352/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556352/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556352/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v2.0", "id": 556352, + "node_id": "MDc6UmVsZWFzZTU1NjM1Mg==", "tag_name": "v2.0", "target_commitish": "master", "name": "", @@ -434,7 +745,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": 
"https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -457,12 +769,14 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247300", "id": 247300, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzMwMA==", "name": "Dummy-2.0.alfredworkflow", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -481,7 +795,7 @@ "content_type": "application/octet-stream", "state": "uploaded", "size": 36063, - "download_count": 0, + "download_count": 1, "created_at": "2014-09-23T18:57:19Z", "updated_at": "2014-09-23T18:57:21Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v2.0/Dummy-2.0.alfredworkflow" @@ -494,9 +808,10 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556350", "assets_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/556350/assets", - "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556350/assets{?name}", + "upload_url": "https://uploads.github.com/repos/deanishe/alfred-workflow-dummy/releases/556350/assets{?name,label}", "html_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/tag/v1.0", "id": 556350, + "node_id": "MDc6UmVsZWFzZTU1NjM1MA==", "tag_name": "v1.0", "target_commitish": "master", "name": "", @@ -504,7 +819,8 @@ "author": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ 
-527,12 +843,14 @@ { "url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/releases/assets/247299", "id": 247299, + "node_id": "MDEyOlJlbGVhc2VBc3NldDI0NzI5OQ==", "name": "Dummy-1.0.alfredworkflow", "label": null, "uploader": { "login": "deanishe", "id": 747913, - "avatar_url": "https://avatars.githubusercontent.com/u/747913?v=3", + "node_id": "MDQ6VXNlcjc0NzkxMw==", + "avatar_url": "https://avatars1.githubusercontent.com/u/747913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/deanishe", "html_url": "https://github.com/deanishe", @@ -551,7 +869,7 @@ "content_type": "application/octet-stream", "state": "uploaded", "size": 36063, - "download_count": 0, + "download_count": 1, "created_at": "2014-09-23T18:56:22Z", "updated_at": "2014-09-23T18:56:24Z", "browser_download_url": "https://github.com/deanishe/alfred-workflow-dummy/releases/download/v1.0/Dummy-1.0.alfredworkflow" @@ -561,4 +879,4 @@ "zipball_url": "https://api.github.com/repos/deanishe/alfred-workflow-dummy/zipball/v1.0", "body": "" } -] +] \ No newline at end of file diff --git a/tests/data/gh-releases.json.gz b/tests/data/gh-releases.json.gz index 0f432a32..720729f8 100644 Binary files a/tests/data/gh-releases.json.gz and b/tests/data/gh-releases.json.gz differ diff --git a/tests/lib/youcanimportme.py b/tests/lib/youcanimportme.py index c2f17636..40a6bc2f 100644 --- a/tests/lib/youcanimportme.py +++ b/tests/lib/youcanimportme.py @@ -1,5 +1,6 @@ """Do-nothing module to test `sys.path` adjustment""" + def noop(): """Do nothing""" pass diff --git a/tests/test_background.py b/tests/test_background.py old mode 100755 new mode 100644 index 7d2c6e1e..42dbbc8c --- a/tests/test_background.py +++ b/tests/test_background.py @@ -37,7 +37,7 @@ def _delete_pidfile(name): os.unlink(pidfile) -@pytest.mark.usefixtures('info2') +@pytest.mark.usefixtures('infopl') class TestBackground(object): """Unit tests for background jobs.""" diff --git a/tests/test_notify.py b/tests/test_notify.py old mode 
100755 new mode 100644 index 92b562a6..acac296f --- a/tests/test_notify.py +++ b/tests/test_notify.py @@ -16,26 +16,23 @@ import logging import os import plistlib -import unittest import shutil import stat -import tempfile import pytest from workflow import notify from workflow.workflow import Workflow +from conftest import BUNDLE_ID from util import ( FakePrograms, - InfoPlist, WorkflowMock, ) -BUNDLE_ID = 'net.deanishe.alfred-workflow' DATADIR = os.path.expanduser( - '~/Library/Application Support/Alfred 2/' - 'Workflow Data/{0}'.format(BUNDLE_ID)) + '~/Library/Application Support/Alfred/' + 'Workflow Data/' + BUNDLE_ID) APP_PATH = os.path.join(DATADIR, 'Notify.app') APPLET_PATH = os.path.join(APP_PATH, 'Contents/MacOS/applet') ICON_PATH = os.path.join(APP_PATH, 'Contents/Resources/applet.icns') @@ -46,122 +43,117 @@ 'icon.png') -class NotifyTests(unittest.TestCase): - """Tests for :mod:`workflow.notify`.""" - - def setUp(self): - self.tempdir = tempfile.mkdtemp() - if os.path.exists(APP_PATH): - shutil.rmtree(APP_PATH) - - def tearDown(self): - if os.path.exists(APP_PATH): - shutil.rmtree(APP_PATH) - if os.path.exists(self.tempdir): - shutil.rmtree(self.tempdir) - - def test_log_wf(self): - """Workflow and Logger objects correct""" - with InfoPlist(): - wf = notify.wf() - self.assert_(isinstance(wf, Workflow)) - # Always returns the same objects - wf2 = notify.wf() - self.assert_(wf is wf2) - - log = notify.log() - self.assert_(isinstance(log, logging.Logger)) - log2 = notify.log() - self.assert_(log is log2) - - def test_paths(self): - """Module paths are correct""" - with InfoPlist(): - self.assertEqual(DATADIR, notify.wf().datadir) - self.assertEqual(APPLET_PATH, notify.notifier_program()) - self.assertEqual(ICON_PATH, notify.notifier_icon_path()) - - def test_install(self): - """Notify.app is installed correctly""" - with InfoPlist(): - self.assertFalse(os.path.exists(APP_PATH)) - notify.install_notifier() - for p in (APP_PATH, APPLET_PATH, ICON_PATH, 
INFO_PATH): - self.assertTrue(os.path.exists(p)) - # Ensure applet is executable - self.assert_(os.stat(APPLET_PATH).st_mode & stat.S_IXUSR) - # Verify bundle ID was changed - data = plistlib.readPlist(INFO_PATH) - bid = data.get('CFBundleIdentifier') - self.assertNotEqual(bid, BUNDLE_ID) - self.assertTrue(bid.startswith(BUNDLE_ID)) - - def test_sound(self): - """Good sounds work, bad ones fail""" - # Good values - for s in ('basso', 'GLASS', 'Purr', 'tink'): - sound = notify.validate_sound(s) - self.assert_(sound is not None) - self.assertEqual(sound, s.title()) - # Bad values - for s in (None, 'SPOONS', 'The Hokey Cokey', ''): - sound = notify.validate_sound(s) - self.assert_(sound is None) - - def test_invalid_notifications(self): - """Invalid notifications""" - with InfoPlist(): - self.assertRaises(ValueError, notify.notify) - # Is not installed yet - self.assertFalse(os.path.exists(APP_PATH)) - self.assertTrue(notify.notify('Test Title', 'Test Message')) - # A notification should appear now, but there's no way of - # checking whether it worked - self.assertTrue(os.path.exists(APP_PATH)) - - def test_notifyapp_called(self): - """Notify.app is called""" - c = WorkflowMock() - with InfoPlist(): - notify.install_notifier() - with c: - self.assertFalse(notify.notify('Test Title', 'Test Message')) - self.assertEqual(c.cmd[0], APPLET_PATH) - - def test_iconutil_fails(self): - """`iconutil` throws RuntimeError""" - with InfoPlist(): - with FakePrograms('iconutil'): - icns_path = os.path.join(self.tempdir, 'icon.icns') - self.assertRaises(RuntimeError, - notify.png_to_icns, - PNG_PATH, - icns_path) - - def test_sips_fails(self): - """`sips` throws RuntimeError""" - with InfoPlist(): - with FakePrograms('sips'): - icon_path = os.path.join(self.tempdir, 'icon.png') - self.assertRaises(RuntimeError, - notify.convert_image, - PNG_PATH, icon_path, 64) - - def test_image_conversion(self): - """PNG to ICNS conversion""" - with InfoPlist(): - 
self.assertFalse(os.path.exists(APP_PATH)) - notify.install_notifier() - self.assertTrue(os.path.exists(APP_PATH)) - icns_path = os.path.join(self.tempdir, 'icon.icns') - self.assertFalse(os.path.exists(icns_path)) +@pytest.fixture +def applet(): + """Ensure applet doesn't exist.""" + if os.path.exists(APP_PATH): + shutil.rmtree(APP_PATH) + yield + if os.path.exists(APP_PATH): + shutil.rmtree(APP_PATH) + + +def test_log_wf(infopl, alfred4): + """Workflow and Logger objects correct""" + wf = notify.wf() + assert isinstance(wf, Workflow), "not Workflow" + # Always returns the same objects + wf2 = notify.wf() + assert wf is wf2, "not same Workflow" + + log = notify.log() + assert isinstance(log, logging.Logger), "not Logger" + log2 = notify.log() + assert log is log2, "not same Logger" + + +def test_paths(infopl, alfred4): + """Module paths are correct""" + assert DATADIR == notify.wf().datadir, "unexpected datadir" + assert APPLET_PATH == notify.notifier_program(), "unexpected applet path" + assert ICON_PATH == notify.notifier_icon_path(), "unexpected icon path" + + +def test_install(infopl, alfred4, applet): + """Notify.app is installed correctly""" + assert os.path.exists(APP_PATH) is False, "APP_PATH exists" + notify.install_notifier() + for p in (APP_PATH, APPLET_PATH, ICON_PATH, INFO_PATH): + assert os.path.exists(p) is True, "path not found" + # Ensure applet is executable + assert (os.stat(APPLET_PATH).st_mode & stat.S_IXUSR), \ + "applet not executable" + # Verify bundle ID was changed + data = plistlib.readPlist(INFO_PATH) + bid = data.get('CFBundleIdentifier') + assert bid != BUNDLE_ID, "bundle IDs identical" + assert bid.startswith(BUNDLE_ID) is True, "bundle ID not prefix" + + +def test_sound(): + """Good sounds work, bad ones fail""" + # Good values + for s in ('basso', 'GLASS', 'Purr', 'tink'): + sound = notify.validate_sound(s) + assert sound is not None + assert sound == s.title(), "unexpected title" + # Bad values + for s in (None, 'SPOONS', 'The 
Hokey Cokey', ''): + sound = notify.validate_sound(s) + assert sound is None + + +def test_invalid_notifications(infopl, alfred4): + """Invalid notifications""" + with pytest.raises(ValueError): + notify.notify() + # Is not installed yet + assert os.path.exists(APP_PATH) is False + assert notify.notify('Test Title', 'Test Message') is True + # A notification should appear now, but there's no way of + # checking whether it worked + assert os.path.exists(APP_PATH) is True + + +def test_notifyapp_called(infopl, alfred4): + """Notify.app is called""" + c = WorkflowMock() + notify.install_notifier() + with c: + assert notify.notify('Test Title', 'Test Message') is False + assert c.cmd[0] == APPLET_PATH + + +def test_iconutil_fails(infopl, alfred4, tempdir): + """`iconutil` throws RuntimeError""" + with FakePrograms('iconutil'): + icns_path = os.path.join(tempdir, 'icon.icns') + with pytest.raises(RuntimeError): notify.png_to_icns(PNG_PATH, icns_path) - self.assertTrue(os.path.exists(icns_path)) - with open(icns_path, 'rb') as fp: - h1 = hashlib.md5(fp.read()) - with open(ICON_PATH, 'rb') as fp: - h2 = hashlib.md5(fp.read()) - self.assertEqual(h1.digest(), h2.digest()) + + +def test_sips_fails(infopl, alfred4, tempdir): + """`sips` throws RuntimeError""" + with FakePrograms('sips'): + icon_path = os.path.join(tempdir, 'icon.png') + with pytest.raises(RuntimeError): + notify.convert_image(PNG_PATH, icon_path, 64) + + +def test_image_conversion(infopl, alfred4, tempdir, applet): + """PNG to ICNS conversion""" + assert os.path.exists(APP_PATH) is False + notify.install_notifier() + assert os.path.exists(APP_PATH) is True + icns_path = os.path.join(tempdir, 'icon.icns') + assert os.path.exists(icns_path) is False + notify.png_to_icns(PNG_PATH, icns_path) + assert os.path.exists(icns_path) is True + with open(icns_path, 'rb') as fp: + h1 = hashlib.md5(fp.read()) + with open(ICON_PATH, 'rb') as fp: + h2 = hashlib.md5(fp.read()) + assert h1.digest() == h2.digest() if __name__ 
== '__main__': # pragma: no cover diff --git a/tests/test_update.py b/tests/test_update.py old mode 100755 new mode 100644 index 0b2c6d3d..f20da903 --- a/tests/test_update.py +++ b/tests/test_update.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # encoding: utf-8 # -# Copyright (c) 2014 Fabio Niephaus , -# Dean Jackson +# Copyright (c) 2019 Dean Jackson # # MIT Licence. See http://opensource.org/licenses/MIT # @@ -15,31 +14,36 @@ from contextlib import contextmanager import os +import re import pytest -import pytest_localserver +import pytest_localserver # noqa: F401 -from util import WorkflowMock, create_info_plist, delete_info_plist +from util import WorkflowMock from workflow import Workflow, update, web +from workflow.update import Download, Version # Where test data is DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') # GitHub API JSON for test repos -DATA_JSON_EMPTY_PATH = os.path.join(DATA_DIR, 'gh-releases-empty.json') -DATA_JSON_PATH = os.path.join(DATA_DIR, 'gh-releases.json') -DATA_WORKFLOW_PATH = os.path.join(DATA_DIR, 'Dummy-6.0.alfredworkflow') - -# A dummy Alfred workflow -DATA_WORKFLOW = open(DATA_WORKFLOW_PATH).read() # An empty list -DATA_JSON_EMPTY = open(DATA_JSON_EMPTY_PATH).read() +RELEASES_JSON_EMPTY = '[]' # A list of valid and invalid releases. The below variables # refer to these data. 
-DATA_JSON = open(DATA_JSON_PATH).read() - -RELEASE_LATEST = '6.0' -RELEASE_LATEST_PRERELEASE = '7.1-beta' +RELEASES_JSON = open(os.path.join(DATA_DIR, 'gh-releases.json')).read() +RELEASES_4PLUS_JSON = open( + os.path.join(DATA_DIR, 'gh-releases-4plus.json')).read() +# A dummy Alfred workflow +DATA_WORKFLOW = open( + os.path.join(DATA_DIR, 'Dummy-6.0.alfredworkflow')).read() + +# Alfred 4 +RELEASE_LATEST = '9.0' +RELEASE_LATEST_PRERELEASE = '10.0-beta' +# Alfred 3 +RELEASE_LATEST_V3 = '6.0' +RELEASE_LATEST_PRERELEASE_V3 = '7.1-beta' RELEASE_OLDEST = '1.0' # Use this as current version RELEASE_CURRENT = '2.0' @@ -61,37 +65,31 @@ # # The repo has since been mirrored to the `tests/data` directory # (see DATA_* variables above), so the tests can run offline. -TEST_REPO_SLUG = 'deanishe/alfred-workflow-dummy' -EMPTY_REPO_SLUG = 'deanishe/alfred-workflow-empty-dummy' -GH_ROOT = 'https://github.com/' + TEST_REPO_SLUG -GH_API_ROOT = 'https://api.github.com/repos/' + TEST_REPO_SLUG +TEST_REPO = 'deanishe/alfred-workflow-dummy' +EMPTY_REPO = 'deanishe/alfred-workflow-empty-dummy' +GH_ROOT = 'https://github.com/' + TEST_REPO +GH_API_ROOT = 'https://api.github.com/repos/' + TEST_REPO RELEASES_URL = GH_API_ROOT + '/releases' -# URL_DL = GH_ROOT + '/releases/download/v4.0/Dummy-4.0.alfredworkflow' -URL_DL = GH_ROOT + '/releases/download/v6.0/Dummy-6.0.alfredworkflow' +URL_DL = 'https://github.com/releases/download/v6.0/Dummy-6.0.alfredworkflow' URL_BAD = 'http://github.com/file.zip' # INVALID_RELEASE_URL = GH_ROOT + '/releases/download/v3.0/Dummy-3.0.zip' - -@pytest.fixture(scope='module') -def info(request): - """Ensure `info.plist` exists in the working directory.""" - create_info_plist() - os.environ['alfred_version'] = '2.4' - update._wf = None - request.addfinalizer(delete_info_plist) +DL_BAD = Download(url='http://github.com/file.zip', + filename='file.zip', + version=Version('0')) @contextmanager -def fakeresponse(server, content, headers=None): +def 
fakeresponse(httpserver, content, headers=None): """Monkey patch `web.request()` to return the specified response.""" orig = web.request - server.serve_content(content, headers=headers) + httpserver.serve_content(content, headers=headers) def _request(*args, **kwargs): """Replace request URL with `httpserver` URL""" - print('requested URL={0!r}'.format(args[1])) - args = (args[0], server.url) + args[2:] - print('request args={0!r}'.format(args)) + # print('requested URL={!r}'.format(args[1])) + args = (args[0], httpserver.url) + args[2:] + # print('request args={!r}'.format(args)) return orig(*args, **kwargs) web.request = _request @@ -99,209 +97,208 @@ def _request(*args, **kwargs): web.request = orig -def test_download_workflow(httpserver, info): - """Download workflow update""" - headers = { - 'Content-Type': 'application/octet-stream', - 'Content-Disposition': 'attachment; filename=Dummy-6.0.alfredworkflow', - } - with fakeresponse(httpserver, DATA_WORKFLOW, headers): - local_file = update.download_workflow(URL_DL) - assert local_file.endswith('.alfredworkflow') - assert os.path.isfile(local_file) is True +def test_parse_releases(infopl, alfred4): + """Parse releases JSON""" + dls = Download.from_releases(RELEASES_JSON) + assert len(dls) == len(VALID_DOWNLOADS), "wrong no. 
of downloads" + + for i, dl in enumerate(dls): + print('dl=%r, x=%r' % (dl, VALID_DOWNLOADS[i])) + assert dl == VALID_DOWNLOADS[i], "different downloads" -def test_bad_download_url(info): +def test_compare_downloads(): + """Compare Downloads""" + dl = Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v11/Dummy-11.0.alfredworkflow", # noqa: E501 + "Dummy-11.0.alfredworkflow", + "v11", + False) + + for other in VALID_DOWNLOADS: + assert dl > other, "unexpected comparison" + assert dl != other, "unexpected equality" + + +def test_bad_download_url(infopl, alfred4): """Bad update download URL""" with pytest.raises(ValueError): - update.download_workflow(URL_BAD) + update.retrieve_download(DL_BAD) -def test_valid_api_url(info): +def test_valid_api_url(infopl, alfred4): """API URL for valid slug""" - url = update.build_api_url(TEST_REPO_SLUG) + url = update.build_api_url(TEST_REPO) assert url == RELEASES_URL -def test_invalid_api_url(info): +def test_invalid_api_url(infopl, alfred4): """API URL for invalid slug""" with pytest.raises(ValueError): update.build_api_url('fniephausalfred-workflow') -def test_empty_repo(httpserver, info): +def test_empty_repo(httpserver, infopl): """No releases""" - # with webget(httpserver.url): - with fakeresponse(httpserver, DATA_JSON_EMPTY, HTTP_HEADERS_JSON): - update.check_update(EMPTY_REPO_SLUG, '1.0') - assert len(update.get_valid_releases(EMPTY_REPO_SLUG)) == 0 - - -def test_valid_releases(httpserver, info): - """Valid releases""" - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - releases = update.get_valid_releases(TEST_REPO_SLUG) - # Correct number of releases - assert len(releases) == 3 - - # Invalid releases are not in the list - versions = [d['version'] for d in releases] - for v in RELEASES_INVALID: - assert v not in versions - - # Correct latest release - assert update.Version(releases[0]['version']) == \ - update.Version(RELEASE_LATEST) - - -def 
test_valid_releases_with_prereleases(httpserver, info): - """Valid releases with prereleases""" - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - releases = update.get_valid_releases(TEST_REPO_SLUG, prereleases=True) - - # Correct number of releases - assert len(releases) == 4 - - # Invalid releases are not in the list - versions = [d['version'] for d in releases] - for v in RELEASES_INVALID: - assert v not in versions - - # Correct latest release - assert update.Version(releases[0]['version']) == \ - update.Version(RELEASE_LATEST_PRERELEASE) - - -def test_version_formats(httpserver, info): - """Version formats""" - falsey = ( - # Up-to-date versions - '6.0', 'v6.0', - # Unknown versions - 'v8.0', '8.0', - ) - truthy = ( - # Old versions - 'v5.0', '5.0', + with fakeresponse(httpserver, RELEASES_JSON_EMPTY, HTTP_HEADERS_JSON): + update.check_update(EMPTY_REPO, '1.0') + assert len(update.get_downloads(EMPTY_REPO)) == 0 + + +def test_valid_downloads(httpserver, infopl, alfred4): + """Valid downloads""" + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): + dls = update.get_downloads(TEST_REPO) + + assert len(dls) == len(VALID_DOWNLOADS), "wrong no. 
of downloads" + + for i, dl in enumerate(dls): + print('dl=%r, x=%r' % (dl, VALID_DOWNLOADS[i])) + assert dl == VALID_DOWNLOADS[i], "different downloads" + + +def test_latest_download(infopl): + """Latest download for Alfred version.""" + dls = Download.from_releases(RELEASES_JSON) + tests = ( + # downloads, alfred version, prereleases, wanted result + ([], None, False, None), + (dls, None, False, '9.0'), + (dls, None, True, '10.0-beta'), + (dls, '4', False, '9.0'), + (dls, '4', True, '10.0-beta'), + (dls, '3', False, '6.0'), + (dls, '3', True, '10.0-beta'), ) - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - for vstr in falsey: - assert update.check_update(TEST_REPO_SLUG, vstr) is False - for vstr in truthy: - assert update.check_update(TEST_REPO_SLUG, vstr) is True + for data, version, pre, wanted in tests: + dl = update.latest_download(data, version, pre) + if wanted is None: + assert dl is None, "latest is not None" + else: + assert dl.version == Version(wanted), "unexpected version" -def test_prerelease_version_formats(httpserver, info): - """Prerelease version formats""" - falsey = ( - # Up-to-date versions - '7.1.0-beta', 'v7.1.0-beta', - # Unknown versions - 'v8.0', '8.0', - ) - truthy = ( - # Old versions - 'v5.0', '5.0', +def test_version_formats(httpserver, infopl, alfred4): + """Version formats""" + tests = ( + # current version, prereleases, alfred version, expected value + ('6.0', False, None, True), + ('6.0', False, '4', True), + ('6.0', False, '3', False), + ('6.0', True, None, True), + ('6.0', True, '4', True), + ('6.0', True, '3', True), + ('9.0', False, None, False), + ('9.0', False, '4', False), + ('9.0', False, '3', False), + ('9.0', True, None, True), + ('9.0', True, '4', True), + ('9.0', True, '3', True), ) - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - for vstr in falsey: - assert update.check_update(TEST_REPO_SLUG, vstr, - prereleases=True) is False - for vstr in truthy: - assert 
update.check_update(TEST_REPO_SLUG, vstr, - prereleases=True) is True + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): + for current, pre, alfred, wanted in tests: + v = update.check_update(TEST_REPO, current, pre, alfred) + assert v == wanted, "unexpected update status" -def test_check_update(httpserver, info): +def test_check_update(httpserver, infopl, alfred4): """Check update""" - wf = Workflow() - wf.reset() - - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - assert update.check_update(TEST_REPO_SLUG, - RELEASE_CURRENT) is True - - update_info = wf.cached_data('__workflow_update_status') - assert update_info is not None - assert wf.update_available is True - - assert update.check_update(TEST_REPO_SLUG, - update_info['version']) is False - - -def test_check_update_with_prereleases(httpserver, info): - """Check update with prereleases""" - wf = Workflow() - wf.reset() + key = '__workflow_latest_version' + tests = [ + # data, alfred version, pre, expected value + (RELEASES_JSON, None, False, True), + (RELEASES_JSON, '3', False, True), + (RELEASES_4PLUS_JSON, None, False, True), + (RELEASES_4PLUS_JSON, '3', False, False), + (RELEASES_4PLUS_JSON, '3', True, False), + ] + + for data, alfred, pre, wanted in tests: + wf = Workflow() + wf.reset() - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - assert update.check_update(TEST_REPO_SLUG, - RELEASE_CURRENT, - prereleases=True) is True + with fakeresponse(httpserver, data, HTTP_HEADERS_JSON): + v = update.check_update(TEST_REPO, RELEASE_CURRENT, + pre, alfred) + assert v == wanted, "unexpected update status" - update_info = wf.cached_data('__workflow_update_status') - assert update_info is not None - assert wf.update_available is True + status = wf.cached_data(key) + assert status is not None + assert status['available'] == wanted + assert wf.update_available == wanted - assert update.check_update(TEST_REPO_SLUG, - update_info['version'], - prereleases=True) is False + if 
wanted: # other data may not be set if available is False + v = update.check_update(TEST_REPO, status['version'], + pre, alfred) + assert v is False -def test_install_update(httpserver, info): +def test_install_update(httpserver, infopl, alfred4): """Update is installed.""" + key = '__workflow_latest_version' # Clear any cached data wf = Workflow() wf.reset() # Assert cache was cleared - assert wf.cached_data('__workflow_update_status') is None + assert wf.cached_data(key) is None - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - # No update for latest release + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): + # No update because no update status has been cached assert update.install_update() is False # Check for updates - assert update.check_update(TEST_REPO_SLUG, RELEASE_CURRENT) is True + v = update.check_update(TEST_REPO, RELEASE_CURRENT) + assert v is True # Verify new workflow is downloaded and installed - c = WorkflowMock() - with c: + with WorkflowMock() as c: assert update.install_update() is True + assert c.cmd[0] == 'open' + assert re.search(r'\.alfred(\d+)?workflow$', c.cmd[1]) + + assert wf.cached_data(key)['available'] is False + + # Test mangled update data + status = wf.cached_data(key) + assert status['available'] is False + assert status['download'] is None + assert status['version'] is None + # Flip available bit, but leave rest invalid + status['available'] = True + wf.cache_data(key, status) - assert c.cmd[0] == 'open' - assert c.cmd[1].endswith('.alfredworkflow') - assert wf.cached_data( - '__workflow_update_status')['available'] is False + with WorkflowMock(): + assert update.install_update() is False -def test_no_auto_update(info): +def test_no_auto_update(infopl, alfred4): """No update check.""" + key = '__workflow_latest_version' wf = Workflow() wf.reset() # Assert cache was cleared - assert wf.cached_data('__workflow_update_status') is None + assert wf.cached_data(key) is None c = WorkflowMock(['script', 
'workflow:noautoupdate']) with c: wf = Workflow() wf.args assert wf.settings.get('__workflow_autoupdate') is False - assert wf.cached_data('__workflow_update_status') is None + assert wf.cached_data(key) is None c = WorkflowMock() with c: wf = Workflow(update_settings={ - 'github_slug': TEST_REPO_SLUG, + 'github_slug': TEST_REPO, 'version': RELEASE_CURRENT }) - assert wf.cached_data('__workflow_update_status') is None + assert wf.cached_data(key) is None -def test_update_nondefault_serialiser(httpserver, info): +def test_update_nondefault_serialiser(httpserver, infopl, alfred4): """Check update works when a custom serialiser is set on Workflow https://github.com/deanishe/alfred-workflow/issues/113 @@ -310,12 +307,52 @@ def test_update_nondefault_serialiser(httpserver, info): wf.cache_serializer = 'json' wf.reset() - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - assert update.check_update(TEST_REPO_SLUG, + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): + assert update.check_update(TEST_REPO, RELEASE_CURRENT) is True assert wf.update_available is True +VALID_DOWNLOADS = [ + # Latest version for Alfred 4 + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v10.0-beta/Dummy-10.0-beta.alfredworkflow", # noqa: E501 + "Dummy-10.0-beta.alfredworkflow", + "v10.0-beta", + True), + # Latest stable version for Alfred 4 + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v9.0/Dummy-9.0.alfred4workflow", # noqa: E501 + "Dummy-9.0.alfred4workflow", + "v9.0", + False), + # Latest version for Alfred 3 + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v7.1.0-beta/Dummy-7.1-beta.alfredworkflow", # noqa: E501 + "Dummy-7.1-beta.alfredworkflow", + "v7.1.0-beta", + True), + # Latest stable version for Alfred 3 + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfred4workflow", # noqa: E501 + "Dummy-6.0.alfred4workflow", + 
"v6.0", + False), + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfred3workflow", # noqa: E501 + "Dummy-6.0.alfred3workflow", + "v6.0", + False), + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v6.0/Dummy-6.0.alfredworkflow", # noqa: E501 + "Dummy-6.0.alfredworkflow", + "v6.0", + False), + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v2.0/Dummy-2.0.alfredworkflow", # noqa: E501 + "Dummy-2.0.alfredworkflow", + "v2.0", + False), + Download("https://github.com/deanishe/alfred-workflow-dummy/releases/download/v1.0/Dummy-1.0.alfredworkflow", # noqa: E501 + "Dummy-1.0.alfredworkflow", + "v1.0", + False), +] + + if __name__ == '__main__': # pragma: no cover pytest.main([__file__]) diff --git a/tests/test_update_alfred3.py b/tests/test_update_alfred3.py deleted file mode 100755 index e0bdb1cc..00000000 --- a/tests/test_update_alfred3.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -# -# Copyright (c) 2014 Fabio Niephaus , -# Dean Jackson -# -# MIT Licence. 
See http://opensource.org/licenses/MIT -# -# Created on 2014-08-16 -# - -"""Unit tests for Alfred 3 updates.""" - -from __future__ import print_function - -import os - -import pytest - -from util import create_info_plist, delete_info_plist, INFO_PLIST_TEST3 -from workflow import update - - -@pytest.fixture(scope='module') -def info(request): - """Ensure `info.plist` exists in the working directory.""" - create_info_plist() - os.environ['alfred_version'] = '2.4' - update._wf = None - request.addfinalizer(delete_info_plist) - - -@pytest.fixture(scope='module') -def info3(request): - """Ensure `info.plist` exists in the working directory.""" - create_info_plist(INFO_PLIST_TEST3) - os.environ['alfred_version'] = '3.0.2' - update._wf = None - request.addfinalizer(delete_info_plist) - - -def test_valid_releases_alfred2(info): - """Valid releases for Alfred 2.""" - # Valid release for 2 & 3 - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfredworkflow'}], - 'prerelease': False}) - - assert r is not None - assert r['prerelease'] is False - assert r['download_url'] == 'blah.alfredworkflow' - - # Valid release for 3 only - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfred3workflow'}], - 'prerelease': False}) - - assert r is None - - # Invalid release - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfred3workflow'}, - {'browser_download_url': - 'blah2.alfred3workflow'}], - 'prerelease': False}) - - assert r is None - - # Valid for 2 & 3 with separate workflows - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfredworkflow'}, - {'browser_download_url': - 'blah.alfred3workflow'}], - 'prerelease': False}) - - assert r is not None - assert r['version'] == 'v1.2' - assert r['download_url'] == 'blah.alfredworkflow' - - -def test_valid_releases_alfred3(info3): - """Valid 
releases for Alfred 3.""" - # Valid release for 2 & 3 - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfredworkflow'}], - 'prerelease': False}) - - assert r is not None - assert r['download_url'] == 'blah.alfredworkflow' - - # Valid release for 3 only - print('alfred_version=', os.environ['alfred_version']) - print('version=', update.wf().alfred_version) - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfred3workflow'}], - 'prerelease': False}) - - assert r is not None - assert r['download_url'] == 'blah.alfred3workflow' - - # Invalid release - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfred3workflow'}, - {'browser_download_url': - 'blah2.alfred3workflow'}], - 'prerelease': False}) - - assert r is None - - # Valid for 2 & 3 with separate workflows - r = update._validate_release({'tag_name': 'v1.2', 'assets': [ - {'browser_download_url': - 'blah.alfredworkflow'}, - {'browser_download_url': - 'blah.alfred3workflow'}], - 'prerelease': False}) - - assert r is not None - assert r['version'] == 'v1.2' - assert r['download_url'] == 'blah.alfred3workflow' - - -if __name__ == '__main__': # pragma: no cover - pytest.main([__file__]) diff --git a/tests/test_update_versions.py b/tests/test_update_versions.py old mode 100755 new mode 100644 index 0e379024..14a409d4 --- a/tests/test_update_versions.py +++ b/tests/test_update_versions.py @@ -23,12 +23,15 @@ class VersionTests(unittest.TestCase): """Unit tests for Version.""" def setUp(self): + """Initialise unit test data.""" self.invalid_versions = [ + '', 'bob', '1.x.8', '1.0b', '1.0.3a', '1.0.0.0', + '1.2.3.4', ] self.valid_versions = [ ('1', '1.0.0'), @@ -69,18 +72,6 @@ def test_compare_bad_objects(self): self.assertRaises(ValueError, lambda v, t: v != t, v, t) self.assertRaises(ValueError, lambda v, t: v > t, v, t) self.assertRaises(ValueError, lambda v, t: v < t, v, 
t) - # with self.assertRaises(ValueError): - # Version('1.0.0') == (1, 0, 0) - # with self.assertRaises(ValueError): - # Version('1.0.0') >= (1, 0, 0) - # with self.assertRaises(ValueError): - # Version('1.0.0') <= (1, 0, 0) - # with self.assertRaises(ValueError): - # Version('1.0.0') != (1, 0, 0) - # with self.assertRaises(ValueError): - # Version('1.0.0') > (1, 0, 0) - # with self.assertRaises(ValueError): - # Version('1.0.0') < (1, 0, 0) def test_compare_versions(self): """Versions: comparisons""" diff --git a/tests/test_util.py b/tests/test_util.py index 5810ee83..58ab4c84 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -13,7 +13,6 @@ from __future__ import print_function, absolute_import -# from collections import namedtuple import os import shutil import subprocess @@ -21,12 +20,11 @@ import pytest +from .conftest import env from workflow.util import ( - AS_TRIGGER, - AS_CONFIG_SET, - AS_CONFIG_UNSET, appinfo, applescriptify, + jxa_app_name, run_applescript, run_command, run_jxa, @@ -37,30 +35,7 @@ utf8ify, ) - -class MockCall(object): - """Captures calls to `subprocess.check_output`.""" - - def __init__(self): - self.cmd = None - self._check_output_orig = None - - def set_up(self): - self._check_output_orig = subprocess.check_output - subprocess.check_output = self._check_output - - def tear_down(self): - subprocess.check_output = self._check_output_orig - - def _check_output(self, cmd, **kwargs): - self.cmd = cmd - - def __enter__(self): - self.set_up() - return self - - def __exit__(self, *args): - self.tear_down() +from .util import MockCall @pytest.fixture(scope='function') @@ -191,32 +166,63 @@ def test_run_jxa(testfile): assert out.strip() == '1' -def test_run_trigger(): - """Call External Trigger.""" +def test_app_name(): + """Appname""" + tests = [ + (None, 'com.runningwithcrayons.Alfred'), + ('', 'com.runningwithcrayons.Alfred'), + ('4', 'com.runningwithcrayons.Alfred'), + ('5', 'com.runningwithcrayons.Alfred'), + ('twelty', 
'com.runningwithcrayons.Alfred'), + ('3', 'Alfred 3'), + ('3.8', 'Alfred 3'), + ('3.1-beta', 'Alfred 3'), + ('3thirty', 'Alfred 3'), + ] + + for version, wanted in tests: + with env(alfred_version=version): + assert jxa_app_name() == wanted, "unexpected appname" + + +def test_run_trigger(alfred4): + """Call External Trigger""" name = 'test' bundleid = 'net.deanishe.alfred-workflow' arg = 'test arg' - argclause = 'with argument "test arg"' # With bundle ID - script = AS_TRIGGER.format(name=name, bundleid=bundleid, arg='') - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.runTrigger("test", ' + '{"inWorkflow": "net.deanishe.alfred-workflow"});' + ) + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: run_trigger(name, bundleid) assert m.cmd == cmd # With arg - script = AS_TRIGGER.format(name=name, bundleid=bundleid, arg=argclause) - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.runTrigger("test", ' + '{"inWorkflow": "net.deanishe.alfred-workflow", ' + '"withArgument": "test arg"});' + ) + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: run_trigger(name, bundleid, arg) assert m.cmd == cmd # With bundle ID from env + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.runTrigger("test", ' + '{"inWorkflow": "net.deanishe.alfred-workflow"});' + ) os.environ['alfred_workflow_bundleid'] = bundleid try: - script = AS_TRIGGER.format(name=name, bundleid=bundleid, arg='') - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: run_trigger(name) assert m.cmd == cmd @@ -224,7 +230,7 @@ def test_run_trigger(): del os.environ['alfred_workflow_bundleid'] -def test_set_config(): +def test_set_config(alfred4): """Set Configuration.""" name = 
'test' bundleid = 'net.deanishe.alfred-workflow' @@ -232,33 +238,54 @@ def test_set_config(): # argclause = 'with argument "test arg"' # With bundle ID - script = AS_CONFIG_SET.format(name=name, value=value, - bundleid=bundleid, - export='exportable false') - - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.setConfiguration("test", ' + '{"exportable": false, ' + '"inWorkflow": "net.deanishe.alfred-workflow", ' + '"toValue": "test"});' + ) + # script = AS_CONFIG_SET.format(name=name, value=value, + # bundleid=bundleid, + # export='exportable false') + + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: set_config(name, value, bundleid) assert m.cmd == cmd # With exportable - script = AS_CONFIG_SET.format(name=name, value=value, - bundleid=bundleid, - export='exportable true') - - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.setConfiguration("test", ' + '{"exportable": true, ' + '"inWorkflow": "net.deanishe.alfred-workflow", ' + '"toValue": "test"});' + ) + # script = AS_CONFIG_SET.format(name=name, value=value, + # bundleid=bundleid, + # export='exportable true') + + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: set_config(name, value, bundleid, True) assert m.cmd == cmd # With bundle ID from env os.environ['alfred_workflow_bundleid'] = bundleid + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.setConfiguration("test", ' + '{"exportable": false, ' + '"inWorkflow": "net.deanishe.alfred-workflow", ' + '"toValue": "test"});' + ) try: - script = AS_CONFIG_SET.format(name=name, value=value, - bundleid=bundleid, - export='exportable false') + # script = AS_CONFIG_SET.format(name=name, value=value, + # bundleid=bundleid, + # export='exportable false') - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + 
cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: set_config(name, value) assert m.cmd == cmd @@ -266,17 +293,19 @@ def test_set_config(): del os.environ['alfred_workflow_bundleid'] -def test_unset_config(): +def test_unset_config(alfred4): """Unset Configuration.""" name = 'test' bundleid = 'net.deanishe.alfred-workflow' - value = 'test' # argclause = 'with argument "test arg"' # With bundle ID - script = AS_CONFIG_UNSET.format(name=name, bundleid=bundleid) - - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + script = ( + 'Application("com.runningwithcrayons.Alfred")' + '.removeConfiguration("test", ' + '{"inWorkflow": "net.deanishe.alfred-workflow"});' + ) + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: unset_config(name, bundleid) assert m.cmd == cmd @@ -284,9 +313,7 @@ def test_unset_config(): # With bundle ID from env os.environ['alfred_workflow_bundleid'] = bundleid try: - script = AS_CONFIG_UNSET.format(name=name, bundleid=bundleid) - - cmd = ['/usr/bin/osascript', '-l', 'AppleScript', '-e', script] + cmd = ['/usr/bin/osascript', '-l', 'JavaScript', '-e', script] with MockCall() as m: unset_config(name) assert m.cmd == cmd diff --git a/tests/test_util_atomic.py b/tests/test_util_atomic.py old mode 100755 new mode 100644 diff --git a/tests/test_util_lockfile.py b/tests/test_util_lockfile.py old mode 100755 new mode 100644 index 887daccd..c8d0943b --- a/tests/test_util_lockfile.py +++ b/tests/test_util_lockfile.py @@ -55,57 +55,6 @@ def test_lockfile_created(paths): assert not os.path.exists(paths.lockfile) -# def test_lockfile_contains_pid(paths): -# """Lockfile contains process PID.""" -# assert not os.path.exists(paths.testfile) -# assert not os.path.exists(paths.lockfile) - -# with LockFile(paths.testfile, timeout=0.2): -# with open(paths.lockfile) as fp: -# s = fp.read() - -# assert s == str(os.getpid()) - - -# def test_invalid_lockfile_removed(paths): -# """Invalid 
lockfile removed.""" -# assert not os.path.exists(paths.testfile) -# assert not os.path.exists(paths.lockfile) - -# # create invalid lock file -# with open(paths.lockfile, 'wb') as fp: -# fp.write("dean woz 'ere!") - -# # the above invalid lockfile should be removed and -# # replaced with one containing this process's PID -# with LockFile(paths.testfile, timeout=0.2): -# with open(paths.lockfile) as fp: -# s = fp.read() - -# assert s == str(os.getpid()) - - -# def test_stale_lockfile_removed(paths): -# """Stale lockfile removed.""" -# assert not os.path.exists(paths.testfile) -# assert not os.path.exists(paths.lockfile) - -# p = subprocess.Popen('true') -# pid = p.pid -# p.wait() -# # create invalid lock file -# with open(paths.lockfile, 'wb') as fp: -# fp.write(str(pid)) - -# # the above invalid lockfile should be removed and -# # replaced with one containing this process's PID -# with LockFile(paths.testfile, timeout=0.2): -# with open(paths.lockfile) as fp: -# s = fp.read() - -# assert s == str(os.getpid()) - - def test_sequential_access(paths): """Sequential access to locked file.""" assert not os.path.exists(paths.testfile) @@ -146,7 +95,8 @@ def test_concurrent_access(paths): lock = LockFile(paths.testfile, 0.5) pool = Pool(5) - pool.map(_write_test_data, [(paths, str(i) * 20) for i in range(1, 6)]) + pool.map(_write_test_data, + [(paths, str(i) * 20) for i in range(1, 6)]) assert not lock.locked assert not os.path.exists(paths.lockfile) @@ -166,7 +116,8 @@ def _write_settings(args): s[key] = value print('Settings[{0}] = {1}'.format(key, value)) except Exception as err: - print('error opening settings (%s): %s' % (key, traceback.format_exc()), + print('error opening settings (%s): %s' % (key, + traceback.format_exc()), file=sys.stderr) return err diff --git a/tests/test_util_uninterruptible.py b/tests/test_util_uninterruptible.py old mode 100755 new mode 100644 diff --git a/tests/test_web.py b/tests/test_web.py old mode 100755 new mode 100644 index 
00a6a655..2cdb89f9 --- a/tests/test_web.py +++ b/tests/test_web.py @@ -25,7 +25,7 @@ import pytest import pytest_httpbin -import pytest_localserver +import pytest_localserver # noqa: F401 from workflow import web @@ -37,10 +37,12 @@ class CaseInsensitiveDictTests(unittest.TestCase): """Unit tests for CaseInsensitiveDict""" def setUp(self): + """Initialise test environment.""" self.data_list = [('Aardvark', 'a'), ('Booty', 'b'), ('Clown', 'c')] self.data_dict = dict(self.data_list) def tearDown(self): + """Reset test environment.""" pass def test_init_dict(self): @@ -50,7 +52,7 @@ def test_init_dict(self): self.assertEqual(d1, d2) def test_retrieve(self): - """CaseInsensitiveDict retrieve values""" + """Retrieve CaseInsensitiveDict values""" d = web.CaseInsensitiveDictionary(self.data_list) for k, v in self.data_list: self.assertEqual(v, d[k]) @@ -70,7 +72,7 @@ def test_retrieve(self): self.assertFalse('This is not a key' in d) def test_set(self): - """CaseInsensitiveDict set values""" + """Set CaseInsensitiveDict values""" d = web.CaseInsensitiveDictionary() for k, v in self.data_list: self.assertFalse(k in d) @@ -96,7 +98,7 @@ def test_set(self): self.assertEqual(d.get(k), v) def test_iterators(self): - """CaseInsensitiveDict iterators""" + """Iterate CaseInsensitiveDict""" d = web.CaseInsensitiveDictionary(self.data_dict) self.assertEqual(sorted(d.keys()), sorted(self.data_dict.keys())) @@ -116,9 +118,10 @@ def test_iterators(self): @pytest_httpbin.use_class_based_httpbin class WebTests(unittest.TestCase): - """Unit tests for workflow.web.""" + """Unit tests for workflow.web""" def setUp(self): + """Initialise unit test environment.""" self.data = {'name': 'My name is Jürgen!', 'address': 'Hürterstr. 
42\nEssen'} self.test_file = os.path.join(DATA_DIR, @@ -127,6 +130,7 @@ def setUp(self): 'web_py.{0:d}.tmp'.format(os.getpid())) def tearDown(self): + """Reset unit test environment.""" if os.path.exists(self.tempdir): shutil.rmtree(self.tempdir) diff --git a/tests/test_web_http_encoding.py b/tests/test_web_http_encoding.py old mode 100755 new mode 100644 index ac921742..e052f461 --- a/tests/test_web_http_encoding.py +++ b/tests/test_web_http_encoding.py @@ -15,7 +15,7 @@ import os import pytest -import pytest_localserver +import pytest_localserver # noqa: F401 from workflow import web diff --git a/tests/test_workflow.py b/tests/test_workflow.py old mode 100755 new mode 100644 index 19542da8..d543cc5d --- a/tests/test_workflow.py +++ b/tests/test_workflow.py @@ -12,1025 +12,99 @@ from __future__ import print_function, unicode_literals -import json import logging import os -import shutil -from StringIO import StringIO import sys -import time -import unittest from unicodedata import normalize -from util import ( - VersionFile, - InfoPlist, - INFO_PLIST_PATH, - create_info_plist, - delete_info_plist, -) - -from workflow.workflow import (Workflow, PasswordNotFound, - KeychainError, MATCH_ALL, MATCH_ALLCHARS, - MATCH_ATOM, MATCH_CAPITALS, MATCH_STARTSWITH, - MATCH_SUBSTRING, MATCH_INITIALS_CONTAIN, - MATCH_INITIALS_STARTSWITH, - manager) - -from workflow.update import Version - -# info.plist settings -BUNDLE_ID = 'net.deanishe.alfred-workflow' -WORKFLOW_NAME = 'Alfred-Workflow Test' - - -class WorkflowTests(unittest.TestCase): - """Test suite for workflow.workflow.Workflow.""" - - def setUp(self): - self.libs = [os.path.join(os.path.dirname(__file__), b'lib')] - self.account = 'this-is-my-test-account' - self.password = 'this-is-my-safe-password' - self.password2 = 'this-is-my-other-safe-password' - self.password3 = 'this-pässwörd-is\\"non-ASCII"' - self.search_items = [ - ('Test Item One', MATCH_STARTSWITH), - ('test item two', MATCH_STARTSWITH), - 
('TwoExtraSpecialTests', MATCH_CAPITALS), - ('this-is-a-test', MATCH_ATOM), - ('the extra special trials', MATCH_INITIALS_STARTSWITH), - ('not the extra special trials', MATCH_INITIALS_CONTAIN), - ('intestinal fortitude', MATCH_SUBSTRING), - ('the splits', MATCH_ALLCHARS), - ('nomatch', 0), - ] - - self.search_items_diacritics = [ - # search key, query - ('Änderungen vorbehalten', 'av'), - ('Änderungen', 'anderungen'), - ('überwiegend bewolkt', 'ub'), - ('überwiegend', 'uberwiegend'), - ('Öffnungszeiten an Feiertagen', 'offnungszeiten'), - ('Öffnungszeiten an Feiertagen', 'oaf'), - ('Fußpilz', 'fuss'), - ('salé', 'sale') - ] - - self.punctuation_data = [ - ('"test"', '"test"'), - ('„wat denn?“', '"wat denn?"'), - ('‚wie dat denn?‘', "'wie dat denn?'"), - ('“test”', '"test"'), - ('and—why—not', 'and-why-not'), - ('10–20', '10-20'), - ('Shady’s back', "Shady's back"), - ] - - self.env_data = { - 'alfred_debug': b'1', - 'alfred_preferences': - os.path.expanduser(b'~/Dropbox/Alfred/Alfred.alfredpreferences'), - 'alfred_preferences_localhash': - b'adbd4f66bc3ae8493832af61a41ee609b20d8705', - 'alfred_theme': b'alfred.theme.yosemite', - 'alfred_theme_background': b'rgba(255,255,255,0.98)', - 'alfred_theme_subtext': b'3', - 'alfred_version': b'2.4', - 'alfred_version_build': b'277', - 'alfred_workflow_bundleid': str(BUNDLE_ID), - 'alfred_workflow_cache': - os.path.expanduser(b'~/Library/Caches/com.runningwithcrayons.' 
- b'Alfred-2/Workflow Data/{0}'.format(BUNDLE_ID)), - 'alfred_workflow_data': - os.path.expanduser(b'~/Library/Application Support/Alfred 2/' - b'Workflow Data/{0}'.format(BUNDLE_ID)), - 'alfred_workflow_name': b'Alfred-Workflow Test', - 'alfred_workflow_uid': - b'user.workflow.B0AC54EC-601C-479A-9428-01F9FD732959', - } - - self._setup_env() - create_info_plist() - - self.wf = Workflow(libraries=self.libs) - - def tearDown(self): - create_info_plist() - self.wf.reset() - try: - self.wf.delete_password(self.account) - except PasswordNotFound: - pass - - for dirpath in (self.wf.cachedir, self.wf.datadir): - if os.path.exists(dirpath): - shutil.rmtree(dirpath) - - self._teardown_env() - delete_info_plist() - - #################################################################### - # Environment - #################################################################### - - def test_additional_libs(self): - """Additional libraries""" - for path in self.libs: - self.assert_(path in sys.path) - self.assertEqual(sys.path[0:len(self.libs)], self.libs) - import youcanimportme - youcanimportme.noop() - - def test_info_plist(self): - """info.plist""" - self._teardown_env() - self.assertEqual(self.wf.name, WORKFLOW_NAME) - self.assertEqual(self.wf.bundleid, BUNDLE_ID) - - def test_info_plist_missing(self): - """Info.plist missing""" - # delete_info_plist() - self._teardown_env() - with InfoPlist(present=False): - wf = Workflow() - self.assertFalse(os.path.exists(INFO_PLIST_PATH)) - # self.assertRaises(IOError, lambda wf: wf.info, wf) - self.assertRaises(IOError, lambda: wf.workflowdir) - # try: - # self.assertRaises(IOError, Workflow) - # finally: - # create_info_plist() - - def test_alfred_env_vars(self): - """Alfred environmental variables""" - for key in self.env_data: - value = self.env_data[key] - key = key.replace('alfred_', '') - if key in ('debug', 'version_build', 'theme_subtext'): - self.assertEqual(int(value), self.wf.alfred_env[key]) - else: - 
self.assertEqual(unicode(value), self.wf.alfred_env[key]) - self.assertTrue(isinstance(self.wf.alfred_env[key], unicode)) - - self.assertEqual(self.wf.datadir, - self.env_data['alfred_workflow_data']) - self.assertEqual(self.wf.cachedir, - self.env_data['alfred_workflow_cache']) - self.assertEqual(self.wf.bundleid, - self.env_data['alfred_workflow_bundleid']) - self.assertEqual(self.wf.name, - self.env_data['alfred_workflow_name']) - - def test_alfred_debugger(self): - """Alfred debugger status""" - wf = Workflow() - self.assertTrue(wf.debugging) # Alfred's debugger is open - self.assertEqual(wf.logger.getEffectiveLevel(), logging.DEBUG) - - # With debugger off - self._teardown_env() - data = self.env_data.copy() - del data['alfred_debug'] - self._setup_env(data) - wf = Workflow() - self.assertFalse(wf.debugging) # Alfred's debugger is closed - self.assertEqual(wf.logger.getEffectiveLevel(), logging.INFO) - - #################################################################### - # ARGV - #################################################################### - - def test_args(self): - """ARGV""" - args = ['arg1', 'arg2', 'füntíme'] - oargs = sys.argv[:] - sys.argv = [oargs[0]] + [s.encode('utf-8') for s in args] - wf = Workflow() - try: - self.assertEqual(wf.args, args) - finally: - sys.argv = oargs[:] - - def test_arg_normalisation(self): - """ARGV normalisation""" - def nfdme(s): - """NFD-normalise string""" - return normalize('NFD', s) - - args = [nfdme(s) for s in ['arg1', 'arg2', 'füntíme']] - oargs = sys.argv[:] - sys.argv = [oargs[0]] + [s.encode('utf-8') for s in args] - - wf = Workflow(normalization='NFD') - try: - self.assertEqual(wf.args, args) - finally: - sys.argv = oargs[:] - - def test_magic_args(self): - """Magic args""" - # cache original sys.argv - oargs = sys.argv[:] - - # delsettings - sys.argv = [oargs[0]] + [b'workflow:delsettings'] - try: - wf = Workflow(default_settings={'arg1': 'value1'}) - self.assertEqual(wf.settings['arg1'], 'value1') - 
self.assertTrue(os.path.exists(wf.settings_path)) - self.assertRaises(SystemExit, lambda wf: wf.args, wf) - self.assertFalse(os.path.exists(wf.settings_path)) - finally: - sys.argv = oargs[:] - - # delcache - sys.argv = [oargs[0]] + [b'workflow:delcache'] - - def somedata(): - return {'arg1': 'value1'} - - try: - wf = Workflow() - cachepath = wf.cachefile('somedir') - os.makedirs(cachepath) - wf.cached_data('test', somedata) - self.assertTrue(os.path.exists(wf.cachefile('test.cpickle'))) - self.assertRaises(SystemExit, lambda wf: wf.args, wf) - self.assertFalse(os.path.exists(wf.cachefile('test.cpickle'))) - finally: - sys.argv = oargs[:] - - def test_logger(self): - """Logger""" - self.assert_(isinstance(self.wf.logger, logging.Logger)) - logger = logging.Logger('') - self.wf.logger = logger - self.assertEqual(self.wf.logger, logger) - - #################################################################### - # Cached data - #################################################################### - - def test_cached_data(self): - """Cached data stored""" - data = {'key1': 'value1'} - d = self.wf.cached_data('test', lambda: data, max_age=10) - self.assertEqual(data, d) - - def test_cached_data_deleted(self): - """Cached data deleted""" - data = {'key1': 'value1'} - d = self.wf.cached_data('test', lambda: data, max_age=10) - self.assertEqual(data, d) - ret = self.wf.cache_data('test', None) - self.assertEquals(ret, None) - self.assertFalse(os.path.exists(self.wf.cachefile('test.cpickle'))) - # Test alternate code path for non-existent file - self.assertEqual(self.wf.cache_data('test', None), None) - - def test_delete_all_cache_file(self): - """Cached data are all deleted""" - data = {'key1': 'value1'} - test_file1 = 'test1.cpickle' - test_file2 = 'test2.cpickle' - - self.wf.cached_data('test1', lambda: data, max_age=10) - self.wf.cached_data('test2', lambda: data, max_age=10) - self.assertTrue(os.path.exists(self.wf.cachefile(test_file1))) - 
self.assertTrue(os.path.exists(self.wf.cachefile(test_file2))) - self.wf.clear_cache() - self.assertFalse(os.path.exists(self.wf.cachefile(test_file1))) - self.assertFalse(os.path.exists(self.wf.cachefile(test_file2))) - - def test_delete_all_cache_file_with_filter_func(self): - """Only part of cached data are deleted""" - data = {'key1': 'value1'} - test_file1 = 'test1.cpickle' - test_file2 = 'test2.cpickle' - - def filter_func(file): - if file == test_file1: - return True - else: - return False - - self.wf.cached_data('test1', lambda: data, max_age=10) - self.wf.cached_data('test2', lambda: data, max_age=10) - self.assertTrue(os.path.exists(self.wf.cachefile(test_file1))) - self.assertTrue(os.path.exists(self.wf.cachefile(test_file2))) - self.wf.clear_cache(filter_func) - self.assertFalse(os.path.exists(self.wf.cachefile(test_file1))) - self.assertTrue(os.path.exists(self.wf.cachefile(test_file2))) - self.wf.clear_cache() - self.assertFalse(os.path.exists(self.wf.cachefile(test_file2))) - - def test_cached_data_callback(self): - """Cached data callback""" - called = {'called': False} - data = [1, 2, 3] - - def getdata(): - called['called'] = True - return data - - d = self.wf.cached_data('test', getdata, max_age=10) - self.assertEqual(d, data) - self.assertTrue(called['called']) - - def test_cached_data_no_callback(self): - """Cached data no callback""" - d = self.wf.cached_data('nonexistent', None) - self.assertEqual(d, None) - - def test_cache_expires(self): - """Cached data expires""" - data = ('hello', 'goodbye') - called = {'called': False} - - def getdata(): - called['called'] = True - return data - - d = self.wf.cached_data('test', getdata, max_age=1) - self.assertEqual(d, data) - self.assertTrue(called['called']) - # should be loaded from cache - called['called'] = False - d2 = self.wf.cached_data('test', getdata, max_age=1) - self.assertEqual(d2, data) - self.assertFalse(called['called']) - # cache has expired - time.sleep(1) - # should be loaded from 
cache (no expiry) - d3 = self.wf.cached_data('test', getdata, max_age=0) - self.assertEqual(d3, data) - self.assertFalse(called['called']) - # should hit data func (cached data older than 1 sec) - d4 = self.wf.cached_data('test', getdata, max_age=1) - self.assertEqual(d4, data) - self.assertTrue(called['called']) - - def test_cache_fresh(self): - """Cached data is fresh""" - data = 'This is my data' - d = self.wf.cached_data('test', lambda: data, max_age=1) - self.assertEqual(d, data) - self.assertTrue(self.wf.cached_data_fresh('test', max_age=10)) - - def test_cache_fresh_non_existent(self): - """Non-existent cache data is not fresh""" - self.assertEqual(self.wf.cached_data_fresh('popsicle', max_age=10000), - False) - - #################################################################### - # Serialisation - #################################################################### - - def test_cache_serializer(self): - """Cache serializer""" - self.assertEqual(self.wf.cache_serializer, 'cpickle') - - def set_serializer(wf, serializer): - wf.cache_serializer = serializer - - self.assertRaises(ValueError, set_serializer, self.wf, 'non-existent') - self.assertEqual(self.wf.cache_serializer, 'cpickle') - self.wf.cache_serializer = 'pickle' - self.assertEqual(self.wf.cache_serializer, 'pickle') - - def test_alternative_cache_serializer(self): - """Alternative cache serializer""" - data = {'key1': 'value1'} - self.assertEqual(self.wf.cache_serializer, 'cpickle') - self.wf.cache_data('test', data) - self.assertTrue(os.path.exists(self.wf.cachefile('test.cpickle'))) - self.assertEqual(data, self.wf.cached_data('test')) - - self.wf.cache_serializer = 'pickle' - self.assertEqual(None, self.wf.cached_data('test')) - self.wf.cache_data('test', data) - self.assertTrue(os.path.exists(self.wf.cachefile('test.pickle'))) - self.assertEqual(data, self.wf.cached_data('test')) - - self.wf.cache_serializer = 'json' - self.assertEqual(None, self.wf.cached_data('test')) - 
self.wf.cache_data('test', data) - self.assertTrue(os.path.exists(self.wf.cachefile('test.json'))) - self.assertEqual(data, self.wf.cached_data('test')) - - def test_custom_cache_serializer(self): - """Custom cache serializer""" - data = {'key1': 'value1'} - - class MySerializer(object): - """Simple serializer""" - @classmethod - def load(self, file_obj): - return json.load(file_obj) - - @classmethod - def dump(self, obj, file_obj): - return json.dump(obj, file_obj, indent=2) - - manager.register('spoons', MySerializer) - try: - self.assertFalse(os.path.exists(self.wf.cachefile('test.spoons'))) - self.wf.cache_serializer = 'spoons' - self.wf.cache_data('test', data) - self.assertTrue(os.path.exists(self.wf.cachefile('test.spoons'))) - self.assertEqual(data, self.wf.cached_data('test')) - finally: - manager.unregister('spoons') - - def test_data_serializer(self): - """Data serializer""" - self.assertEqual(self.wf.data_serializer, 'cpickle') - - def set_serializer(wf, serializer): - wf.data_serializer = serializer - - self.assertRaises(ValueError, set_serializer, self.wf, 'non-existent') - self.assertEqual(self.wf.data_serializer, 'cpickle') - self.wf.data_serializer = 'pickle' - self.assertEqual(self.wf.data_serializer, 'pickle') - - def test_alternative_data_serializer(self): - """Alternative data serializer""" - data = {'key7': 'value7'} - - self.assertEqual(self.wf.data_serializer, 'cpickle') - self.wf.store_data('test', data) - for path in self._stored_data_paths('test', 'cpickle'): - self.assertTrue(os.path.exists(path)) - self.assertEqual(data, self.wf.stored_data('test')) - - self.wf.data_serializer = 'pickle' - self.assertEqual(data, self.wf.stored_data('test')) - self.wf.store_data('test', data) - for path in self._stored_data_paths('test', 'pickle'): - self.assertTrue(os.path.exists(path)) - self.assertEqual(data, self.wf.stored_data('test')) - - self.wf.data_serializer = 'json' - self.assertEqual(data, self.wf.stored_data('test')) - 
self.wf.store_data('test', data) - for path in self._stored_data_paths('test', 'json'): - self.assertTrue(os.path.exists(path)) - self.assertEqual(data, self.wf.stored_data('test')) - - def test_non_existent_stored_data(self): - """Non-existent stored data""" - self.assertTrue(self.wf.stored_data('banjo magic') is None) - - def test_borked_stored_data(self): - """Borked stored data""" - data = {'key7': 'value7'} - - self.wf.store_data('test', data) - metadata, datapath = self._stored_data_paths('test', 'cpickle') - os.unlink(metadata) - self.assertEqual(self.wf.stored_data('test'), None) - - self.wf.store_data('test', data) - metadata, datapath = self._stored_data_paths('test', 'cpickle') - os.unlink(datapath) - self.assertTrue(self.wf.stored_data('test') is None) - - self.wf.store_data('test', data) - metadata, datapath = self._stored_data_paths('test', 'cpickle') - with open(metadata, 'wb') as file_obj: - file_obj.write('bangers and mash') - self.wf.logger.debug('Changed format to `bangers and mash`') - self.assertRaises(ValueError, self.wf.stored_data, 'test') - - def test_reject_settings(self): - """Disallow settings.json""" - data = {'key7': 'value7'} - - self.wf.data_serializer = 'json' - - self.assertRaises(ValueError, self.wf.store_data, 'settings', data) - - def test_invalid_data_serializer(self): - """Invalid data serializer""" - data = {'key7': 'value7'} - - self.assertRaises(ValueError, self.wf.store_data, 'test', data, - 'spong') - - #################################################################### - # Data deletion - #################################################################### - - def test_delete_stored_data(self): - """Delete stored data""" - data = {'key7': 'value7'} - - paths = self._stored_data_paths('test', 'cpickle') - - self.wf.store_data('test', data) - self.assertEqual(data, self.wf.stored_data('test')) - self.wf.store_data('test', None) - self.assertEqual(None, self.wf.stored_data('test')) - - for p in paths: - 
self.assertFalse(os.path.exists(p)) - - def test_delete_all_stored_data_file(self): - """Stored data are all deleted""" - data = {'key1': 'value1'} - test_file1 = 'test1.cpickle' - test_file2 = 'test2.cpickle' - - self.wf.store_data('test1', data) - self.wf.store_data('test2', data) - self.assertTrue(os.path.exists(self.wf.datafile(test_file1))) - self.assertTrue(os.path.exists(self.wf.datafile(test_file2))) - self.wf.clear_data() - self.assertFalse(os.path.exists(self.wf.datafile(test_file1))) - self.assertFalse(os.path.exists(self.wf.datafile(test_file2))) - - def test_delete_all_data_file_with_filter_func(self): - """Only part of stored data are deleted""" - data = {'key1': 'value1'} - test_file1 = 'test1.cpickle' - test_file2 = 'test2.cpickle' - - def filter_func(file): - if file == test_file1: - return True - else: - return False - - self.wf.store_data('test1', data) - self.wf.store_data('test2', data) - - self.assertTrue(os.path.exists(self.wf.datafile(test_file1))) - self.assertTrue(os.path.exists(self.wf.datafile(test_file2))) - self.wf.clear_data(filter_func) - self.assertFalse(os.path.exists(self.wf.datafile(test_file1))) - self.assertTrue(os.path.exists(self.wf.datafile(test_file2))) - self.wf.clear_data() - self.assertFalse(os.path.exists(self.wf.datafile(test_file2))) - - #################################################################### - # Keychain - #################################################################### - - def test_keychain(self): - """Save/get/delete password""" - self.assertRaises(PasswordNotFound, - self.wf.delete_password, self.account) - self.assertRaises(PasswordNotFound, self.wf.get_password, self.account) - self.wf.save_password(self.account, self.password) - self.assertEqual(self.wf.get_password(self.account), self.password) - self.assertEqual(self.wf.get_password(self.account, BUNDLE_ID), - self.password) - # try to set same password - self.wf.save_password(self.account, self.password) - 
self.assertEqual(self.wf.get_password(self.account), self.password) - # try to set different password - self.wf.save_password(self.account, self.password2) - self.assertEqual(self.wf.get_password(self.account), self.password2) - # try to set non-ASCII password - self.wf.save_password(self.account, self.password3) - self.assertEqual(self.wf.get_password(self.account), self.password3) - # bad call to _call_security - self.assertRaises(KeychainError, self.wf._call_security, - 'pants', BUNDLE_ID, self.account) - - #################################################################### - # Running workflow - #################################################################### - - def test_run_fails(self): - """Run fails""" - def cb(wf): - self.assertEqual(wf, self.wf) - raise ValueError('Have an error') - self.wf.name # cause info.plist to be parsed - self.wf.help_url = 'http://www.deanishe.net/alfred-workflow/' - ret = self.wf.run(cb) - self.assertEqual(ret, 1) - # named after bundleid - self.wf = Workflow() - self.wf.bundleid - ret = self.wf.run(cb) - self.assertEqual(ret, 1) - - def test_run_fails_with_xml_output(self): - """Run fails with XML output""" - error_text = 'Have an error' - - def cb(wf): - self.assertEqual(wf, self.wf) - raise ValueError(error_text) - - # named after bundleid - self.wf = Workflow() - self.wf.bundleid - - stdout = sys.stdout - sio = StringIO() - sys.stdout = sio - ret = self.wf.run(cb) - sys.stdout = stdout - output = sio.getvalue() - sio.close() - - self.assertEqual(ret, 1) - self.assertTrue(error_text in output) - self.assertTrue(' +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for serializers.""" + +from __future__ import print_function, unicode_literals + +import pytest + + +def test_unicode_paths(wf): + """Workflow paths are Unicode""" + s = b'test.txt' + u = u'über.txt' + assert isinstance(wf.datadir, unicode) + assert isinstance(wf.datafile(s), unicode) + assert 
isinstance(wf.datafile(u), unicode) + assert isinstance(wf.cachedir, unicode) + assert isinstance(wf.cachefile(s), unicode) + assert isinstance(wf.cachefile(u), unicode) + assert isinstance(wf.workflowdir, unicode) + assert isinstance(wf.workflowfile(s), unicode) + assert isinstance(wf.workflowfile(u), unicode) + + +if __name__ == '__main__': # pragma: no cover + pytest.main([__file__]) diff --git a/tests/test_workflow_env.py b/tests/test_workflow_env.py new file mode 100644 index 00000000..1e084386 --- /dev/null +++ b/tests/test_workflow_env.py @@ -0,0 +1,74 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for environment/info.plist.""" + +from __future__ import print_function, unicode_literals + +import logging +import os + +import pytest + +from workflow.workflow import Workflow + +from .conftest import ( + env, COMMON, ENV_V4, + BUNDLE_ID, WORKFLOW_NAME, +) +from .util import INFO_PLIST_PATH, dump_env + + +def test_file(infopl): + """info.plist""" + wf = Workflow() + assert wf.name == WORKFLOW_NAME + assert wf.bundleid == BUNDLE_ID + + +def test_file_missing(): + """Info.plist missing""" + wf = Workflow() + assert not os.path.exists(INFO_PLIST_PATH) + with pytest.raises(IOError): + wf.workflowdir + + +def test_env(wf): + """Alfred environmental variables""" + env = COMMON.copy() + env.update(ENV_V4) + for k, v in env.items(): + k = k.replace('alfred_', '') + if k in ('debug', 'version_build', 'theme_subtext'): + assert int(v) == wf.alfred_env[k] + else: + assert isinstance(wf.alfred_env[k], unicode) + assert unicode(v) == wf.alfred_env[k] + + assert wf.datadir == env['alfred_workflow_data'] + assert wf.cachedir == env['alfred_workflow_cache'] + assert wf.bundleid == env['alfred_workflow_bundleid'] + assert wf.name == env['alfred_workflow_name'] + + +def test_alfred_debugger(alfred4): + """Alfred debugger status""" + # With debugger on + with 
env(alfred_debug='1'): + dump_env() + wf = Workflow() + assert wf.debugging, "Alfred's debugger not open" + assert wf.logger.getEffectiveLevel() == logging.DEBUG + wf.reset() + + # With debugger off + with env(alfred_debug=None): + dump_env() + wf = Workflow() + assert not wf.debugging, "Alfred's debugger is not closed" + assert wf.logger.getEffectiveLevel() == logging.INFO + wf.reset() diff --git a/tests/test_workflow_files.py b/tests/test_workflow_files.py new file mode 100644 index 00000000..ca7bf1f4 --- /dev/null +++ b/tests/test_workflow_files.py @@ -0,0 +1,381 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for Workflow directory & file APIs.""" + +from __future__ import print_function, unicode_literals + +import json +import os +import time + +import pytest + +from workflow import manager, Workflow + +from conftest import env, ENV_V4, ENV_V2 + + +def test_directories(alfred4): + """Workflow directories""" + data = ENV_V4.get('alfred_workflow_data') + cache = ENV_V4.get('alfred_workflow_cache') + wf = Workflow() + assert wf.datadir == data + assert os.path.exists(wf.datadir) + assert wf.cachedir == cache + assert os.path.exists(wf.cachedir) + wf.reset() + + # defaults + with env(alfred_workflow_data=None, alfred_workflow_cache=None): + data = ENV_V2.get('alfred_workflow_data') + cache = ENV_V2.get('alfred_workflow_cache') + wf = Workflow() + assert wf.datadir == data + assert os.path.exists(wf.datadir) + assert wf.cachedir == cache + assert os.path.exists(wf.cachedir) + wf.reset() + + +def test_create_directories(alfred4, tempdir): + """Workflow creates directories.""" + data = os.path.join(tempdir, 'data') + cache = os.path.join(tempdir, 'cache') + + assert not os.path.exists(data) + assert not os.path.exists(cache) + + with env(alfred_workflow_data=data, + alfred_workflow_cache=cache): + wf = Workflow() + assert wf.datadir == data + assert 
os.path.exists(data) + assert wf.cachedir == cache + assert os.path.exists(cache) + wf.reset() + + +def test_cached_data(wf): + """Cached data stored""" + data = {'key1': 'value1'} + d = wf.cached_data('test', lambda: data, max_age=10) + assert data == d + + +def test_cached_data_deleted(wf): + """Cached data deleted""" + data = {'key1': 'value1'} + d = wf.cached_data('test', lambda: data, max_age=10) + assert data == d + assert wf.cache_data('test', None) is None + assert not os.path.exists(wf.cachefile('test.cpickle')) + # Test alternate code path for non-existent file + assert wf.cache_data('test', None) is None + + +def test_delete_all_cache_file(wf): + """Cached data are all deleted""" + data = {'key1': 'value1'} + test_file1 = 'test1.cpickle' + test_file2 = 'test2.cpickle' + + wf.cached_data('test1', lambda: data, max_age=10) + wf.cached_data('test2', lambda: data, max_age=10) + assert os.path.exists(wf.cachefile(test_file1)) + assert os.path.exists(wf.cachefile(test_file2)) + wf.clear_cache() + assert not os.path.exists(wf.cachefile(test_file1)) + assert not os.path.exists(wf.cachefile(test_file2)) + + +def test_delete_all_cache_file_with_filter_func(wf): + """Only part of cached data are deleted""" + data = {'key1': 'value1'} + test_file1 = 'test1.cpickle' + test_file2 = 'test2.cpickle' + + def filter_func(file): + if file == test_file1: + return True + else: + return False + + wf.cached_data('test1', lambda: data, max_age=10) + wf.cached_data('test2', lambda: data, max_age=10) + assert os.path.exists(wf.cachefile(test_file1)) + assert os.path.exists(wf.cachefile(test_file2)) + wf.clear_cache(filter_func) + assert not os.path.exists(wf.cachefile(test_file1)) + assert os.path.exists(wf.cachefile(test_file2)) + wf.clear_cache() + assert not os.path.exists(wf.cachefile(test_file2)) + + +def test_cached_data_callback(wf): + """Cached data callback""" + called = {'called': False} + data = [1, 2, 3] + + def getdata(): + called['called'] = True + return data + + d 
= wf.cached_data('test', getdata, max_age=10) + assert d == data + assert called['called'] is True + + +def test_cached_data_no_callback(wf): + """Cached data no callback""" + d = wf.cached_data('nonexistent', None) + assert d is None + + +def test_cache_expires(wf): + """Cached data expires""" + data = ('hello', 'goodbye') + called = {'called': False} + + def getdata(): + called['called'] = True + return data + + d = wf.cached_data('test', getdata, max_age=1) + assert d == data + assert called['called'] is True + # should be loaded from cache + called['called'] = False + d2 = wf.cached_data('test', getdata, max_age=1) + assert d2 == data + assert called['called'] is not True + # cache has expired + time.sleep(1) + # should be loaded from cache (no expiry) + d3 = wf.cached_data('test', getdata, max_age=0) + assert d3 == data + assert called['called'] is not True + # should hit data func (cached data older than 1 sec) + d4 = wf.cached_data('test', getdata, max_age=1) + assert d4 == data + assert called['called'] is True + + +def test_cache_fresh(wf): + """Cached data is fresh""" + data = 'This is my data' + d = wf.cached_data('test', lambda: data, max_age=1) + assert d == data + assert wf.cached_data_fresh('test', max_age=10) + + +def test_cache_fresh_non_existent(wf): + """Non-existent cache data is not fresh""" + assert not wf.cached_data_fresh('popsicle', max_age=10000) + + +def test_cache_serializer(wf): + """Cache serializer""" + # default + assert wf.cache_serializer == 'cpickle' + # unsupported format + with pytest.raises(ValueError): + wf.cache_serializer = 'non-existent' + # default unchanged + assert wf.cache_serializer == 'cpickle' + wf.cache_serializer = 'pickle' + # other built-in + assert wf.cache_serializer == 'pickle' + + +def test_alternative_cache_serializer(wf): + """Alternative cache serializer""" + data = {'key1': 'value1'} + assert wf.cache_serializer == 'cpickle' + wf.cache_data('test', data) + assert 
os.path.exists(wf.cachefile('test.cpickle')) + assert wf.cached_data('test') == data + + wf.cache_serializer = 'pickle' + assert wf.cached_data('test') is None + wf.cache_data('test', data) + assert os.path.exists(wf.cachefile('test.pickle')) + assert wf.cached_data('test') == data + + wf.cache_serializer = 'json' + assert wf.cached_data('test') is None + wf.cache_data('test', data) + assert os.path.exists(wf.cachefile('test.json')) + assert wf.cached_data('test') == data + + +def test_custom_cache_serializer(wf): + """Custom cache serializer""" + data = {'key1': 'value1'} + + class MySerializer(object): + """Simple serializer""" + + @classmethod + def load(self, file_obj): + return json.load(file_obj) + + @classmethod + def dump(self, obj, file_obj): + return json.dump(obj, file_obj, indent=2) + + manager.register('spoons', MySerializer) + try: + assert not os.path.exists(wf.cachefile('test.spoons')) + wf.cache_serializer = 'spoons' + wf.cache_data('test', data) + assert os.path.exists(wf.cachefile('test.spoons')) + assert wf.cached_data('test') == data + finally: + manager.unregister('spoons') + + +def _stored_data_paths(wf, name, serializer): + """Return list of paths created when storing data""" + metadata = wf.datafile('.{}.alfred-workflow'.format(name)) + datapath = wf.datafile(name + '.' 
+ serializer) + return [metadata, datapath] + + +def test_data_serializer(wf): + """Data serializer""" + # default + assert wf.data_serializer == 'cpickle' + # unsupported format + with pytest.raises(ValueError): + wf.data_serializer = 'non-existent' + # default unchanged + assert wf.data_serializer == 'cpickle' + wf.data_serializer = 'pickle' + # other built-in + assert wf.data_serializer == 'pickle' + + +def test_alternative_data_serializer(wf): + """Alternative data serializer""" + data = {'key1': 'value1'} + assert wf.data_serializer == 'cpickle' + wf.store_data('test', data) + for path in _stored_data_paths(wf, 'test', 'cpickle'): + assert os.path.exists(path) + assert wf.stored_data('test') == data + + wf.data_serializer = 'pickle' + assert wf.stored_data('test') == data + wf.store_data('test', data) + for path in _stored_data_paths(wf, 'test', 'pickle'): + assert os.path.exists(path) + assert wf.stored_data('test') == data + + wf.data_serializer = 'json' + assert wf.stored_data('test') == data + wf.store_data('test', data) + for path in _stored_data_paths(wf, 'test', 'json'): + assert os.path.exists(path) + assert wf.stored_data('test') == data + + +def test_non_existent_stored_data(wf): + """Non-existent stored data""" + assert wf.stored_data('banjo magic') is None + + +def test_borked_stored_data(wf): + """Borked stored data""" + data = {'key7': 'value7'} + + wf.store_data('test', data) + metadata, datapath = _stored_data_paths(wf, 'test', 'cpickle') + os.unlink(metadata) + assert wf.stored_data('test') is None + + wf.store_data('test', data) + metadata, datapath = _stored_data_paths(wf, 'test', 'cpickle') + os.unlink(datapath) + assert wf.stored_data('test') is None + + wf.store_data('test', data) + metadata, datapath = _stored_data_paths(wf, 'test', 'cpickle') + with open(metadata, 'wb') as file_obj: + file_obj.write('bangers and mash') + wf.logger.debug('Changed format to `bangers and mash`') + with pytest.raises(ValueError): + wf.stored_data('test') + 
+ +def test_reject_settings(wf): + """Refuse to overwrite settings.json""" + data = {'key7': 'value7'} + wf.data_serializer = 'json' + with pytest.raises(ValueError): + wf.store_data('settings', data) + + +def test_invalid_data_serializer(wf): + """Invalid data serializer""" + data = {'key7': 'value7'} + with pytest.raises(ValueError): + wf.store_data('test', data, 'spong') + + +def test_delete_stored_data(wf): + """Delete stored data""" + data = {'key7': 'value7'} + + paths = _stored_data_paths(wf, 'test', 'cpickle') + + wf.store_data('test', data) + assert wf.stored_data('test') == data + wf.store_data('test', None) + assert wf.stored_data('test') is None + + for p in paths: + assert not os.path.exists(p) + + +def test_delete_all_stored_data_file(wf): + """Stored data are all deleted""" + data = {'key1': 'value1'} + test_file1 = 'test1.cpickle' + test_file2 = 'test2.cpickle' + + wf.store_data('test1', data) + wf.store_data('test2', data) + assert os.path.exists(wf.datafile(test_file1)) + assert os.path.exists(wf.datafile(test_file2)) + wf.clear_data() + assert not os.path.exists(wf.datafile(test_file1)) + assert not os.path.exists(wf.datafile(test_file2)) + + +def test_delete_all_data_file_with_filter_func(wf): + """Only part of stored data are deleted""" + data = {'key1': 'value1'} + test_file1 = 'test1.cpickle' + test_file2 = 'test2.cpickle' + + def filter_func(file): + if file == test_file1: + return True + else: + return False + + wf.store_data('test1', data) + wf.store_data('test2', data) + + assert os.path.exists(wf.datafile(test_file1)) + assert os.path.exists(wf.datafile(test_file2)) + wf.clear_data(filter_func) + assert not os.path.exists(wf.datafile(test_file1)) + assert os.path.exists(wf.datafile(test_file2)) + wf.clear_data() + assert not os.path.exists(wf.datafile(test_file2)) diff --git a/tests/test_workflow_filter.py b/tests/test_workflow_filter.py new file mode 100644 index 00000000..593cce49 --- /dev/null +++ b/tests/test_workflow_filter.py @@ 
-0,0 +1,207 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for :meth:`workflow.Workflow.filter`.""" + +from __future__ import print_function, unicode_literals + +import pytest + +from workflow.workflow import ( + MATCH_ALL, MATCH_ALLCHARS, + MATCH_ATOM, MATCH_CAPITALS, MATCH_STARTSWITH, + MATCH_SUBSTRING, MATCH_INITIALS_CONTAIN, + MATCH_INITIALS_STARTSWITH, +) + +SEARCH_ITEMS = [ + ('Test Item One', MATCH_STARTSWITH), + ('test item two', MATCH_STARTSWITH), + ('TwoExtraSpecialTests', MATCH_CAPITALS), + ('this-is-a-test', MATCH_ATOM), + ('the extra special trials', MATCH_INITIALS_STARTSWITH), + ('not the extra special trials', MATCH_INITIALS_CONTAIN), + ('intestinal fortitude', MATCH_SUBSTRING), + ('the splits', MATCH_ALLCHARS), + ('nomatch', 0), +] + +SEARCH_ITEMS_DIACRITICS = [ + # search key, query + ('Änderungen vorbehalten', 'av'), + ('Änderungen', 'anderungen'), + ('überwiegend bewolkt', 'ub'), + ('überwiegend', 'uberwiegend'), + ('Öffnungszeiten an Feiertagen', 'offnungszeiten'), + ('Öffnungszeiten an Feiertagen', 'oaf'), + ('Fußpilz', 'fuss'), + ('salé', 'sale') +] + +PUNCTUATION_DATA = [ + ('"test"', '"test"'), + ('„wat denn?“', '"wat denn?"'), + ('‚wie dat denn?‘', "'wie dat denn?'"), + ('“test”', '"test"'), + ('and—why—not', 'and-why-not'), + ('10–20', '10-20'), + ('Shady’s back', "Shady's back"), +] + + +def _print_results(results): + """Print results of Workflow.filter""" + for item, score, rule in results: + print('{!r} (rule {}) : {}'.format(item[0], rule, score)) + + +def test_filter_all_rules(wf): + """Filter: all rules""" + results = wf.filter('test', SEARCH_ITEMS, key=lambda x: x[0], + ascending=True, match_on=MATCH_ALL) + assert len(results) == 8 + # now with scores, rules + results = wf.filter('test', SEARCH_ITEMS, key=lambda x: x[0], + include_score=True, match_on=MATCH_ALL) + assert len(results) == 8 + for item, score, rule in results: 
+ wf.logger.debug('%s : %s', item, score) + for value, r in SEARCH_ITEMS: + if value == item[0]: + assert rule == r + # self.assertTrue(False) + + +def test_filter_no_caps(wf): + """Filter: no caps""" + results = wf.filter('test', SEARCH_ITEMS, key=lambda x: x[0], + ascending=True, + match_on=MATCH_ALL ^ MATCH_CAPITALS, + include_score=True) + _print_results(results) + for _, _, rule in results: + assert rule != MATCH_CAPITALS + # assert len(results) == 7 + + +def test_filter_only_caps(wf): + """Filter: only caps""" + results = wf.filter('test', SEARCH_ITEMS, key=lambda x: x[0], + ascending=True, + match_on=MATCH_CAPITALS, + include_score=True) + _print_results(results) + assert len(results) == 1 + + +def test_filter_max_results(wf): + """Filter: max results""" + results = wf.filter('test', SEARCH_ITEMS, key=lambda x: x[0], + ascending=True, max_results=4) + assert len(results) == 4 + + +def test_filter_min_score(wf): + """Filter: min score""" + results = wf.filter('test', SEARCH_ITEMS, key=lambda x: x[0], + ascending=True, min_score=90, + include_score=True) + assert len(results) == 6 + + +def test_filter_folding(wf): + """Filter: diacritic folding""" + for key, query in SEARCH_ITEMS_DIACRITICS: + results = wf.filter(query, [key], min_score=90, + include_score=True) + assert len(results) == 1 + + +def test_filter_no_folding(wf): + """Filter: folding turned off for non-ASCII query""" + data = ['fühler', 'fuhler', 'fübar', 'fubar'] + results = wf.filter('fü', data) + assert len(results) == 2 + + +def test_filter_folding_off(wf): + """Filter: diacritic folding off""" + for key, query in SEARCH_ITEMS_DIACRITICS: + results = wf.filter(query, [key], min_score=90, + include_score=True, + fold_diacritics=False) + assert len(results) == 0 + + +def test_filter_folding_force_on(wf): + """Filter: diacritic folding forced on""" + wf.settings['__workflow_diacritic_folding'] = True + for key, query in SEARCH_ITEMS_DIACRITICS: + results = wf.filter(query, [key], min_score=90, + 
include_score=True, + fold_diacritics=False) + assert len(results) == 1 + + +def test_filter_folding_force_off(wf): + """Filter: diacritic folding forced off""" + wf.settings['__workflow_diacritic_folding'] = False + for key, query in SEARCH_ITEMS_DIACRITICS: + results = wf.filter(query, [key], min_score=90, + include_score=True) + assert len(results) == 0 + + +def test_filter_empty_key(wf): + """Filter: empty keys are ignored""" + data = ['bob', 'sue', 'henry'] + + def key(s): + """Return empty key""" + return '' + + results = wf.filter('lager', data, key) + assert len(results) == 0 + + +def test_filter_empty_query_words(wf): + """Filter: empty query returns all results""" + data = ['bob', 'sue', 'henry'] + assert wf.filter(' ', data) == data + assert wf.filter('', data) == data + + +def test_filter_empty_query_words_ignored(wf): + """Filter: empty query words ignored""" + data = ['bob jones', 'sue smith', 'henry rogers'] + results = wf.filter('bob jones', data) + assert len(results) == 1 + + +def test_filter_identical_items(wf): + """Filter: identical items are not discarded""" + data = ['bob', 'bob', 'bob'] + results = wf.filter('bob', data) + assert len(results) == len(data) + + +def test_filter_reversed_results(wf): + """Filter: results reversed""" + data = ['bob', 'bobby', 'bobby smith'] + results = wf.filter('bob', data) + assert results == data + results = wf.filter('bob', data, ascending=True) + assert results == data[::-1] + + +def test_punctuation(wf): + """Punctuation: dumbified""" + for input, output in PUNCTUATION_DATA: + assert wf.dumbify_punctuation(input) == output + + +if __name__ == '__main__': # pragma: no cover + pytest.main([__file__]) diff --git a/tests/test_workflow_import.py b/tests/test_workflow_import.py new file mode 100644 index 00000000..43cc2248 --- /dev/null +++ b/tests/test_workflow_import.py @@ -0,0 +1,35 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 
2019-05-05 + +"""Unit tests for sys.path manipulation.""" + +from __future__ import print_function, unicode_literals + +import os +import sys + +import pytest + +from workflow.workflow import Workflow + + +LIBS = [os.path.join(os.path.dirname(__file__), b'lib')] + + +def test_additional_libs(alfred4, infopl): + """Additional libraries""" + wf = Workflow(libraries=LIBS) + for path in LIBS: + assert path in sys.path + + assert sys.path[0:len(LIBS)] == LIBS + import youcanimportme + youcanimportme.noop() + wf.reset() + + +if __name__ == '__main__': # pragma: no cover + pytest.main([__file__]) diff --git a/tests/test_workflow_keychain.py b/tests/test_workflow_keychain.py new file mode 100644 index 00000000..dae2a201 --- /dev/null +++ b/tests/test_workflow_keychain.py @@ -0,0 +1,55 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for Keychain API.""" + +from __future__ import print_function, unicode_literals + +import pytest + +from workflow.workflow import PasswordNotFound, KeychainError + +from .conftest import BUNDLE_ID + + +ACCOUNT = 'this-is-my-test-account' +PASSWORD = 'hunter2' +PASSWORD2 = 'hunter2ing' +PASSWORD3 = 'hünter\\“2”' + + +def test_keychain(wf): + """Save/get/delete password""" + # ensure password is unset + try: + wf.delete_password(ACCOUNT) + except PasswordNotFound: + pass + + with pytest.raises(PasswordNotFound): + wf.delete_password(ACCOUNT) + with pytest.raises(PasswordNotFound): + wf.get_password(ACCOUNT) + + wf.save_password(ACCOUNT, PASSWORD) + assert wf.get_password(ACCOUNT) == PASSWORD + assert wf.get_password(ACCOUNT, BUNDLE_ID) + + # set same password + wf.save_password(ACCOUNT, PASSWORD) + assert wf.get_password(ACCOUNT) == PASSWORD + + # set different password + wf.save_password(ACCOUNT, PASSWORD2) + assert wf.get_password(ACCOUNT) == PASSWORD2 + + # set non-ASCII password + wf.save_password(ACCOUNT, PASSWORD3) + assert 
wf.get_password(ACCOUNT) == PASSWORD3 + + # bad call to _call_security + with pytest.raises(KeychainError): + wf._call_security('pants', BUNDLE_ID, ACCOUNT) diff --git a/tests/test_workflow_magic.py b/tests/test_workflow_magic.py old mode 100755 new mode 100644 index 9f5f908a..7870014f --- a/tests/test_workflow_magic.py +++ b/tests/test_workflow_magic.py @@ -8,8 +8,7 @@ # Created on 2017-05-06 # -""" -""" +"""Unit tests for magic arguments.""" from __future__ import print_function @@ -19,10 +18,11 @@ from workflow import Workflow -from util import VersionFile, WorkflowMock +from .conftest import env +from .util import VersionFile, WorkflowMock -def test_list_magic(info2): +def test_list_magic(infopl): """Magic: list magic""" # TODO: Verify output somehow with WorkflowMock(['script', 'workflow:magic']) as c: @@ -30,33 +30,44 @@ def test_list_magic(info2): # Process magic arguments wf.args assert not c.cmd + wf.reset() -def test_version_magic(info2): +def test_version_magic(infopl): """Magic: version magic""" # TODO: Verify output somehow vstr = '1.9.7' - - # Versioned - with WorkflowMock(['script', 'workflow:version']) as c: - with VersionFile(vstr): + # Version from file(s) + with env(alfred_workflow_version=None): + # Version file + with WorkflowMock(['script', 'workflow:version']) as c: + with VersionFile(vstr): + wf = Workflow() + # Process magic arguments + wf.args + assert not c.cmd + wf.reset() + + # info.plist + with WorkflowMock(['script', 'workflow:version']) as c: wf = Workflow() # Process magic arguments wf.args + assert not c.cmd + wf.reset() - assert not c.cmd - # wf.logger.debug('STDERR : {0}'.format(c.stderr)) - - # Unversioned - with WorkflowMock(['script', 'workflow:version']) as c: - wf = Workflow() - # Process magic arguments - wf.args - assert not c.cmd + # Environment variable + with env(alfred_workflow_version=vstr): + with WorkflowMock(['script', 'workflow:version']) as c: + wf = Workflow() + # Process magic arguments + wf.args + assert not 
c.cmd + wf.reset() -def test_openhelp(info2): +def test_openhelp(infopl): """Magic: open help URL""" url = 'http://www.deanishe.net/alfred-workflow/' with WorkflowMock(['script', 'workflow:help']) as c: @@ -64,63 +75,70 @@ def test_openhelp(info2): # Process magic arguments wf.args assert c.cmd == ['open', url] + wf.reset() -def test_openhelp_no_url(info2): +def test_openhelp_no_url(infopl): """Magic: no help URL""" with WorkflowMock(['script', 'workflow:help']) as c: wf = Workflow() # Process magic arguments wf.args assert not c.cmd + wf.reset() -def test_openlog(info2): +def test_openlog(infopl): """Magic: open logfile""" with WorkflowMock(['script', 'workflow:openlog']) as c: wf = Workflow() # Process magic arguments wf.args assert c.cmd == ['open', wf.logfile] + wf.reset() -def test_cachedir(info2): +def test_cachedir(infopl): """Magic: open cachedir""" with WorkflowMock(['script', 'workflow:opencache']) as c: wf = Workflow() # Process magic arguments wf.args assert c.cmd == ['open', wf.cachedir] + wf.reset() -def test_datadir(info2): +def test_datadir(infopl): """Magic: open datadir""" with WorkflowMock(['script', 'workflow:opendata']) as c: wf = Workflow() # Process magic arguments wf.args assert c.cmd == ['open', wf.datadir] + wf.reset() -def test_workflowdir(info2): +def test_workflowdir(infopl): """Magic: open workflowdir""" with WorkflowMock(['script', 'workflow:openworkflow']) as c: wf = Workflow() # Process magic arguments wf.args assert c.cmd == ['open', wf.workflowdir] + wf.reset() -def test_open_term(info2): +def test_open_term(infopl): """Magic: open Terminal""" with WorkflowMock(['script', 'workflow:openterm']) as c: wf = Workflow() # Process magic arguments wf.args assert c.cmd == ['open', '-a', 'Terminal', wf.workflowdir] + wf.reset() -def test_delete_data(info2): +def test_delete_data(infopl): """Magic: delete data""" with WorkflowMock(['script', 'workflow:deldata']): wf = Workflow() @@ -132,9 +150,10 @@ def test_delete_data(info2): # Process 
magic arguments wf.args assert not os.path.exists(testpath) + wf.reset() -def test_delete_cache(info2): +def test_delete_cache(infopl): """Magic: delete cache""" with WorkflowMock(['script', 'workflow:delcache']): wf = Workflow() @@ -146,9 +165,10 @@ def test_delete_cache(info2): # Process magic arguments wf.args assert not os.path.exists(testpath) + wf.reset() -def test_reset(info2): +def test_reset(infopl): """Magic: reset""" with WorkflowMock(['script', 'workflow:reset']): wf = Workflow() @@ -169,9 +189,10 @@ def test_reset(info2): for p in (datatest, cachetest, settings_path): assert not os.path.exists(p) + wf.reset() -def test_delete_settings(info2): +def test_delete_settings(infopl): """Magic: delete settings""" with WorkflowMock(['script', 'workflow:delsettings']): wf = Workflow() @@ -187,9 +208,10 @@ def test_delete_settings(info2): wf3 = Workflow() assert 'key' not in wf3.settings + wf.reset() -def test_folding(info2): +def test_folding(infopl): """Magic: folding""" with WorkflowMock(['script', 'workflow:foldingdefault']): wf = Workflow() @@ -214,9 +236,10 @@ def test_folding(info2): # Process magic arguments wf.args assert wf.settings.get('__workflow_diacritic_folding') is False + wf.reset() -def test_prereleases(info2): +def test_prereleases(infopl): """Magic: prereleases""" with WorkflowMock(['script', 'workflow:prereleases']): wf = Workflow() @@ -224,6 +247,7 @@ def test_prereleases(info2): wf.args assert wf.settings.get('__workflow_prereleases') is True assert wf.prereleases is True + wf.reset() with WorkflowMock(['script', 'workflow:noprereleases']): wf = Workflow() @@ -231,10 +255,11 @@ def test_prereleases(info2): wf.args assert wf.settings.get('__workflow_prereleases') is False assert wf.prereleases is False + wf.reset() -def test_update_settings_override_magic_prereleases(info2): - """Magic: pre-release updates can be overridden by `True` value for `prereleases` key in `update_settings`""" +def 
test_update_settings_override_magic_prereleases(infopl): + """Magic: pre-release updates can be overridden by `update_settings`""" with WorkflowMock(['script', 'workflow:prereleases']): d = {'prereleases': True} wf = Workflow(update_settings=d) @@ -242,6 +267,7 @@ def test_update_settings_override_magic_prereleases(info2): wf.args assert wf.settings.get('__workflow_prereleases') is True assert wf.prereleases is True + wf.reset() with WorkflowMock(['script', 'workflow:noprereleases']): wf = Workflow(update_settings=d) @@ -249,6 +275,7 @@ def test_update_settings_override_magic_prereleases(info2): wf.args assert wf.settings.get('__workflow_prereleases') is False assert wf.prereleases is True + wf.reset() if __name__ == '__main__': # pragma: no cover diff --git a/tests/test_workflow_magic_alfred2.py b/tests/test_workflow_magic_alfred2.py new file mode 100644 index 00000000..d832a02b --- /dev/null +++ b/tests/test_workflow_magic_alfred2.py @@ -0,0 +1,52 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created on 2019-05-05 + +"""Unit tests for Alfred 2 magic argument handling.""" + +from __future__ import print_function + +import pytest + +from workflow import Workflow + +from .conftest import env +from .util import VersionFile, WorkflowMock + + +def test_version_magic(infopl2): + """Magic: version magic (Alfred 2)""" + vstr = '1.9.7' + # Version from version file + with env(alfred_workflow_version=None): + # Versioned + with WorkflowMock(['script', 'workflow:version']) as c: + with VersionFile(vstr): + wf = Workflow() + # Process magic arguments + wf.args + assert not c.cmd + wf.reset() + + # Unversioned + with WorkflowMock(['script', 'workflow:version']) as c: + wf = Workflow() + # Process magic arguments + wf.args + assert not c.cmd + wf.reset() + + # Version from environment variable + with env(alfred_workflow_version=vstr): + with WorkflowMock(['script', 'workflow:version']) as c: + wf = 
Workflow() + # Process magic arguments + wf.args + assert not c.cmd + wf.reset() + + +if __name__ == '__main__': # pragma: no cover + pytest.main([__file__]) diff --git a/tests/test_workflow_run.py b/tests/test_workflow_run.py new file mode 100644 index 00000000..130ee469 --- /dev/null +++ b/tests/test_workflow_run.py @@ -0,0 +1,116 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for Workflow.run.""" + +from __future__ import print_function, unicode_literals + +from StringIO import StringIO +import sys + +import pytest + +from workflow.workflow import Workflow + +from conftest import env + + +def test_run_fails(infopl): + """Run fails""" + wf = Workflow() + + def cb(wf2): + assert wf2 is wf + raise ValueError('Have an error') + + wf.help_url = 'http://www.deanishe.net/alfred-workflow/' + ret = wf.run(cb) + assert ret == 1 + + # read name from info.plist + with env(alfred_workflow_name=None): + wf = Workflow() + wf.name + ret = wf.run(cb) + assert ret == 1 + + # named after bundleid + wf = Workflow() + wf.bundleid + ret = wf.run(cb) + assert ret == 1 + + wf.reset() + + +def test_run_fails_with_xml_output(wf): + """Run fails with XML output""" + error_text = 'Have an error' + stdout = sys.stdout + buf = StringIO() + sys.stdout = buf + + def cb(wf2): + assert wf2 is wf + raise ValueError(error_text) + + ret = wf.run(cb) + + sys.stdout = stdout + output = buf.getvalue() + buf.close() + + assert ret == 1 + assert error_text in output + assert ' mt1) s2 = Settings(self.settings_file) diff --git a/tests/test_workflow_update.py b/tests/test_workflow_update.py old mode 100755 new mode 100644 index 5469a6bf..72485fe3 --- a/tests/test_workflow_update.py +++ b/tests/test_workflow_update.py @@ -8,18 +8,27 @@ # Created on 2016-02-27 # +"""Unit tests for Workflow's update API.""" + from __future__ import print_function from contextlib import contextmanager +import os 
import pytest -import pytest_localserver +import pytest_localserver # noqa: F401 from workflow import Workflow from workflow import update -from util import WorkflowMock, create_info_plist, delete_info_plist -from test_update import fakeresponse, DATA_JSON, HTTP_HEADERS_JSON +from .conftest import env +from .util import ( + WorkflowMock, + create_info_plist, + delete_info_plist, + dump_env, +) +from test_update import fakeresponse, RELEASES_JSON, HTTP_HEADERS_JSON UPDATE_SETTINGS = { @@ -37,16 +46,15 @@ def dummy(*args, **kwargs): @contextmanager def ctx(args=None, update_settings=None, clear=True): + """Context manager that provides a Workflow and WorkflowMock.""" update_settings = update_settings or UPDATE_SETTINGS args = args or [] - c = dummy() if args: # Add placeholder for ARGV[0] args = ['script'] + args + create_info_plist() - if args: - c = WorkflowMock(args) - with c: + with WorkflowMock(args) as c: wf = Workflow(update_settings=update_settings) yield wf, c if clear: @@ -73,14 +81,35 @@ def fake(wf): print('update_available={0!r}'.format(wf.update_available)) -def test_update(httpserver): +def test_check_update(httpserver, alfred4): + """Auto-update installs update""" + def update(wf): + wf.check_update() + + # Mock subprocess.call etc. so the script doesn't try to + # update the workflow in Alfred + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): + with ctx() as (wf, c): + wf.run(update) + assert c.cmd[0] == '/usr/bin/python' + assert c.cmd[2] == '__workflow_update_check' + + update_settings = UPDATE_SETTINGS.copy() + update_settings['prereleases'] = True + with ctx(update_settings=update_settings) as (wf, c): + wf.run(update) + assert c.cmd[0] == '/usr/bin/python' + assert c.cmd[2] == '__workflow_update_check' + + +def test_install_update(httpserver, alfred4): """Auto-update installs update""" def fake(wf): return # Mock subprocess.call etc. 
so the script doesn't try to # update the workflow in Alfred - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): with ctx(['workflow:update'], clear=False) as (wf, c): wf.run(fake) wf.args @@ -91,53 +120,74 @@ def fake(wf): assert c.cmd[2] == '__workflow_update_install' update_settings = UPDATE_SETTINGS.copy() - update_settings['version'] = 'v6.0' - with ctx(['workflow:update'], update_settings) as (wf, c): - wf.run(fake) - wf.args - - # Update command wasn't called - assert c.cmd == () - - -def test_update_with_prereleases(httpserver): + del update_settings['version'] + with env(alfred_workflow_version='v9.0'): + with ctx(['workflow:update'], + update_settings, clear=False) as (wf, c): + wf.run(fake) + wf.args + # Update command wasn't called + assert c.cmd == (), "unexpected command call" + + # via update settings + with env(alfred_workflow_version=None): + update_settings['version'] = 'v9.0' + with ctx(['workflow:update'], + update_settings, clear=False) as (wf, c): + wf.run(fake) + wf.args + # Update command wasn't called + assert c.cmd == (), "unexpected command call" + + # unversioned + with env(alfred_workflow_version=None): + del update_settings['version'] + with ctx(['workflow:update'], update_settings) as (wf, c): + wf.run(fake) + with pytest.raises(ValueError): + wf.args + + +def test_install_update_prereleases(httpserver, alfred4): """Auto-update installs update with pre-releases enabled""" def fake(wf): return # Mock subprocess.call etc. 
so the script doesn't try to # update the workflow in Alfred - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): update_settings = UPDATE_SETTINGS.copy() update_settings['prereleases'] = True with ctx(['workflow:update'], update_settings, clear=False) as (wf, c): wf.run(fake) wf.args - print('Magic update command : {0!r}'.format(c.cmd)) + print('Magic update command : {!r}'.format(c.cmd)) assert c.cmd[0] == '/usr/bin/python' assert c.cmd[2] == '__workflow_update_install' - update_settings = UPDATE_SETTINGS.copy() - update_settings['version'] = 'v7.1-beta' - update_settings['prereleases'] = True - with ctx(['workflow:update'], update_settings) as (wf, c): - wf.run(fake) - wf.args - - # Update command wasn't called - assert c.cmd == () + with env(alfred_workflow_version='v10.0-beta'): + update_settings = UPDATE_SETTINGS.copy() + update_settings['version'] = 'v10.0-beta' + update_settings['prereleases'] = True + with ctx(['workflow:update'], update_settings) as (wf, c): + wf.run(fake) + wf.args + dump_env() + # Update command wasn't called + assert c.cmd == () -def test_update_available(httpserver): +def test_update_available(httpserver, alfred4): """update_available property works""" - slug = UPDATE_SETTINGS['github_slug'] - v = UPDATE_SETTINGS['version'] - with fakeresponse(httpserver, DATA_JSON, HTTP_HEADERS_JSON): - with ctx() as (wf, c): + repo = UPDATE_SETTINGS['github_slug'] + v = os.getenv('alfred_workflow_version') + with fakeresponse(httpserver, RELEASES_JSON, HTTP_HEADERS_JSON): + with ctx() as (wf, _): + wf.reset() assert wf.update_available is False - update.check_update(slug, v) + update.check_update(repo, v) assert wf.update_available is True @@ -145,11 +195,7 @@ def test_update_turned_off(): """Auto-update turned off""" # Check update isn't performed if user has turned off # auto-update - - def fake(wf): - return - - with ctx() as (wf, c): + with ctx() as (wf, _): 
wf.settings['__workflow_autoupdate'] = False assert wf.check_update() is None diff --git a/tests/test_workflow_version.py b/tests/test_workflow_version.py deleted file mode 100755 index 153924bf..00000000 --- a/tests/test_workflow_version.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -# -# Copyright (c) 2016 Dean Jackson -# -# MIT Licence. See http://opensource.org/licenses/MIT -# -# Created on 2016-09-10 -# - -"""Unit tests for workflow version determination.""" - -from __future__ import print_function, unicode_literals - -import os -import pytest - -from workflow.workflow3 import Workflow3 -from workflow.update import Version - - -def test_version_info_plist(info3): - """Version from info.plist.""" - wf = Workflow3() - assert wf.version == Version('1.1.1') - - -def test_version_envvar(info3): - """Version from environment variable.""" - os.environ['alfred_workflow_version'] = '1.1.2' - wf = Workflow3() - try: - assert wf.version == Version('1.1.2') - finally: - del os.environ['alfred_workflow_version'] - - -def test_version_update_settings(info3): - """Version from update_settings.""" - wf = Workflow3(update_settings={'version': '1.1.3'}) - assert wf.version == Version('1.1.3') - - -if __name__ == '__main__': # pragma: no cover - pytest.main([__file__]) diff --git a/tests/test_workflow_versions.py b/tests/test_workflow_versions.py new file mode 100644 index 00000000..e233c12f --- /dev/null +++ b/tests/test_workflow_versions.py @@ -0,0 +1,188 @@ +# encoding: utf-8 +# Copyright (c) 2019 Dean Jackson +# MIT Licence applies http://opensource.org/licenses/MIT +# +# Created 2019-05-05 + +"""Unit tests for workflow version determination.""" + +from __future__ import print_function, unicode_literals + +import pytest + +from workflow.update import Version +from workflow.workflow import Workflow +from workflow.workflow3 import Workflow3 + +from .conftest import env, WORKFLOW_VERSION +from .util import VersionFile + + +def 
test_info_plist(infopl): + """Version from info.plist.""" + wf = Workflow3() + assert wf.version == Version('1.1.1'), "unexpected version" + + +def test_envvar(infopl): + """Version from environment variable.""" + v = '1.1.2' + with env(alfred_workflow_version=v): + wf = Workflow3() + assert wf.version == Version(v), "unexpected version" + # environment variables have priority + wf = Workflow3(update_settings={'version': '1.1.3'}) + assert wf.version == Version(v), "unexpected version" + + +def test_update_settings(infopl): + """Version from update_settings.""" + v = '1.1.3' + wf = Workflow3(update_settings={'version': v}) + assert wf.version == Version(v), "unexpected version" + + +def test_versions_from_settings(alfred4, infopl2): + """Workflow: version from `update_settings`""" + vstr = '1.9.7' + d = { + 'github_slug': 'deanishe/alfred-workflow', + 'version': vstr, + } + with env(alfred_workflow_version=None): + wf = Workflow(update_settings=d) + assert str(wf.version) == vstr + assert isinstance(wf.version, Version) + assert wf.version == Version(vstr) + + +def test_versions_from_file(alfred4, infopl2): + """Workflow: version from `version` file""" + vstr = '1.9.7' + with env(alfred_workflow_version=None): + with VersionFile(vstr): + wf = Workflow() + assert str(wf.version) == vstr + assert isinstance(wf.version, Version) + assert wf.version == Version(vstr) + + +def test_versions_from_info(alfred4, infopl): + """Workflow: version from info.plist""" + with env(alfred_workflow_version=None): + wf = Workflow() + assert str(wf.version) == WORKFLOW_VERSION + assert isinstance(wf.version, Version) + assert wf.version == Version(WORKFLOW_VERSION) + + +def test_first_run_no_version(alfred4, infopl2): + """Workflow: first_run fails on no version""" + with env(alfred_workflow_version=None): + wf = Workflow() + try: + with pytest.raises(ValueError): + wf.first_run + finally: + wf.reset() + + +def test_first_run_with_version(alfred4, infopl): + """Workflow: first_run""" + 
vstr = '1.9.7' + with env(alfred_workflow_version=vstr): + wf = Workflow() + assert wf.first_run is True + wf.reset() + + +def test_first_run_with_previous_run(alfred4, infopl): + """Workflow: first_run with previous run""" + vstr = '1.9.7' + last_vstr = '1.9.6' + with env(alfred_workflow_version=vstr): + wf = Workflow() + wf.set_last_version(last_vstr) + assert wf.first_run is True + assert wf.last_version_run == Version(last_vstr) + wf.reset() + + +def test_last_version_empty(wf): + """Workflow: last_version_run empty""" + assert wf.last_version_run is None + + +def test_last_version_on(alfred4, infopl): + """Workflow: last_version_run not empty""" + vstr = '1.9.7' + + with env(alfred_workflow_version=vstr): + wf = Workflow() + wf.set_last_version(vstr) + assert Version(vstr) == wf.last_version_run + wf.reset() + + # Set automatically + with env(alfred_workflow_version=vstr): + wf = Workflow() + wf.set_last_version() + assert Version(vstr) == wf.last_version_run + wf.reset() + + +def test_versions_no_version(alfred4, infopl2): + """Workflow: version is `None`""" + with env(alfred_workflow_version=None): + wf = Workflow() + assert wf.version is None + wf.reset() + + +def test_last_version_no_version(alfred4, infopl2): + """Workflow: last_version no version""" + with env(alfred_workflow_version=None): + wf = Workflow() + assert wf.set_last_version() is False + wf.reset() + + +def test_last_version_explicit_version(alfred4, infopl): + """Workflow: last_version explicit version""" + vstr = '1.9.6' + wf = Workflow() + assert wf.set_last_version(vstr) is True + assert wf.last_version_run == Version(vstr) + wf.reset() + + +def test_last_version_auto_version(alfred4, infopl): + """Workflow: last_version auto version""" + vstr = '1.9.7' + with env(alfred_workflow_version=vstr): + wf = Workflow() + assert wf.set_last_version() is True + assert wf.last_version_run == Version(vstr) + wf.reset() + + +def test_last_version_set_after_run(alfred4, infopl): + """Workflow: 
last_version set after `run()`""" + vstr = '1.9.7' + + def cb(wf): + return + + with env(alfred_workflow_version=vstr): + wf = Workflow() + assert wf.last_version_run is None + wf.run(cb) + + wf = Workflow() + assert wf.last_version_run == Version(vstr) + wf.reset() + + +def test_alfred_version(wf): + """Workflow: alfred_version correct.""" + assert wf.alfred_version == Version('4.0') diff --git a/tests/test_workflow_xml.py b/tests/test_workflow_xml.py old mode 100755 new mode 100644 index e7c083be..7c3b0749 --- a/tests/test_workflow_xml.py +++ b/tests/test_workflow_xml.py @@ -22,14 +22,11 @@ from workflow import Workflow -from util import InfoPlist - @pytest.fixture(scope='function') -def wf(): +def wf(infopl): """Create a :class:`~workflow.Workflow` object.""" - with InfoPlist(): - yield Workflow() + yield Workflow() @contextmanager diff --git a/tests/util.py b/tests/util.py index e3c9aa79..a1e0c238 100644 --- a/tests/util.py +++ b/tests/util.py @@ -8,9 +8,7 @@ # Created on 2014-08-17 # -""" -Stuff used in multiple tests. -""" +"""Stuff used in multiple tests.""" from __future__ import print_function, unicode_literals @@ -38,31 +36,56 @@ 'key1': 'value1', 'key2': 'hübner', 'mutable1': ['mutable', 'object'], - 'mutable2': {'mutable': ['nested', 'object']}, + 'mutable2': {'mutable': ['nested', 'object']}, } +class MockCall(object): + """Capture calls to `subprocess.check_output`.""" + + def __init__(self): + """Create new MockCall.""" + self.cmd = None + self._check_output_orig = None + + def _set_up(self): + self._check_output_orig = subprocess.check_output + subprocess.check_output = self._check_output + + def _tear_down(self): + subprocess.check_output = self._check_output_orig + + def _check_output(self, cmd, **kwargs): + self.cmd = cmd + + def __enter__(self): + self._set_up() + return self + + def __exit__(self, *args): + self._tear_down() + + class WorkflowMock(object): """Context manager that overrides funcs and variables for testing. 
c = WorkflowMock() with c: - subprocess.call([arg1, arg2]) - c.cmd -> (arg1, arg2) + subprocess.call(['program', 'arg']) + c.cmd # -> ('program', 'arg') """ def __init__(self, argv=None, exit=True, call=True, stderr=False): """Context manager that overrides funcs and variables for testing. - :param argv: list of arguments to replace ``sys.argv`` with - :type argv: list - :param exit: Override ``sys.exit`` with noop? - :param call: Override :func:`subprocess.call` and capture its - arguments in :attr:`cmd`, :attr:`args` and :attr:`kwargs`? + Args: + argv (list): list of arguments to replace ``sys.argv`` with + exit (bool): override ``sys.exit`` with noop? + call (bool): override :func:`subprocess.call` and capture its + arguments in :attr:`cmd`, :attr:`args` and :attr:`kwargs`? """ - self.argv = argv self.override_exit = exit self.override_call = call @@ -123,7 +146,7 @@ class VersionFile(object): """Context manager to create and delete `version` file.""" def __init__(self, version, path=None): - + """Create new context manager.""" self.version = version self.path = path or VERSION_PATH @@ -142,6 +165,7 @@ class FakePrograms(object): """Context manager to inject fake programs into ``PATH``.""" def __init__(self, *names, **names2codes): + """Create new context manager.""" self.tempdir = None self.orig_path = None self.programs = {} @@ -159,7 +183,7 @@ def __enter__(self): # Add new programs to front of PATH self.orig_path = os.getenv('PATH') - os.environ['PATH'] = '{0}:{1}'.format(self.tempdir, self.orig_path) + os.environ['PATH'] = self.tempdir + ':' + self.orig_path def __exit__(self, *args): os.environ['PATH'] = self.orig_path @@ -192,6 +216,13 @@ def __exit__(self, *args): delete_info_plist(self.dest_path) +def dump_env(): + """Print `os.environ` to STDOUT.""" + for k, v in os.environ.items(): + if k.startswith('alfred_'): + print('env: %s=%s' % (k, v)) + + def create_info_plist(source=INFO_PLIST_TEST, dest=INFO_PLIST_PATH): """Symlink ``source`` to ``dest``.""" 
if os.path.exists(source) and not os.path.exists(dest): diff --git a/tox.ini b/tox.ini index 9d13b493..97a2975d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,3 +1,47 @@ +[flake8] +ignore = + D100, + ; Missing docstring in magic method + D105, + D200, + D201, + D203, + D204, + D400, + D401, + ; Missing blank line after last section + D413, + ; missing whitespace around arithmetic operator + E226, + E402, + ; flake8-future-import. Ignore everything to make all + ; from __future__ imports optional + FI10, + FI11, + FI12, + FI13, + FI14, + FI15, + FI16, + FI17, + FI50, + FI51, + FI52, + FI53, + FI54, + FI55, + FI56, + FI57, + FI90, + ; line break before binary operator + W503, + ; line break after binary operator + W504, + +[pydocstyle] +add_ignore = D105,D203,D266,D400,D401,D413 + + [pytest] ; addopts = --cov-report term-missing --cov workflow --capture=fd --doctest-modules ; addopts = --cov-report term-missing --capture=fd --doctest-modules diff --git a/workflow/__init__.py b/workflow/__init__.py index 2c4f8c06..17636a42 100644 --- a/workflow/__init__.py +++ b/workflow/__init__.py @@ -64,7 +64,7 @@ __version__ = open(os.path.join(os.path.dirname(__file__), 'version')).read() __author__ = 'Dean Jackson' __licence__ = 'MIT' -__copyright__ = 'Copyright 2014-2017 Dean Jackson' +__copyright__ = 'Copyright 2014-2019 Dean Jackson' __all__ = [ 'Variables', diff --git a/workflow/background.py b/workflow/background.py index cd5400be..ba5c52aa 100644 --- a/workflow/background.py +++ b/workflow/background.py @@ -8,8 +8,8 @@ # Created on 2014-04-06 # -""" -This module provides an API to run commands in background processes. +"""This module provides an API to run commands in background processes. + Combine with the :ref:`caching API ` to work from cached data while you fetch fresh data in the background. 
diff --git a/workflow/notify.py b/workflow/notify.py index 4542c78d..a4b7f40e 100644 --- a/workflow/notify.py +++ b/workflow/notify.py @@ -11,9 +11,10 @@ # TODO: Exclude this module from test and code coverage in py2.6 """ -Post notifications via the macOS Notification Center. This feature -is only available on Mountain Lion (10.8) and later. It will -silently fail on older systems. +Post notifications via the macOS Notification Center. + +This feature is only available on Mountain Lion (10.8) and later. +It will silently fail on older systems. The main API is a single function, :func:`~workflow.notify.notify`. @@ -198,7 +199,7 @@ def notify(title='', text='', sound=None): env = os.environ.copy() enc = 'utf-8' env['NOTIFY_TITLE'] = title.encode(enc) - env['NOTIFY_MESSAGE'] = text.encode(enc) + env['NOTIFY_MESSAGE'] = text.encode(enc) env['NOTIFY_SOUND'] = sound.encode(enc) cmd = [n] retcode = subprocess.call(cmd, env=env) diff --git a/workflow/update.py b/workflow/update.py index 44bd1a8e..ffc6353f 100644 --- a/workflow/update.py +++ b/workflow/update.py @@ -23,6 +23,9 @@ from __future__ import print_function, unicode_literals +from collections import defaultdict +from functools import total_ordering +import json import os import tempfile import re @@ -34,8 +37,8 @@ # __all__ = [] -RELEASES_BASE = 'https://api.github.com/repos/{0}/releases' - +RELEASES_BASE = 'https://api.github.com/repos/{}/releases' +match_workflow = re.compile(r'\.alfred(\d+)?workflow$').search _wf = None @@ -48,6 +51,153 @@ def wf(): return _wf +@total_ordering +class Download(object): + """A workflow file that is available for download. + + .. versionadded: 1.37 + + Attributes: + url (str): URL of workflow file. + filename (str): Filename of workflow file. + version (Version): Semantic version of workflow. + prerelease (bool): Whether version is a pre-release. + alfred_version (Version): Minimum compatible version + of Alfred. 
+ + """ + + @classmethod + def from_dict(cls, d): + """Create a `Download` from a `dict`.""" + return cls(url=d['url'], filename=d['filename'], + version=Version(d['version']), + prerelease=d['prerelease']) + + @classmethod + def from_releases(cls, js): + """Extract downloads from GitHub releases. + + Searches releases with semantic tags for assets with + file extension .alfredworkflow or .alfredXworkflow where + X is a number. + + Files are returned sorted by latest version first. Any + releases containing multiple files with the same (workflow) + extension are rejected as ambiguous. + + Args: + js (str): JSON response from GitHub's releases endpoint. + + Returns: + list: Sequence of `Download`. + """ + releases = json.loads(js) + downloads = [] + for release in releases: + tag = release['tag_name'] + dupes = defaultdict(int) + try: + version = Version(tag) + except ValueError as err: + wf().logger.debug('ignored release: bad version "%s": %s', + tag, err) + continue + + dls = [] + for asset in release.get('assets', []): + url = asset.get('browser_download_url') + filename = os.path.basename(url) + m = match_workflow(filename) + if not m: + wf().logger.debug('unwanted file: %s', filename) + continue + + ext = m.group(0) + dupes[ext] = dupes[ext] + 1 + dls.append(Download(url, filename, version, + release['prerelease'])) + + valid = True + for ext, n in dupes.items(): + if n > 1: + wf().logger.debug('ignored release "%s": multiple assets ' + 'with extension "%s"', tag, ext) + valid = False + break + + if valid: + downloads.extend(dls) + + downloads.sort(reverse=True) + return downloads + + def __init__(self, url, filename, version, prerelease=False): + """Create a new Download. + + Args: + url (str): URL of workflow file. + filename (str): Filename of workflow file. + version (Version): Version of workflow. + prerelease (bool, optional): Whether version is + pre-release. Defaults to False. 
+ + """ + if isinstance(version, basestring): + version = Version(version) + + self.url = url + self.filename = filename + self.version = version + self.prerelease = prerelease + + @property + def alfred_version(self): + """Minimum Alfred version based on filename extension.""" + m = match_workflow(self.filename) + if not m or not m.group(1): + return Version('0') + return Version(m.group(1)) + + @property + def dict(self): + """Convert `Download` to `dict`.""" + return dict(url=self.url, filename=self.filename, + version=str(self.version), prerelease=self.prerelease) + + def __str__(self): + """Format `Download` for printing.""" + u = ('Download(url={dl.url!r}, ' + 'filename={dl.filename!r}, ' + 'version={dl.version!r}, ' + 'prerelease={dl.prerelease!r})'.format(dl=self)) + + return u.encode('utf-8') + + def __repr__(self): + """Code-like representation of `Download`.""" + return str(self) + + def __eq__(self, other): + """Compare Downloads based on version numbers.""" + if self.url != other.url \ + or self.filename != other.filename \ + or self.version != other.version \ + or self.prerelease != other.prerelease: + return False + return True + + def __ne__(self, other): + """Compare Downloads based on version numbers.""" + return not self.__eq__(other) + + def __lt__(self, other): + """Compare Downloads based on version numbers.""" + if self.version != other.version: + return self.version < other.version + return self.alfred_version < other.alfred_version + + class Version(object): """Mostly semantic versioning. @@ -80,6 +230,9 @@ def __init__(self, vstr): Args: vstr (basestring): Semantic version string. 
""" + if not vstr: + raise ValueError('invalid version number: {!r}'.format(vstr)) + self.vstr = vstr self.major = 0 self.minor = 0 @@ -94,7 +247,7 @@ def _parse(self, vstr): else: m = self.match_version(vstr) if not m: - raise ValueError('invalid version number: {0}'.format(vstr)) + raise ValueError('invalid version number: {!r}'.format(vstr)) version, suffix = m.groups() parts = self._parse_dotted_string(version) @@ -104,7 +257,7 @@ def _parse(self, vstr): if len(parts): self.patch = parts.pop(0) if not len(parts) == 0: - raise ValueError('invalid version (too long) : {0}'.format(vstr)) + raise ValueError('version number too long: {!r}'.format(vstr)) if suffix: # Build info @@ -148,8 +301,8 @@ def __lt__(self, other): return True if other.suffix and not self.suffix: return False - return (self._parse_dotted_string(self.suffix) < - self._parse_dotted_string(other.suffix)) + return self._parse_dotted_string(self.suffix) \ + < self._parse_dotted_string(other.suffix) # t > o return False @@ -193,185 +346,151 @@ def __repr__(self): return "Version('{0}')".format(str(self)) -def download_workflow(url): - """Download workflow at ``url`` to a local temporary file. - - :param url: URL to .alfredworkflow file in GitHub repo - :returns: path to downloaded file - - """ - filename = url.split('/')[-1] - - if (not filename.endswith('.alfredworkflow') and - not filename.endswith('.alfred3workflow')): - raise ValueError('attachment not a workflow: {0}'.format(filename)) - - local_path = os.path.join(tempfile.gettempdir(), filename) - - wf().logger.debug( - 'downloading updated workflow from `%s` to `%s` ...', url, local_path) - - response = web.get(url) +def retrieve_download(dl): + """Saves a download to a temporary file and returns path. - with open(local_path, 'wb') as output: - output.write(response.content) + .. 
versionadded: 1.37 - return local_path + Args: + dl (Download): the workflow file to retrieve and save - -def build_api_url(slug): - """Generate releases URL from GitHub slug. - - :param slug: Repo name in form ``username/repo`` - :returns: URL to the API endpoint for the repo's releases + Returns: + unicode: path to downloaded file """ - if len(slug.split('/')) != 2: - raise ValueError('invalid GitHub slug: {0}'.format(slug)) - - return RELEASES_BASE.format(slug) + if not match_workflow(dl.filename): + raise ValueError('attachment not a workflow: ' + dl.filename) + path = os.path.join(tempfile.gettempdir(), dl.filename) + wf().logger.debug('downloading update from ' + '%r to %r ...', dl.url, path) -def _validate_release(release): - """Return release for running version of Alfred.""" - alf3 = wf().alfred_version.major == 3 + r = web.get(dl.url) + r.raise_for_status() - downloads = {'.alfredworkflow': [], '.alfred3workflow': []} - dl_count = 0 - version = release['tag_name'] - - for asset in release.get('assets', []): - url = asset.get('browser_download_url') - if not url: # pragma: nocover - continue - - ext = os.path.splitext(url)[1].lower() - if ext not in downloads: - continue - - # Ignore Alfred 3-only files if Alfred 2 is running - if ext == '.alfred3workflow' and not alf3: - continue + r.save_to_path(path) - downloads[ext].append(url) - dl_count += 1 + return path - # download_urls.append(url) - if dl_count == 0: - wf().logger.warning( - 'invalid release (no workflow file): %s', version) - return None +def build_api_url(repo): + """Generate releases URL from GitHub repo. - for k in downloads: - if len(downloads[k]) > 1: - wf().logger.warning( - 'invalid release (multiple %s files): %s', k, version) - return None + Args: + repo (unicode): Repo name in form ``username/repo`` - # Prefer .alfred3workflow file if there is one and Alfred 3 is - # running. 
- if alf3 and len(downloads['.alfred3workflow']): - download_url = downloads['.alfred3workflow'][0] + Returns: + unicode: URL to the API endpoint for the repo's releases - else: - download_url = downloads['.alfredworkflow'][0] - - wf().logger.debug('release %s: %s', version, download_url) + """ + if len(repo.split('/')) != 2: + raise ValueError('invalid GitHub repo: {!r}'.format(repo)) - return { - 'version': version, - 'download_url': download_url, - 'prerelease': release['prerelease'] - } + return RELEASES_BASE.format(repo) -def get_valid_releases(github_slug, prereleases=False): - """Return list of all valid releases. +def get_downloads(repo): + """Load available ``Download``s for GitHub repo. - :param github_slug: ``username/repo`` for workflow's GitHub repo - :param prereleases: Whether to include pre-releases. - :returns: list of dicts. Each :class:`dict` has the form - ``{'version': '1.1', 'download_url': 'http://github.com/...', - 'prerelease': False }`` + .. versionadded: 1.37 + Args: + repo (unicode): GitHub repo to load releases for. - A valid release is one that contains one ``.alfredworkflow`` file. + Returns: + list: Sequence of `Download` contained in GitHub releases. + """ + url = build_api_url(repo) - If the GitHub version (i.e. tag) is of the form ``v1.1``, the leading - ``v`` will be stripped. 
+ def _fetch(): + wf().logger.info('retrieving releases for %r ...', repo) + r = web.get(url) + r.raise_for_status() + return r.content - """ - api_url = build_api_url(github_slug) - releases = [] + key = 'github-releases-' + repo.replace('/', '-') + js = wf().cached_data(key, _fetch, max_age=60) - wf().logger.debug('retrieving releases list: %s', api_url) + return Download.from_releases(js) - def retrieve_releases(): - wf().logger.info( - 'retrieving releases: %s', github_slug) - return web.get(api_url).json() - slug = github_slug.replace('/', '-') - for release in wf().cached_data('gh-releases-' + slug, retrieve_releases): +def latest_download(dls, alfred_version=None, prereleases=False): + """Return newest `Download`.""" + alfred_version = alfred_version or os.getenv('alfred_version') + version = None + if alfred_version: + version = Version(alfred_version) - release = _validate_release(release) - if release is None: - wf().logger.debug('invalid release: %r', release) + dls.sort(reverse=True) + for dl in dls: + if dl.prerelease and not prereleases: + wf().logger.debug('ignored prerelease: %s', dl.version) continue - - elif release['prerelease'] and not prereleases: - wf().logger.debug('ignoring prerelease: %s', release['version']) + if version and dl.alfred_version > version: + wf().logger.debug('ignored incompatible (%s > %s): %s', + dl.alfred_version, version, dl.filename) continue - wf().logger.debug('release: %r', release) - - releases.append(release) + wf().logger.debug('latest version: %s (%s)', dl.version, dl.filename) + return dl - return releases + return None -def check_update(github_slug, current_version, prereleases=False): +def check_update(repo, current_version, prereleases=False, + alfred_version=None): """Check whether a newer release is available on GitHub. - :param github_slug: ``username/repo`` for workflow's GitHub repo - :param current_version: the currently installed version of the - workflow. :ref:`Semantic versioning ` is required. 
- :param prereleases: Whether to include pre-releases. - :type current_version: ``unicode`` - :returns: ``True`` if an update is available, else ``False`` + Args: + repo (unicode): ``username/repo`` for workflow's GitHub repo + current_version (unicode): the currently installed version of the + workflow. :ref:`Semantic versioning <semver>` is required. + prereleases (bool): Whether to include pre-releases. + alfred_version (unicode): version of currently-running Alfred. + if empty, defaults to ``$alfred_version`` environment variable. + + Returns: + bool: ``True`` if an update is available, else ``False`` If an update is available, its version number and download URL will be cached. """ - releases = get_valid_releases(github_slug, prereleases) + key = '__workflow_latest_version' + # data stored when no update is available + no_update = { + 'available': False, + 'download': None, + 'version': None, + } + current = Version(current_version) - if not len(releases): - wf().logger.warning('no valid releases for %s', github_slug) - wf().cache_data('__workflow_update_status', {'available': False}) + dls = get_downloads(repo) + if not len(dls): + wf().logger.warning('no valid downloads for %s', repo) + wf().cache_data(key, no_update) return False - wf().logger.info('%d releases for %s', len(releases), github_slug) + wf().logger.info('%d download(s) for %s', len(dls), repo) - # GitHub returns releases newest-first - latest_release = releases[0] + dl = latest_download(dls, alfred_version, prereleases) - # (latest_version, download_url) = get_latest_release(releases) - vr = Version(latest_release['version']) - vl = Version(current_version) - wf().logger.debug('latest=%r, installed=%r', vr, vl) - if vr > vl: + if not dl: + wf().logger.warning('no compatible downloads for %s', repo) + wf().cache_data(key, no_update) + return False - wf().cache_data('__workflow_update_status', { - 'version': latest_release['version'], - 'download_url': latest_release['download_url'], - 'available': True 
- }) + wf().logger.debug('latest=%r, installed=%r', dl.version, current) + if dl.version > current: + wf().cache_data(key, { + 'version': str(dl.version), + 'download': dl.dict, + 'available': True, + }) return True - wf().cache_data('__workflow_update_status', {'available': False}) + wf().cache_data(key, no_update) return False @@ -381,49 +500,63 @@ def install_update(): :returns: ``True`` if an update is installed, else ``False`` """ - update_data = wf().cached_data('__workflow_update_status', max_age=0) + key = '__workflow_latest_version' + # data stored when no update is available + no_update = { + 'available': False, + 'download': None, + 'version': None, + } + status = wf().cached_data(key, max_age=0) - if not update_data or not update_data.get('available'): + if not status or not status.get('available'): wf().logger.info('no update available') return False - local_file = download_workflow(update_data['download_url']) + dl = status.get('download') + if not dl: + wf().logger.info('no download information') + return False + + path = retrieve_download(Download.from_dict(dl)) wf().logger.info('installing updated workflow ...') - subprocess.call(['open', local_file]) + subprocess.call(['open', path]) - update_data['available'] = False - wf().cache_data('__workflow_update_status', update_data) + wf().cache_data(key, no_update) return True if __name__ == '__main__': # pragma: nocover import sys + prereleases = False + def show_help(status=0): """Print help message.""" - print('Usage : update.py (check|install) ' - '[--prereleases] <github_slug> <version>') + print('usage: update.py (check|install) ' - '[--prereleases] ') + '[--prereleases] <repo> <version>') sys.exit(status) argv = sys.argv[:] if '-h' in argv or '--help' in argv: show_help() - prereleases = '--prereleases' in argv - - if prereleases: + if '--prereleases' in argv: argv.remove('--prereleases') + prereleases = True if len(argv) != 4: show_help(1) - action, github_slug, version = argv[1:] + action = argv[1] + repo = argv[2] + version = argv[3] try: if action == 
'check': - check_update(github_slug, version, prereleases) + check_update(repo, version, prereleases) elif action == 'install': install_update() else: diff --git a/workflow/util.py b/workflow/util.py index 257654e1..27209d81 100644 --- a/workflow/util.py +++ b/workflow/util.py @@ -18,6 +18,7 @@ import errno import fcntl import functools +import json import os import signal import subprocess @@ -25,26 +26,24 @@ from threading import Event import time -# AppleScript to call an External Trigger in Alfred -AS_TRIGGER = """ -tell application "Alfred 3" -run trigger "{name}" in workflow "{bundleid}" {arg} -end tell -""" - -# AppleScript to save a variable in info.plist -AS_CONFIG_SET = """ -tell application "Alfred 3" -set configuration "{name}" to value "{value}" in workflow "{bundleid}" {export} -end tell -""" - -# AppleScript to remove a variable from info.plist -AS_CONFIG_UNSET = """ -tell application "Alfred 3" -remove configuration "{name}" in workflow "{bundleid}" -end tell -""" +# JXA scripts to call Alfred's API via the Scripting Bridge +# {app} is automatically replaced with "Alfred 3" or +# "com.runningwithcrayons.Alfred" depending on version. 
+# +# Open Alfred in search (regular) mode +JXA_SEARCH = "Application({app}).search({arg});" +# Open Alfred's File Actions on an argument +JXA_ACTION = "Application({app}).action({arg});" +# Open Alfred's navigation mode at path +JXA_BROWSE = "Application({app}).browse({arg});" +# Set the specified theme +JXA_SET_THEME = "Application({app}).setTheme({arg});" +# Call an External Trigger +JXA_TRIGGER = "Application({app}).runTrigger({arg}, {opts});" +# Save a variable to the workflow configuration sheet/info.plist +JXA_SET_CONFIG = "Application({app}).setConfiguration({arg}, {opts});" +# Delete a variable from the workflow configuration sheet/info.plist +JXA_UNSET_CONFIG = "Application({app}).removeConfiguration({arg}, {opts});" class AcquisitionError(Exception): @@ -71,6 +70,27 @@ class AcquisitionError(Exception): """ +def jxa_app_name(): + """Return name of application to call currently running Alfred. + + .. versionadded: 1.37 + + Returns 'Alfred 3' or 'com.runningwithcrayons.Alfred' depending + on which version of Alfred is running. + + This name is suitable for use with ``Application(name)`` in JXA. + + Returns: + unicode: Application name or ID. + + """ + if os.getenv('alfred_version', '').startswith('3'): + # Alfred 3 + return u'Alfred 3' + # Alfred 4+ + return u'com.runningwithcrayons.Alfred' + + def unicodify(s, encoding='utf-8', norm=None): """Ensure string is Unicode. @@ -130,10 +150,9 @@ def applescriptify(s): Replaces ``"`` with `"& quote &"`. Use this function if you want to insert a string into an AppleScript script: - >>> script = 'tell application "Alfred 3" to search "{}"' >>> query = 'g "python" test' - >>> script.format(applescriptify(query)) - 'tell application "Alfred 3" to search "g " & quote & "python" & quote & "test"' + >>> applescriptify(query) + 'g " & quote & "python" & quote & "test' Args: s (unicode): Unicode string to escape. @@ -183,7 +202,12 @@ def run_applescript(script, *args, **kwargs): str: Output of run command. 
""" - cmd = ['/usr/bin/osascript', '-l', kwargs.get('lang', 'AppleScript')] + lang = 'AppleScript' + if 'lang' in kwargs: + lang = kwargs['lang'] + del kwargs['lang'] + + cmd = ['/usr/bin/osascript', '-l', lang] if os.path.exists(script): cmd += [script] @@ -192,7 +216,7 @@ def run_applescript(script, *args, **kwargs): cmd.extend(args) - return run_command(cmd) + return run_command(cmd, **kwargs) def run_jxa(script, *args): @@ -227,18 +251,17 @@ def run_trigger(name, bundleid=None, arg=None): arg (str, optional): Argument to pass to trigger. """ - if not bundleid: - bundleid = os.getenv('alfred_workflow_bundleid') - + bundleid = bundleid or os.getenv('alfred_workflow_bundleid') + appname = jxa_app_name() + opts = {'inWorkflow': bundleid} if arg: - arg = 'with argument "{}"'.format(applescriptify(arg)) - else: - arg = '' + opts['withArgument'] = arg - script = AS_TRIGGER.format(name=name, bundleid=bundleid, - arg=arg) + script = JXA_TRIGGER.format(app=json.dumps(appname), + arg=json.dumps(name), + opts=json.dumps(opts, sort_keys=True)) - run_applescript(script) + run_applescript(script, lang='JavaScript') def set_config(name, value, bundleid=None, exportable=False): @@ -254,22 +277,19 @@ def set_config(name, value, bundleid=None, exportable=False): as exportable (Don't Export checkbox). 
""" - if not bundleid: - bundleid = os.getenv('alfred_workflow_bundleid') + bundleid = bundleid or os.getenv('alfred_workflow_bundleid') + appname = jxa_app_name() + opts = { + 'toValue': value, + 'inWorkflow': bundleid, + 'exportable': exportable, + } - name = applescriptify(name) - value = applescriptify(value) - bundleid = applescriptify(bundleid) + script = JXA_SET_CONFIG.format(app=json.dumps(appname), + arg=json.dumps(name), + opts=json.dumps(opts, sort_keys=True)) - if exportable: - export = 'exportable true' - else: - export = 'exportable false' - - script = AS_CONFIG_SET.format(name=name, bundleid=bundleid, - value=value, export=export) - - run_applescript(script) + run_applescript(script, lang='JavaScript') def unset_config(name, bundleid=None): @@ -282,15 +302,15 @@ def unset_config(name, bundleid=None): bundleid (str, optional): Bundle ID of workflow variable belongs to. """ - if not bundleid: - bundleid = os.getenv('alfred_workflow_bundleid') + bundleid = bundleid or os.getenv('alfred_workflow_bundleid') + appname = jxa_app_name() + opts = {'inWorkflow': bundleid} - name = applescriptify(name) - bundleid = applescriptify(bundleid) + script = JXA_UNSET_CONFIG.format(app=json.dumps(appname), + arg=json.dumps(name), + opts=json.dumps(opts, sort_keys=True)) - script = AS_CONFIG_UNSET.format(name=name, bundleid=bundleid) - - run_applescript(script) + run_applescript(script, lang='JavaScript') def appinfo(name): @@ -411,10 +431,9 @@ def acquire(self, blocking=True): start = time.time() while True: - # Raise error if we've been waiting too long to acquire the lock if self.timeout and (time.time() - start) >= self.timeout: - raise AcquisitionError('lock acquisition timed out') + raise AcquisitionError('lock acquisition timed out') # If already locked, wait then try again if self.locked: diff --git a/workflow/version b/workflow/version index 1caa5ab8..71437d03 100644 --- a/workflow/version +++ b/workflow/version @@ -1 +1 @@ -1.36 \ No newline at end of file 
+1.37 \ No newline at end of file diff --git a/workflow/web.py b/workflow/web.py index d64bb6f2..07819115 100644 --- a/workflow/web.py +++ b/workflow/web.py @@ -24,7 +24,7 @@ import zlib -USER_AGENT = u'Alfred-Workflow/1.19 (+http://www.deanishe.net/alfred-workflow)' +USER_AGENT = u'Alfred-Workflow/1.36 (+http://www.deanishe.net/alfred-workflow)' # Valid characters for multipart form data boundaries BOUNDARY_CHARS = string.digits + string.ascii_letters @@ -100,6 +100,7 @@ class NoRedirectHandler(urllib2.HTTPRedirectHandler): """Prevent redirections.""" def redirect_request(self, *args): + """Ignore redirect.""" return None @@ -136,6 +137,7 @@ def __setitem__(self, key, value): return dict.__setitem__(self, key.lower(), {'key': key, 'val': value}) def get(self, key, default=None): + """Return value for case-insensitive key or default.""" try: v = dict.__getitem__(self, key.lower()) except KeyError: @@ -144,27 +146,34 @@ def get(self, key, default=None): return v['val'] def update(self, other): + """Update values from other ``dict``.""" for k, v in other.items(): self[k] = v def items(self): + """Return ``(key, value)`` pairs.""" return [(v['key'], v['val']) for v in dict.itervalues(self)] def keys(self): + """Return original keys.""" return [v['key'] for v in dict.itervalues(self)] def values(self): + """Return all values.""" return [v['val'] for v in dict.itervalues(self)] def iteritems(self): + """Iterate over ``(key, value)`` pairs.""" for v in dict.itervalues(self): yield v['key'], v['val'] def iterkeys(self): + """Iterate over original keys.""" for v in dict.itervalues(self): yield v['key'] def itervalues(self): + """Interate over values.""" for v in dict.itervalues(self): yield v['val'] @@ -240,8 +249,8 @@ def __init__(self, request, stream=False): # Transfer-Encoding appears to not be used in the wild # (contrary to the HTTP standard), but no harm in testing # for it - if ('gzip' in headers.get('content-encoding', '') or - 'gzip' in 
headers.get('transfer-encoding', '')): + if 'gzip' in headers.get('content-encoding', '') or \ + 'gzip' in headers.get('transfer-encoding', ''): self._gzipped = True @@ -250,6 +259,7 @@ def stream(self): Returns: bool: `True` if response is streamed. + """ return self._stream @@ -343,20 +353,18 @@ def iter_content(self, chunk_size=4096, decode_unicode=False): "`content` has already been read from this Response.") def decode_stream(iterator, r): - - decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') + dec = codecs.getincrementaldecoder(r.encoding)(errors='replace') for chunk in iterator: - data = decoder.decode(chunk) + data = dec.decode(chunk) if data: yield data - data = decoder.decode(b'', final=True) + data = dec.decode(b'', final=True) if data: # pragma: no cover yield data def generate(): - if self._gzipped: decoder = zlib.decompressobj(16 + zlib.MAX_WBITS) @@ -427,15 +435,15 @@ def _get_encoding(self): if not self.stream: # Try sniffing response content # Encoding declared in document should override HTTP headers if self.mimetype == 'text/html': # sniff HTML headers - m = re.search("""<meta\s+charset=["']{0,1}(.+?)["']{0,1}""", + m = re.search(r"""<meta\s+charset=["']{0,1}(.+?)["']{0,1}""", self.content) if m: encoding = m.group(1) - elif ((self.mimetype.startswith('application/') or - self.mimetype.startswith('text/')) and - 'xml' in self.mimetype): - m = re.search("""<?xml.+?encoding=["'](.+?)["'][^>]*\?>""", + elif ((self.mimetype.startswith('application/') + or self.mimetype.startswith('text/')) + and 'xml' in self.mimetype): + m = re.search(r"""<?xml.+?encoding=["'](.+?)["'][^>]*\?>""", self.content) if m: encoding = m.group(1) @@ -628,7 +636,6 @@ def get_content_type(filename): :rtype: str """ - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' boundary = '-----' + ''.join(random.choice(BOUNDARY_CHARS) diff --git a/workflow/workflow.py index c2c16169..584f376e 100644 --- a/workflow/workflow.py +++ b/workflow/workflow.py @@ -43,8 +43,9 @@ except ImportError: # pragma: no cover import xml.etree.ElementTree as ET +# imported 
to maintain API +from util import AcquisitionError # noqa: F401 from util import ( - AcquisitionError, # imported to maintain API atomic_writer, LockFile, uninterruptible, @@ -892,9 +893,9 @@ class Workflow(object): storing & caching data, using Keychain, and generating Script Filter feedback. - ``Workflow`` is compatible with both Alfred 2 and 3. The - :class:`~workflow.Workflow3` subclass provides additional, - Alfred 3-only features, such as workflow variables. + ``Workflow`` is compatible with Alfred 2+. Subclass + :class:`~workflow.Workflow3` provides additional features, + only available in Alfred 3+, such as workflow variables. :param default_settings: default workflow settings. If no settings file exists, :class:`Workflow.settings` will be pre-populated with @@ -965,8 +966,9 @@ def __init__(self, default_settings=None, update_settings=None, self._last_version_run = UNSET # Cache for regex patterns created for filter keys self._search_pattern_cache = {} - # Magic arguments - #: The prefix for all magic arguments. Default is ``workflow:`` + #: Prefix for all magic arguments. + #: The default value is ``workflow:`` so keyword + #: ``config`` would match user query ``workflow:config``. self.magic_prefix = 'workflow:' #: Mapping of available magic arguments. The built-in magic #: arguments are registered by default. To add your own magic arguments @@ -1111,12 +1113,7 @@ def debugging(self): :rtype: ``bool`` """ - if self._debugging is None: - if self.alfred_env.get('debug') == 1: - self._debugging = True - else: - self._debugging = False - return self._debugging + return self.alfred_env.get('debug') == '1' @property def name(self): @@ -1225,14 +1222,18 @@ def cachedir(self): """Path to workflow's cache directory. The cache directory is a subdirectory of Alfred's own cache directory - in ``~/Library/Caches``. The full path is: + in ``~/Library/Caches``. 
The full path is in Alfred 4+ is: + + ``~/Library/Caches/com.runningwithcrayons.Alfred/Workflow Data/`` + + For earlier versions: ``~/Library/Caches/com.runningwithcrayons.Alfred-X/Workflow Data/`` - ``Alfred-X`` may be ``Alfred-2`` or ``Alfred-3``. + where ``Alfred-X`` may be ``Alfred-2`` or ``Alfred-3``. - :returns: full path to workflow's cache directory - :rtype: ``unicode`` + Returns: + unicode: full path to workflow's cache directory """ if self.alfred_env.get('workflow_cache'): @@ -1257,12 +1258,18 @@ def datadir(self): """Path to workflow's data directory. The data directory is a subdirectory of Alfred's own data directory in - ``~/Library/Application Support``. The full path is: + ``~/Library/Application Support``. The full path for Alfred 4+ is: - ``~/Library/Application Support/Alfred 2/Workflow Data/`` + ``~/Library/Application Support/Alfred/Workflow Data/`` - :returns: full path to workflow data directory - :rtype: ``unicode`` + For earlier versions, the path is: + + ``~/Library/Application Support/Alfred X/Workflow Data/`` + + where ``Alfred X` is ``Alfred 2`` or ``Alfred 3``. + + Returns: + unicode: full path to workflow data directory """ if self.alfred_env.get('workflow_data'): @@ -1284,8 +1291,8 @@ def _default_datadir(self): def workflowdir(self): """Path to workflow's root directory (where ``info.plist`` is). 
- :returns: full path to workflow root directory - :rtype: ``unicode`` + Returns: + unicode: full path to workflow root directory """ if not self._workflowdir: @@ -2261,17 +2268,16 @@ def update_available(self): :returns: ``True`` if an update is available, else ``False`` """ + key = '__workflow_latest_version' # Create a new workflow object to ensure standard serialiser # is used (update.py is called without the user's settings) - update_data = Workflow().cached_data('__workflow_update_status', - max_age=0) - - self.logger.debug('update_data: %r', update_data) + status = Workflow().cached_data(key, max_age=0) - if not update_data or not update_data.get('available'): + # self.logger.debug('update status: %r', status) + if not status or not status.get('available'): return False - return update_data['available'] + return status['available'] @property def prereleases(self): @@ -2304,6 +2310,7 @@ def check_update(self, force=False): :type force: ``Boolean`` """ + key = '__workflow_latest_version' frequency = self._update_settings.get('frequency', DEFAULT_UPDATE_FREQUENCY) @@ -2312,10 +2319,9 @@ def check_update(self, force=False): return # Check for new version if it's time - if (force or not self.cached_data_fresh( - '__workflow_update_status', frequency * 86400)): + if (force or not self.cached_data_fresh(key, frequency * 86400)): - github_slug = self._update_settings['github_slug'] + repo = self._update_settings['github_slug'] # version = self._update_settings['version'] version = str(self.version) @@ -2325,8 +2331,7 @@ def check_update(self, force=False): update_script = os.path.join(os.path.dirname(__file__), b'update.py') - cmd = ['/usr/bin/python', update_script, 'check', github_slug, - version] + cmd = ['/usr/bin/python', update_script, 'check', repo, version] if self.prereleases: cmd.append('--prereleases') @@ -2352,11 +2357,11 @@ def start_update(self): """ import update - github_slug = self._update_settings['github_slug'] + repo = 
self._update_settings['github_slug'] # version = self._update_settings['version'] version = str(self.version) - if not update.check_update(github_slug, version, self.prereleases): + if not update.check_update(repo, version, self.prereleases): return False from background import run_in_background @@ -2365,8 +2370,7 @@ def start_update(self): update_script = os.path.join(os.path.dirname(__file__), b'update.py') - cmd = ['/usr/bin/python', update_script, 'install', github_slug, - version] + cmd = ['/usr/bin/python', update_script, 'install', repo, version] if self.prereleases: cmd.append('--prereleases') diff --git a/workflow/workflow3.py b/workflow/workflow3.py index a6c07c94..b92c4be0 100644 --- a/workflow/workflow3.py +++ b/workflow/workflow3.py @@ -7,11 +7,11 @@ # Created on 2016-06-25 # -"""An Alfred 3-only version of :class:`~workflow.Workflow`. +"""An Alfred 3+ version of :class:`~workflow.Workflow`. -:class:`~workflow.Workflow3` supports Alfred 3's new features, such as +:class:`~workflow.Workflow3` supports new features, such as setting :ref:`workflow-variables` and -:class:`the more advanced modifiers ` supported by Alfred 3. +:class:`the more advanced modifiers ` supported by Alfred 3+. In order for the feedback mechanism to work correctly, it's important to create :class:`Item3` and :class:`Modifier` objects via the @@ -250,7 +250,7 @@ def _icon(self): class Item3(object): - """Represents a feedback item for Alfred 3. + """Represents a feedback item for Alfred 3+. Generates Alfred-compliant JSON for a single item. @@ -447,7 +447,7 @@ def _modifiers(self): class Workflow3(Workflow): - """Workflow class that generates Alfred 3 feedback. + """Workflow class that generates Alfred 3+ feedback. It is a subclass of :class:`~workflow.Workflow` and most of its methods are documented there. 
@@ -476,18 +476,18 @@ def __init__(self, **kwargs): @property def _default_cachedir(self): - """Alfred 3's default cache directory.""" + """Alfred 4's default cache directory.""" return os.path.join( os.path.expanduser( - '~/Library/Caches/com.runningwithcrayons.Alfred-3/' + '~/Library/Caches/com.runningwithcrayons.Alfred/' 'Workflow Data/'), self.bundleid) @property def _default_datadir(self): - """Alfred 3's default data directory.""" + """Alfred 4's default data directory.""" return os.path.join(os.path.expanduser( - '~/Library/Application Support/Alfred 3/Workflow Data/'), + '~/Library/Application Support/Alfred/Workflow Data/'), self.bundleid) @property @@ -707,6 +707,7 @@ def warn_empty(self, title, subtitle=u'', icon=None): Returns: Item3: Newly-created item. + """ if len(self._items): return