diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 51a14935f5..8b6cf520bf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ default_stages: [commit, manual] repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.3 + rev: v0.1.8 hooks: - id: ruff name: "ruff on kedro/, tests/ and docs/" @@ -32,14 +32,6 @@ repos: - id: requirements-txt-fixer # Sorts entries in requirements.txt exclude: "^kedro/templates/|^features/steps/test_starter/" - - repo: https://github.com/asottile/blacken-docs - rev: v1.12.1 - hooks: - - id: blacken-docs - additional_dependencies: - - black~=23.0 - entry: blacken-docs --skip-errors - - repo: local hooks: - id: imports @@ -56,9 +48,3 @@ repos: exclude: ^features/steps/test_starter pass_filenames: false entry: make secret-scan - - id: bandit - name: "Bandit security check" - language: system - types: [file, python] - exclude: ^kedro/templates/|^tests/|^features/steps/test_starter - entry: bandit -ll diff --git a/README.md b/README.md index 5504767806..c2f52bada2 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ Our [Get Started guide](https://docs.kedro.org/en/stable/get_started/install.htm ## What are the main features of Kedro? -| Feature | What is this? | -| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Project Template | A standard, modifiable and easy-to-use project template based on [Cookiecutter Data Science](https://github.com/drivendata/cookiecutter-data-science/). | -| Data Catalog | A series of lightweight data connectors used to save and load data across many different file formats and file systems, including local and network file systems, cloud object stores, and HDFS. The Data Catalog also includes data and model versioning for file-based systems. | -| Pipeline Abstraction | Automatic resolution of dependencies between pure Python functions and data pipeline visualisation using [Kedro-Viz](https://github.com/kedro-org/kedro-viz). | -| Coding Standards | Test-driven development using [`pytest`](https://github.com/pytest-dev/pytest), produce well-documented code using [Sphinx](http://www.sphinx-doc.org/en/master/), create linted code with support for [`flake8`](https://github.com/PyCQA/flake8), [`isort`](https://github.com/PyCQA/isort) and [`black`](https://github.com/psf/black) and make use of the standard Python logging library. | -| Flexible Deployment | Deployment strategies that include single or distributed-machine deployment as well as additional support for deploying on Argo, Prefect, Kubeflow, AWS Batch and Databricks. | +| Feature | What is this? 
|
+| -------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Project Template     | A standard, modifiable and easy-to-use project template based on [Cookiecutter Data Science](https://github.com/drivendata/cookiecutter-data-science/).                                                                                                                                                 |
+| Data Catalog         | A series of lightweight data connectors used to save and load data across many different file formats and file systems, including local and network file systems, cloud object stores, and HDFS. The Data Catalog also includes data and model versioning for file-based systems.                        |
+| Pipeline Abstraction | Automatic resolution of dependencies between pure Python functions and data pipeline visualisation using [Kedro-Viz](https://github.com/kedro-org/kedro-viz).                                                                                                                                            |
+| Coding Standards     | Test-driven development using [`pytest`](https://github.com/pytest-dev/pytest), produce well-documented code using [Sphinx](http://www.sphinx-doc.org/en/master/), create linted code with support for [`ruff`](https://github.com/astral-sh/ruff) and make use of the standard Python logging library.  |
+| Flexible Deployment  | Deployment strategies that include single or distributed-machine deployment as well as additional support for deploying on Argo, Prefect, Kubeflow, AWS Batch and Databricks.                                                                                                                            |

 ## How do I use Kedro?

diff --git a/RELEASE.md b/RELEASE.md
index 6ce2f56f7b..22ab2ea0c6 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -8,6 +8,7 @@
 * Removed setuptools dependency
 * Added `source_dir` explicitly in `pyproject.toml` for non-src layout project.
 * `MemoryDataset` entries are now included in free outputs.
+* Removed the black dependency and replaced its functionality with `ruff format`.

 ## Breaking changes to the API
 * Added logging about not using async mode in `SequentialRunner` and `ParallelRunner`.

diff --git a/docs/source/deployment/prefect.md b/docs/source/deployment/prefect.md
index c21de72b78..91d71e30c5 100644
--- a/docs/source/deployment/prefect.md
+++ b/docs/source/deployment/prefect.md
@@ -163,7 +163,7 @@ def kedro_init(
     context = session.load_context()
     catalog = context.catalog
     logger.info("Registering datasets...")
-    unregistered_ds = pipeline.datasets() - set(catalog.list())  # NOQA
+    unregistered_ds = pipeline.datasets() - set(catalog.list())
     for ds_name in unregistered_ds:
         catalog.add(ds_name, MemoryDataset())
     return {"catalog": catalog, "sess_id": session.session_id}

diff --git a/docs/source/development/linting.md b/docs/source/development/linting.md
index 8beec39d1a..61989cdf85 100644
--- a/docs/source/development/linting.md
+++ b/docs/source/development/linting.md
@@ -9,34 +9,36 @@ Linting tools check your code for errors such as a missing bracket or line inden
 As a project grows and goes through various stages of development it becomes important to maintain code quality. Using a consistent format and linting your code ensures that it is consistent, readable, and easy to debug and maintain.

 ## Set up Python tools
-There are a variety of Python tools available to use with your Kedro projects. This guide shows you how to use [`black`](https://github.com/psf/black) and [`ruff`](https://beta.ruff.rs).
-- **`black`** is a [PEP 8](https://peps.python.org/pep-0008/) compliant opinionated Python code formatter. `black` can
-check for styling inconsistencies and reformat your files in place.
-[You can read more in the `black` documentation](https://black.readthedocs.io/en/stable/).
-- **`ruff`** is a fast linter that replaces `flake8`, `pylint`, `pyupgrade`, `isort` and [more](https://beta.ruff.rs/docs/rules/).
-  - It helps to make your code compliant to [`pep8`](https://pypi.org/project/pep8/).
-  - It reformats code by sorting imports alphabetically and automatically separating them into sections by
-type. [You can read more in the `isort` documentation](https://pycqa.github.io/isort/).
+There are a variety of Python tools available to use with your Kedro projects. This guide shows you how to use [`ruff`](https://beta.ruff.rs).
+
+**`ruff`** is a fast linter and formatter that replaces `flake8`, `pylint`, `pyupgrade`, `isort`, `black` and [more](https://beta.ruff.rs/docs/rules/).
+  - It helps to make your code compliant with [PEP 8](https://peps.python.org/pep-0008/).
+  - It reformats code and sorts imports alphabetically, automatically separating them into sections by
+type.

 ### Install the tools
-Install `black` and `ruff` by adding the following lines to your project's `requirements.txt`
+Install `ruff` by adding the following line to your project's `requirements.txt`
 file:

 ```text
-black # Used for formatting code
 ruff # Used for linting, formatting and sorting module imports
-
 ```
+
 To install all the project-specific dependencies, including the linting tools, navigate to the root directory of the project and run:
+
 ```bash
 pip install -r requirements.txt
 ```
+
 Alternatively, you can individually install the linting tools using the following shell commands:
+
 ```bash
-pip install black ruff
+pip install ruff
 ```
+
 #### Configure `ruff`
+
 `ruff` reads configurations from `pyproject.toml` within your project root. You can enable different rule sets within the `[tool.ruff]` section. For example, the rule set `F` is equivalent to `Pyflakes`.

 To start with `ruff`, we recommend adding this section to enable a few basic rule sets.
@@ -50,7 +52,6 @@ select = [
     "I",  # isort
     "PL", # Pylint
 ]
-ignore = ["E501"]  # Black take care off line-too-long
 ```

 ```{note}
@@ -58,12 +59,13 @@ It is a good practice to [split your line when it is too long](https://beta.ruff
 ```

 ### Run the tools
+
 Use the following commands to run lint checks:
 ```bash
-black --check
+ruff format --check
 ruff check
 ```
-You can also have `black` automatically format your code by omitting the `--check` flag.
+You can also have `ruff format` automatically format your code by omitting the `--check` flag.

 ## Automated formatting and linting with `pre-commit` hooks

@@ -72,36 +74,44 @@ These hooks are run before committing your code to your repositories to automati
 making code reviews easier and less time-consuming.

 ### Install `pre-commit`
+
 You can install `pre-commit` along with other dependencies by including it in the `requirements.txt` file of your Kedro project by adding the following line:
+
 ```text
 pre-commit
 ```

 You can also install `pre-commit` using the following command:
+
 ```bash
 pip install pre-commit
 ```
+
 ### Add `pre-commit` configuration file
+
 Create a file named `.pre-commit-config.yaml` in your Kedro project root directory. You can add entries for the hooks you want to run before each `commit`.
-Below is a sample `YAML` file with entries for `ruff` and black`:
+Below is a sample `YAML` file with entries for `ruff`:
+
 ```yaml
 repos:
 -   repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.0.270
+    rev: '' # Replace with latest version, for example 'v0.1.8'
     hooks:
     -   id: ruff
-
--   repo: https://github.com/psf/black
-    rev: 22.8.0
-    hooks:
-    -   id: black
-        language_version: python3.9
+        args: [--fix]
+    -   id: ruff-format
 ```
+
+See GitHub for [the latest configuration for ruff's pre-commit](https://github.com/astral-sh/ruff-pre-commit).
+
 ### Install git hook scripts
+
 Run the following command to complete installation:
+
 ```bash
 pre-commit install
 ```
+
 This enables `pre-commit` hooks to run automatically every time you execute `git commit`.

diff --git a/docs/source/get_started/new_project.md b/docs/source/get_started/new_project.md
index 70b780e322..984680dff8 100644
--- a/docs/source/get_started/new_project.md
+++ b/docs/source/get_started/new_project.md
@@ -31,7 +31,7 @@ Next, the CLI asks which tools you'd like to include in the project:

 ```text
 Tools
-1) Lint: Basic linting with Black and Ruff
+1) Lint: Basic linting with Ruff
 2) Test: Basic testing with pytest
 3) Log: Additional, environment-specific logging options
 4) Docs: A Sphinx documentation setup
@@ -47,7 +47,6 @@ The options are described in more detail in the [documentation about the new pro

 Select the tools by number, or `all` or follow the default to add `none`.

-
 ### Project examples

 Finally, the CLI offers the option to include starter example code in the project:

diff --git a/docs/source/index.rst b/docs/source/index.rst
index 6c8329b7fd..028d858054 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -47,9 +47,9 @@ Welcome to Kedro's award-winning documentation!
    :target: https://linen-slack.kedro.org/
    :alt: Kedro's Slack archive

-.. image:: https://img.shields.io/badge/code%20style-black-black.svg
-   :target: https://github.com/psf/black
-   :alt: Code style is Black
+.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
+   :target: https://github.com/astral-sh/ruff
+   :alt: Linted and Formatted with Ruff

 .. image:: https://bestpractices.coreinfrastructure.org/projects/6711/badge
    :target: https://bestpractices.coreinfrastructure.org/projects/6711

diff --git a/docs/source/starters/new_project_tools.md b/docs/source/starters/new_project_tools.md
index 1de6a1a1f3..96b5ed4ff7 100644
--- a/docs/source/starters/new_project_tools.md
+++ b/docs/source/starters/new_project_tools.md
@@ -190,7 +190,7 @@ With these installed, you can then make use of the following commands to format

 ```bash
 ruff format path/to/project/root
-black path/to/project/root --check
+ruff check path/to/project/root
 ```

 Though it has no impact on how your code works, linting is important for code quality because it improves consistency, readability, debugging, and maintainability. To learn more about linting your Kedro projects, check our [linting documentation](../development/linting.md).
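To make the swap concrete, here is a minimal sketch of what the two commands shown above do to a single file; the module and function names are hypothetical:

```python
# example.py -- a hypothetical module, before running the two tools
import sys
import os


def cwd_to_stderr():
    # `ruff check --fix` (with the "I" rules selected, as in the pyproject.toml
    # snippets in this diff) re-sorts the imports so `os` comes before `sys`.
    # T201, if enabled, flags this print() call.
    print(os.getcwd(), file=sys.stderr)


# `ruff format example.py` rewrites the file in place with black-compatible
# formatting; `ruff format --check example.py` only reports whether it would.
```
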
diff --git a/docs/source/tutorial/tutorial_template.md b/docs/source/tutorial/tutorial_template.md index a4e3af35db..2b2c45cc82 100644 --- a/docs/source/tutorial/tutorial_template.md +++ b/docs/source/tutorial/tutorial_template.md @@ -32,10 +32,9 @@ The spaceflights project dependencies are stored in `requirements.txt`(you may f ```text # code quality packages -black~=22.0 ipython>=7.31.1, <8.0; python_version < '3.8' ipython~=8.10; python_version >= '3.8' -ruff~=0.0.290 +ruff==0.1.8 # notebook tooling jupyter~=1.0 diff --git a/features/steps/sh_run.py b/features/steps/sh_run.py index c925c02797..c201d5b9ef 100644 --- a/features/steps/sh_run.py +++ b/features/steps/sh_run.py @@ -36,7 +36,7 @@ def run( """ if isinstance(cmd, str) and split: cmd = shlex.split(cmd) - result = subprocess.run(cmd, input="", capture_output=True, **kwargs) # noqa: PLW1510 + result = subprocess.run(cmd, input="", capture_output=True, **kwargs) # noqa: PLW1510, S603 result.stdout = result.stdout.decode("utf-8") result.stderr = result.stderr.decode("utf-8") if print_output: @@ -59,9 +59,9 @@ def check_run(cmd: list | str, print_output: bool = False) -> None: split_cmd = cmd if print_output: - subprocess.check_call(split_cmd) + subprocess.check_call(split_cmd) # noqa: S603 else: - subprocess.check_call(split_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + subprocess.check_call(split_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: S603 class ChildTerminatingPopen(subprocess.Popen): diff --git a/features/steps/test_starter/{{ cookiecutter.repo_name }}/requirements.txt b/features/steps/test_starter/{{ cookiecutter.repo_name }}/requirements.txt index 7904db0fe8..fb756bd0f3 100644 --- a/features/steps/test_starter/{{ cookiecutter.repo_name }}/requirements.txt +++ b/features/steps/test_starter/{{ cookiecutter.repo_name }}/requirements.txt @@ -1,4 +1,4 @@ -black~=22.0 +ruff==0.1.8 ipython>=8.10 jupyterlab>=3.0 notebook diff --git a/kedro/framework/cli/catalog.py b/kedro/framework/cli/catalog.py index 3208ba0de1..6e386711c7 100644 --- a/kedro/framework/cli/catalog.py +++ b/kedro/framework/cli/catalog.py @@ -23,7 +23,6 @@ def _create_session(package_name: str, **kwargs: Any) -> KedroSession: return KedroSession.create(**kwargs) -# noqa: missing-function-docstring @click.group(name="Kedro") def catalog_cli() -> None: # pragma: no cover pass @@ -34,7 +33,6 @@ def catalog() -> None: """Commands for working with catalog.""" -# noqa: too-many-locals,protected-access @catalog.command("list") @env_option @click.option( @@ -173,7 +171,7 @@ def create_catalog(metadata: ProjectMetadata, pipeline_name: str, env: str) -> N catalog_datasets = { ds_name - for ds_name in context.catalog._datasets.keys() # noqa: protected-access + for ds_name in context.catalog._datasets.keys() if not ds_name.startswith("params:") and ds_name != "parameters" } diff --git a/kedro/framework/cli/cli.py b/kedro/framework/cli/cli.py index c65fe70556..5ecd5b9f9c 100644 --- a/kedro/framework/cli/cli.py +++ b/kedro/framework/cli/cli.py @@ -29,7 +29,7 @@ _get_entry_points, load_entry_points, ) -from kedro.framework.project import LOGGING # noqa # noqa: unused-import +from kedro.framework.project import LOGGING # noqa: F401 from kedro.framework.startup import _is_project, bootstrap_project LOGO = rf""" @@ -149,7 +149,6 @@ def global_groups(self) -> Sequence[click.MultiCommand]: @property def project_groups(self) -> Sequence[click.MultiCommand]: - # noqa: line-too-long """Property which loads all project command groups from the project and the 
plugins, then combines them with the built-in ones. Built-in commands can be overridden by plugins, which can be diff --git a/kedro/framework/cli/hooks/manager.py b/kedro/framework/cli/hooks/manager.py index 8b07a7b746..ba21adc356 100644 --- a/kedro/framework/cli/hooks/manager.py +++ b/kedro/framework/cli/hooks/manager.py @@ -1,5 +1,4 @@ """This module defines a dedicated hook manager for hooks that extends Kedro CLI behaviour.""" -# noqa: global-statement,invalid-name import logging from pluggy import PluginManager diff --git a/kedro/framework/cli/jupyter.py b/kedro/framework/cli/jupyter.py index 3c9418fbb6..17ea4b4f92 100644 --- a/kedro/framework/cli/jupyter.py +++ b/kedro/framework/cli/jupyter.py @@ -36,7 +36,6 @@ def list_commands(self, ctx: click.Context) -> list[str]: return ["setup", "notebook", "lab", "convert"] -# noqa: missing-function-docstring @click.group(name="Kedro") def jupyter_cli() -> None: # pragma: no cover pass @@ -156,7 +155,6 @@ def _create_kernel(kernel_name: str, display_name: str) -> str: """ # These packages are required by jupyter lab and notebook, which we have already # checked are importable, so we don't run _check_module_importable on them. - # noqa: import-outside-toplevel from ipykernel.kernelspec import install try: diff --git a/kedro/framework/cli/pipeline.py b/kedro/framework/cli/pipeline.py index 6b09ad0e2c..94ade3e033 100644 --- a/kedro/framework/cli/pipeline.py +++ b/kedro/framework/cli/pipeline.py @@ -71,7 +71,6 @@ def _check_pipeline_name(ctx: click.Context, param: Any, value: str) -> str: # return value -# noqa: missing-function-docstring @click.group(name="Kedro") def pipeline_cli() -> None: # pragma: no cover pass @@ -216,7 +215,6 @@ def _echo_deletion_warning(message: str, **paths: list[Path]) -> None: def _create_pipeline(name: str, template_path: Path, output_dir: Path) -> Path: - # noqa: import-outside-toplevel from cookiecutter.main import cookiecutter cookie_context = {"pipeline_name": name, "kedro_version": kedro.__version__} diff --git a/kedro/framework/cli/project.py b/kedro/framework/cli/project.py index a38758c767..cd9a072184 100644 --- a/kedro/framework/cli/project.py +++ b/kedro/framework/cli/project.py @@ -61,7 +61,6 @@ CONF_SOURCE_HELP = """Path of a directory where project configuration is stored.""" -# noqa: missing-function-docstring @click.group(name="Kedro") def project_group() -> None: # pragma: no cover pass @@ -197,7 +196,7 @@ def package(metadata: ProjectMetadata) -> None: help=PARAMS_ARG_HELP, callback=_split_params, ) -def run( # noqa: PLR0913,unused-argument,too-many-locals +def run( # noqa: PLR0913 tags: str, env: str, runner: str, diff --git a/kedro/framework/cli/registry.py b/kedro/framework/cli/registry.py index ca82681e04..2c1c576574 100644 --- a/kedro/framework/cli/registry.py +++ b/kedro/framework/cli/registry.py @@ -9,7 +9,6 @@ from kedro.framework.startup import ProjectMetadata -# noqa: missing-function-docstring @click.group(name="Kedro") def registry_cli() -> None: # pragma: no cover pass @@ -31,7 +30,7 @@ def list_registered_pipelines() -> None: @click.pass_obj def describe_registered_pipeline( metadata: ProjectMetadata, /, name: str, **kwargs: Any -) -> None: # noqa: unused-argument, protected-access +) -> None: """Describe a registered pipeline by providing a pipeline name. Defaults to the `__default__` pipeline. 
""" diff --git a/kedro/framework/cli/starters.py b/kedro/framework/cli/starters.py index 8f37ac16f3..32c3db5a6f 100644 --- a/kedro/framework/cli/starters.py +++ b/kedro/framework/cli/starters.py @@ -36,7 +36,7 @@ Select which tools you'd like to include. By default, none are included.\n Tools\n -1) Linting: Provides a basic linting setup with Black and Ruff\n +1) Linting: Provides a basic linting setup with Ruff\n 2) Testing: Provides basic testing setup with pytest\n 3) Custom Logging: Provides more logging options\n 4) Documentation: Basic documentation setup with Sphinx\n @@ -69,7 +69,7 @@ @define(order=True) -class KedroStarterSpec: # noqa: too-few-public-methods +class KedroStarterSpec: """Specification of custom kedro starter template Args: alias: alias of the starter which shows up on `kedro starter list` and is used @@ -239,7 +239,6 @@ def _print_selection_and_prompt_info( ) -# noqa: missing-function-docstring @click.group(context_settings=CONTEXT_SETTINGS, name="Kedro") def create_cli() -> None: # pragma: no cover pass @@ -391,7 +390,6 @@ def _get_cookiecutter_dir( clones it to ``tmpdir``; if template_path is a file path then directly uses that path without copying anything. """ - # noqa: import-outside-toplevel from cookiecutter.exceptions import RepositoryCloneFailed, RepositoryNotFound from cookiecutter.repository import determine_repo_dir # for performance reasons @@ -458,7 +456,6 @@ def _get_prompts_required_and_clear_from_CLI_provided( def _get_available_tags(template_path: str) -> list: # Not at top level so that kedro CLI works without a working git executable. - # noqa: import-outside-toplevel import git try: @@ -658,7 +655,6 @@ def _fetch_config_from_user_prompts( Configuration for starting a new project. This is passed as ``extra_context`` to cookiecutter and will overwrite the cookiecutter.json defaults. """ - # noqa: import-outside-toplevel from cookiecutter.environment import StrictEnvironment from cookiecutter.prompt import read_user_variable, render_variable @@ -686,7 +682,6 @@ def _fetch_config_from_user_prompts( def _make_cookiecutter_context_for_prompts(cookiecutter_dir: Path) -> OrderedDict: - # noqa: import-outside-toplevel from cookiecutter.generate import generate_context cookiecutter_context = generate_context(cookiecutter_dir / "cookiecutter.json") @@ -890,7 +885,6 @@ def _create_project(template_path: str, cookiecutter_args: dict[str, Any]) -> No Raises: KedroCliError: If it fails to generate a project. """ - # noqa: import-outside-toplevel from cookiecutter.main import cookiecutter # for performance reasons try: @@ -959,7 +953,7 @@ def _remove_readonly( def _starter_spec_to_dict( - starter_specs: dict[str, KedroStarterSpec] + starter_specs: dict[str, KedroStarterSpec], ) -> dict[str, dict[str, str]]: """Convert a dictionary of starters spec to a nicely formatted dictionary""" format_dict: dict[str, dict[str, str]] = {} diff --git a/kedro/framework/cli/utils.py b/kedro/framework/cli/utils.py index 40e2a92163..d9403a9eb8 100644 --- a/kedro/framework/cli/utils.py +++ b/kedro/framework/cli/utils.py @@ -51,7 +51,7 @@ def call(cmd: list[str], **kwargs: Any) -> None: # pragma: no cover click.exceptions.Exit: If `subprocess.run` returns non-zero code. 
""" click.echo(" ".join(shlex.quote(c) for c in cmd)) - code = subprocess.run(cmd, **kwargs).returncode # noqa: PLW1510 + code = subprocess.run(cmd, **kwargs).returncode # noqa: PLW1510, S603 if code: raise click.exceptions.Exit(code=code) @@ -161,7 +161,7 @@ def _dedupe_commands(cli_collections: Sequence[click.CommandCollection]) -> None @staticmethod def _merge_same_name_collections( - groups: Sequence[click.MultiCommand] + groups: Sequence[click.MultiCommand], ) -> list[click.CommandCollection]: named_groups: defaultdict[str, list[click.MultiCommand]] = defaultdict(list) helps: defaultdict[str, list] = defaultdict(list) @@ -235,7 +235,7 @@ def get_pkg_version(reqs_path: (str | Path), package_name: str) -> str: raise KedroCliError(f"Cannot find '{package_name}' package in '{reqs_path}'.") -def _update_verbose_flag(ctx: click.Context, param: Any, value: bool) -> None: # noqa: unused-argument +def _update_verbose_flag(ctx: click.Context, param: Any, value: bool) -> None: KedroCliError.VERBOSE_ERROR = value @@ -301,12 +301,11 @@ def _clean_pycache(path: Path) -> None: shutil.rmtree(each, ignore_errors=True) -def split_string(ctx: click.Context, param: Any, value: str) -> list[str]: # noqa: unused-argument +def split_string(ctx: click.Context, param: Any, value: str) -> list[str]: """Split string by comma.""" return [item.strip() for item in value.split(",") if item.strip()] -# noqa: unused-argument,missing-param-doc,missing-type-doc def split_node_names(ctx: click.Context, param: Any, to_split: str) -> list[str]: """Split string by comma, ignoring commas enclosed by square parentheses. This avoids splitting the string of nodes names on commas included in @@ -367,13 +366,13 @@ def _get_entry_points(name: str) -> Any: ) -def _safe_load_entry_point( # noqa: inconsistent-return-statements +def _safe_load_entry_point( entry_point: Any, ) -> Any: """Load entrypoint safely, if fails it will just skip the entrypoint.""" try: return entry_point.load() - except Exception as exc: # noqa: broad-except + except Exception as exc: logger.warning( "Failed to load %s commands from %s. Full exception: %s", entry_point.module, @@ -406,7 +405,7 @@ def load_entry_points(name: str) -> Sequence[click.MultiCommand]: @typing.no_type_check -def _config_file_callback(ctx: click.Context, param: Any, value: Any) -> Any: # noqa: unused-argument +def _config_file_callback(ctx: click.Context, param: Any, value: Any) -> Any: """CLI callback that replaces command line options with values specified in a config file. If command line options are passed, they override config file values. diff --git a/kedro/framework/context/context.py b/kedro/framework/context/context.py index 874f21a721..b82a979ffa 100644 --- a/kedro/framework/context/context.py +++ b/kedro/framework/context/context.py @@ -133,7 +133,6 @@ def _validate_transcoded_datasets(catalog: DataCatalog) -> None: `_transcode_split` function. 
""" - # noqa: protected-access for dataset_name in catalog._datasets.keys(): _transcode_split(dataset_name) diff --git a/kedro/framework/project/__init__.py b/kedro/framework/project/__init__.py index b7e986010d..ea56f5d668 100644 --- a/kedro/framework/project/__init__.py +++ b/kedro/framework/project/__init__.py @@ -1,6 +1,5 @@ -"""``kedro.framework.project`` module provides utitlity to +"""``kedro.framework.project`` module provides utility to configure a Kedro project and access its settings.""" -# noqa: redefined-outer-name,unused-argument,global-statement from __future__ import annotations import importlib @@ -139,7 +138,6 @@ def _load_data_wrapper(func: Any) -> Any: Taking inspiration from dynaconf.utils.functional.new_method_proxy """ - # noqa: protected-access def inner(self: Any, *args: Any, **kwargs: Any) -> Any: self._load_data() return func(self._content, *args, **kwargs) @@ -216,7 +214,6 @@ def configure(self, pipelines_module: str | None = None) -> None: class _ProjectLogging(UserDict): - # noqa: super-init-not-called def __init__(self) -> None: """Initialise project logging. The path to logging configuration is given in environment variable KEDRO_LOGGING_CONFIG (defaults to default_logging.yml).""" @@ -348,7 +345,7 @@ def find_pipelines() -> dict[str, Pipeline]: # noqa: PLR0912 pipeline_module_name = f"{PACKAGE_NAME}.pipeline" try: pipeline_module = importlib.import_module(pipeline_module_name) - except Exception as exc: # noqa: broad-except + except Exception as exc: if str(exc) != f"No module named '{pipeline_module_name}'": warnings.warn( IMPORT_ERROR_MESSAGE.format( diff --git a/kedro/framework/session/session.py b/kedro/framework/session/session.py index 166cc521d7..41c27fd21d 100644 --- a/kedro/framework/session/session.py +++ b/kedro/framework/session/session.py @@ -33,20 +33,20 @@ def _describe_git(project_path: Path) -> dict[str, dict[str, Any]]: path = str(project_path) try: res = subprocess.check_output( - ["git", "rev-parse", "--short", "HEAD"], + ["git", "rev-parse", "--short", "HEAD"], # noqa: S603, S607 cwd=path, stderr=subprocess.STDOUT, ) git_data: dict[str, Any] = {"commit_sha": res.decode().strip()} git_status_res = subprocess.check_output( - ["git", "status", "--short"], + ["git", "status", "--short"], # noqa: S603, S607 cwd=path, stderr=subprocess.STDOUT, ) git_data["dirty"] = bool(git_status_res.decode().strip()) # `subprocess.check_output()` raises `NotADirectoryError` on Windows - except Exception: # noqa: broad-except + except Exception: logger = logging.getLogger(__name__) logger.debug("Unable to git describe %s", path) logger.debug(traceback.format_exc()) @@ -72,7 +72,6 @@ class KedroSessionError(Exception): pass -# noqa: too-many-instance-attributes class KedroSession: """``KedroSession`` is the object that is responsible for managing the lifecycle of a Kedro run. Use `KedroSession.create()` as @@ -175,7 +174,7 @@ def create( # noqa: PLR0913 try: session_data["username"] = getpass.getuser() - except Exception as exc: # noqa: broad-except + except Exception as exc: logging.getLogger(__name__).debug( "Unable to get username. 
Full exception: %s", exc ) @@ -270,7 +269,7 @@ def __exit__(self, exc_type: Any, exc_value: Any, tb_: Any) -> None: self._log_exception(exc_type, exc_value, tb_) self.close() - def run( # noqa: PLR0913,too-many-locals + def run( # noqa: PLR0913 self, pipeline_name: str | None = None, tags: Iterable[str] | None = None, @@ -372,7 +371,7 @@ def run( # noqa: PLR0913,too-many-locals "runner": getattr(runner, "__name__", str(runner)), } - catalog = context._get_catalog( # noqa: protected-access + catalog = context._get_catalog( save_version=save_version, load_versions=load_versions, ) diff --git a/kedro/framework/session/shelvestore.py b/kedro/framework/session/shelvestore.py index 3bf34157bc..722a663496 100644 --- a/kedro/framework/session/shelvestore.py +++ b/kedro/framework/session/shelvestore.py @@ -26,7 +26,7 @@ def read(self) -> dict[str, Any]: """Read the data from disk using `shelve` package.""" data: dict[str, Any] = {} try: - with shelve.open(str(self._location), flag="r") as _sh: # nosec + with shelve.open(str(self._location), flag="r") as _sh: # noqa: S301 data = dict(_sh) except dbm.error: pass @@ -37,7 +37,7 @@ def save(self) -> None: location = self._location location.parent.mkdir(parents=True, exist_ok=True) - with self._lock, shelve.open(str(location)) as _sh: # nosec + with self._lock, shelve.open(str(location)) as _sh: # noqa: S301 keys_to_del = _sh.keys() - self.data.keys() for key in keys_to_del: del _sh[key] diff --git a/kedro/io/cached_dataset.py b/kedro/io/cached_dataset.py index 410073a65d..afec2e1134 100644 --- a/kedro/io/cached_dataset.py +++ b/kedro/io/cached_dataset.py @@ -92,8 +92,8 @@ def _from_config(config: dict, version: Version | None) -> AbstractDataset: def _describe(self) -> dict[str, Any]: return { - "dataset": self._dataset._describe(), # noqa: protected-access - "cache": self._cache._describe(), # noqa: protected-access + "dataset": self._dataset._describe(), + "cache": self._cache._describe(), } def _load(self) -> Any: diff --git a/kedro/io/data_catalog.py b/kedro/io/data_catalog.py index 1b05c8634d..411cf14e09 100644 --- a/kedro/io/data_catalog.py +++ b/kedro/io/data_catalog.py @@ -410,7 +410,7 @@ def _get_dataset( if version and isinstance(dataset, AbstractVersionedDataset): # we only want to return a similar-looking dataset, # not modify the one stored in the current catalog - dataset = dataset._copy(_version=version) # noqa: protected-access + dataset = dataset._copy(_version=version) return dataset diff --git a/kedro/io/memory_dataset.py b/kedro/io/memory_dataset.py index 3645de0a29..bce8315966 100644 --- a/kedro/io/memory_dataset.py +++ b/kedro/io/memory_dataset.py @@ -92,7 +92,6 @@ def _infer_copy_mode(data: Any) -> str: Returns: One of "copy", "assign" or "deepcopy" as the copy mode to use. """ - # noqa: import-outside-toplevel try: import pandas as pd except ImportError: # pragma: no cover diff --git a/kedro/ipython/__init__.py b/kedro/ipython/__init__.py index d193789b59..b83c9194c8 100644 --- a/kedro/ipython/__init__.py +++ b/kedro/ipython/__init__.py @@ -18,7 +18,7 @@ from kedro.framework.cli.project import CONF_SOURCE_HELP, PARAMS_ARG_HELP from kedro.framework.cli.utils import ENV_HELP, _split_params from kedro.framework.project import ( - LOGGING, # noqa + LOGGING, # noqa: F401 configure_project, pipelines, ) @@ -75,7 +75,7 @@ def magic_reload_kedro( ) -> None: """ The `%reload_kedro` IPython line magic. 
- See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic # noqa: line-too-long + See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic for more. """ args = parse_argstring(magic_reload_kedro, line) @@ -142,7 +142,6 @@ def _resolve_project_path( project_path = Path(path).expanduser().resolve() else: if local_namespace and "context" in local_namespace: - # noqa: protected-access project_path = local_namespace["context"].project_path else: project_path = _find_kedro_project(Path.cwd()) @@ -153,7 +152,6 @@ def _resolve_project_path( project_path, ) - # noqa: protected-access if ( project_path and local_namespace diff --git a/kedro/pipeline/modular_pipeline.py b/kedro/pipeline/modular_pipeline.py index 68cb4e2b12..6e89b810b4 100644 --- a/kedro/pipeline/modular_pipeline.py +++ b/kedro/pipeline/modular_pipeline.py @@ -77,7 +77,7 @@ def _validate_datasets_exist( def _get_dataset_names_mapping( - names: str | set[str] | dict[str, str] | None = None + names: str | set[str] | dict[str, str] | None = None, ) -> dict[str, str]: """Take a name or a collection of dataset names and turn it into a mapping from the old dataset names to the provided ones if necessary. @@ -114,7 +114,7 @@ def _normalize_param_name(name: str) -> str: def _get_param_names_mapping( - names: str | set[str] | dict[str, str] | None = None + names: str | set[str] | dict[str, str] | None = None, ) -> dict[str, str]: """Take a parameter or a collection of parameter names and turn it into a mapping from existing parameter names to new ones if necessary. @@ -260,7 +260,7 @@ def _rename(name: str) -> str: return name def _process_dataset_names( - datasets: str | list[str] | dict[str, str] | None + datasets: str | list[str] | dict[str, str] | None, ) -> str | list[str] | dict[str, str] | None: if datasets is None: return None diff --git a/kedro/pipeline/node.py b/kedro/pipeline/node.py index 5f470e14e1..32baaf8e35 100644 --- a/kedro/pipeline/node.py +++ b/kedro/pipeline/node.py @@ -519,7 +519,7 @@ def _validate_inputs_dif_than_outputs(self) -> None: @staticmethod def _process_inputs_for_bind( - inputs: str | list[str] | dict[str, str] | None + inputs: str | list[str] | dict[str, str] | None, ) -> tuple[list[str], dict[str, str]]: # Safeguard that we do not mutate list inputs inputs = copy.copy(inputs) diff --git a/kedro/pipeline/pipeline.py b/kedro/pipeline/pipeline.py index 802d25e3c2..a453bb91a4 100644 --- a/kedro/pipeline/pipeline.py +++ b/kedro/pipeline/pipeline.py @@ -71,7 +71,7 @@ class ConfirmNotUniqueError(Exception): pass -class Pipeline: # noqa: too-many-public-methods +class Pipeline: """A ``Pipeline`` defined as a collection of ``Node`` objects. This class treats nodes as part of a graph representation and provides inputs, outputs and execution order. 
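The source-file hunks above mostly delete pylint-style suppressions such as `# noqa: protected-access`. ruff only honours `# noqa` directives that carry its own rule codes (a bare `# noqa` suppresses every rule on the line), so the descriptive labels suppressed nothing and can be dropped, while code-based directives stay. A minimal sketch of the convention, using a hypothetical module:

```python
"""Hypothetical module showing ruff's noqa convention."""
from pathlib import Path  # noqa: F401  (F401 = unused import; kept as a re-export)


def connect(host, port, user, password, timeout, retries):  # noqa: PLR0913
    # PLR0913 (too many arguments) is silenced with its rule code; a label
    # like `# noqa: too-many-arguments` matches no ruff code and silences
    # nothing -- which is why the hunks above delete such comments.
    return (host, port)
```
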
diff --git a/kedro/runner/parallel_runner.py b/kedro/runner/parallel_runner.py index ae4db946f3..d79e4685be 100644 --- a/kedro/runner/parallel_runner.py +++ b/kedro/runner/parallel_runner.py @@ -42,13 +42,12 @@ class ParallelRunnerManager(SyncManager): """ -ParallelRunnerManager.register("MemoryDataset", MemoryDataset) # noqa: no-member +ParallelRunnerManager.register("MemoryDataset", MemoryDataset) def _bootstrap_subprocess( package_name: str, logging_config: dict[str, Any] | None = None ) -> None: - # noqa: import-outside-toplevel,cyclic-import from kedro.framework.project import configure_logging, configure_project configure_project(package_name) @@ -129,7 +128,7 @@ def __init__( is_async=is_async, extra_dataset_patterns=self._extra_dataset_patterns ) self._manager = ParallelRunnerManager() - self._manager.start() # noqa: consider-using-with + self._manager.start() # This code comes from the concurrent.futures library # https://github.com/python/cpython/blob/master/Lib/concurrent/futures/process.py#L588 @@ -172,7 +171,7 @@ def _validate_catalog(cls, catalog: DataCatalog, pipeline: Pipeline) -> None: will not be synchronized across threads. """ - datasets = catalog._datasets # noqa: protected-access + datasets = catalog._datasets unserialisable = [] for name, dataset in datasets.items(): @@ -235,7 +234,7 @@ def _get_required_workers_count(self, pipeline: Pipeline) -> int: return min(required_processes, self._max_workers) - def _run( # noqa: too-many-locals,useless-suppression + def _run( self, pipeline: Pipeline, catalog: DataCatalog, @@ -258,7 +257,6 @@ def _run( # noqa: too-many-locals,useless-suppression Exception: In case of any downstream node failure. """ - # noqa: import-outside-toplevel,cyclic-import if not self._is_async: self._logger.info( "Using synchronous mode for loading and saving data. 
Use the --async flag " diff --git a/kedro/runner/runner.py b/kedro/runner/runner.py index 20b594e8e0..ae653f37ea 100644 --- a/kedro/runner/runner.py +++ b/kedro/runner/runner.py @@ -287,7 +287,6 @@ def _has_persistent_inputs(node: Node, catalog: DataCatalog) -> bool: """ for node_input in node.inputs: - # noqa: protected-access if isinstance(catalog._datasets[node_input], MemoryDataset): return False return True diff --git a/kedro/runner/thread_runner.py b/kedro/runner/thread_runner.py index 0d28d3070f..3d8ef12111 100644 --- a/kedro/runner/thread_runner.py +++ b/kedro/runner/thread_runner.py @@ -81,7 +81,7 @@ def _get_required_workers_count(self, pipeline: Pipeline) -> int: else required_threads ) - def _run( # noqa: too-many-locals,useless-suppression + def _run( self, pipeline: Pipeline, catalog: DataCatalog, @@ -125,7 +125,7 @@ def _run( # noqa: too-many-locals,useless-suppression ) ) if not futures: - assert not todo_nodes, (todo_nodes, done_nodes, ready, done) + assert not todo_nodes, (todo_nodes, done_nodes, ready, done) # noqa: S101 break done, futures = wait(futures, return_when=FIRST_COMPLETED) for future in done: diff --git a/kedro/templates/project/hooks/utils.py b/kedro/templates/project/hooks/utils.py index 8aba85988d..82d3769035 100644 --- a/kedro/templates/project/hooks/utils.py +++ b/kedro/templates/project/hooks/utils.py @@ -7,8 +7,8 @@ current_dir = Path.cwd() # Requirements for linting tools -lint_requirements = "black~=22.0\nruff~=0.0.290\n" # For requirements.txt -lint_pyproject_requirements = ["tool.ruff"] # For pyproject.toml +lint_requirements = "ruff~=0.1.8\n" # For requirements.txt +lint_pyproject_requirements = ["tool.ruff", "tool.ruff.format"] # For pyproject.toml # Requirements and configurations for testing tools and coverage reporting test_requirements = ( # For requirements.txt diff --git a/kedro/templates/project/prompts.yml b/kedro/templates/project/prompts.yml index 28bfdc595f..9f29069f43 100644 --- a/kedro/templates/project/prompts.yml +++ b/kedro/templates/project/prompts.yml @@ -17,7 +17,7 @@ tools: To find out more: https://docs.kedro.org/en/stable/starters/new_project_tools.html Tools - 1) Lint: Basic linting with Black and Ruff + 1) Lint: Basic linting with Ruff 2) Test: Basic testing with pytest 3) Log: Additional, environment-specific logging options 4) Docs: A Sphinx documentation setup diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/pyproject.toml b/kedro/templates/project/{{ cookiecutter.repo_name }}/pyproject.toml index 4f32efe803..229c537803 100644 --- a/kedro/templates/project/{{ cookiecutter.repo_name }}/pyproject.toml +++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/pyproject.toml @@ -51,6 +51,9 @@ fail_under = 0 show_missing = true exclude_lines = ["pragma: no cover", "raise NotImplementedError"] +[tool.ruff.format] +docstring-code-format = true + [tool.ruff] line-length = 88 show-fixes = true @@ -63,4 +66,4 @@ select = [ "PL", # Pylint "T201", # Print Statement ] -ignore = ["E501"] # Black takes care of line-too-long +ignore = ["E501"] # Ruff format takes care of line-too-long diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/requirements.txt b/kedro/templates/project/{{ cookiecutter.repo_name }}/requirements.txt index ce407f7b6d..871b096c44 100644 --- a/kedro/templates/project/{{ cookiecutter.repo_name }}/requirements.txt +++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/requirements.txt @@ -1,4 +1,3 @@ -black~=22.0 ipython>=8.10 jupyterlab>=3.0 notebook @@ -7,4 +6,4 @@ 
kedro-telemetry>=0.3.1
 pytest-cov~=3.0
 pytest-mock>=1.7.1, <2.0
 pytest~=7.2
-ruff~=0.0.290
+ruff~=0.1.8

diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
index 45ee6268a4..843e1b49cf 100644
--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
@@ -23,7 +23,7 @@
 # CONF_SOURCE = "conf"

 # Class that manages how configuration is loaded.
-# from kedro.config import OmegaConfigLoader  # noqa: import-outside-toplevel
+# from kedro.config import OmegaConfigLoader
 # CONFIG_LOADER_CLASS = OmegaConfigLoader

diff --git a/pyproject.toml b/pyproject.toml
index dcbf7c4a0d..f1cf8bc650 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,10 +53,7 @@ dynamic = ["readme", "version"]

 [project.optional-dependencies]
 test = [
-    "bandit>=1.6.2, <2.0",
     "behave==1.2.6",
-    "blacken-docs==1.9.2",
-    "black~=22.0",
     "coverage[toml]",
     "import-linter==2.0",
     "ipython>=7.31.1, <8.0; python_version < '3.8'",
@@ -127,6 +124,7 @@ version = {attr = "kedro.__version__"}

 [tool.ruff.format]
 exclude = ["**/templates", "features/steps/test_starter"]
+docstring-code-format = true

 [tool.coverage.report]
 fail_under = 100
@@ -230,15 +228,16 @@ select = [
     "UP",  # pyupgrade
     "PL",  # Pylint
     "T201", # Print Statement
+    "S",   # flake8-bandit
 ]
-ignore = ["E501"]  # ruff format takes care of line-too-long
+ignore = ["E501"]

 [tool.ruff.isort]
 known-first-party = ["kedro"]

 [tool.ruff.per-file-ignores]
 "{tests,docs}/*" = ["PLR2004","PLR0913"]
-"{tests,docs,tools,static,features,docs}/*" = ["T201"] # Check print statement for kedro/ only
+"{tests,docs,tools,static,features}/*" = ["T201", "S101", "S108"] # Check print statements and bandit rules in kedro/ only

 [tool.mypy]
 ignore_missing_imports = true

diff --git a/tests/config/test_omegaconf_config.py b/tests/config/test_omegaconf_config.py
index 640def26d4..c820bf4d0c 100644
--- a/tests/config/test_omegaconf_config.py
+++ b/tests/config/test_omegaconf_config.py
@@ -623,7 +623,7 @@ def test_env_resolver_is_registered_after_loading(self, tmp_path):
     @use_config_dir
     def test_load_config_from_tar_file(self, tmp_path):
         subprocess.run(  # noqa: PLW1510
-            [
+            [  # noqa: S603, S607
                 "tar",
                 "--exclude=local/*.yml",
                 "-czf",

diff --git a/tests/framework/cli/test_starters.py b/tests/framework/cli/test_starters.py
index 64fdd5006a..1e11c6e45b 100644
--- a/tests/framework/cli/test_starters.py
+++ b/tests/framework/cli/test_starters.py
@@ -19,6 +19,7 @@
     KedroStarterSpec,
     _convert_tool_names_to_numbers,
     _fetch_config_from_user_prompts,
+    _make_cookiecutter_args_and_fetch_template,
     _parse_tools_input,
     _parse_yes_no_to_bool,
     _validate_selection,
@@ -45,6 +46,14 @@ def mock_cookiecutter(mocker):
     return mocker.patch("cookiecutter.main.cookiecutter")


+def mock_make_cookiecutter_args_and_fetch_template(*args, **kwargs):
+    cookiecutter_args, starter_path = _make_cookiecutter_args_and_fetch_template(
+        *args, **kwargs
+    )
+    cookiecutter_args["checkout"] = "main"  # Force the checkout to be "main"
+    return cookiecutter_args, starter_path
+
+
 def _clean_up_project(project_dir):
     if project_dir.is_dir():
         shutil.rmtree(str(project_dir), ignore_errors=True)
@@ -134,7 +143,6 @@ def _assert_requirements_ok(
     with open(requirements_file_path) as requirements_file:
         requirements = requirements_file.read()

-    assert
"black" in requirements assert "ruff" in requirements pyproject_config = toml.load(pyproject_file_path) @@ -145,6 +153,7 @@ def _assert_requirements_ok( "show-fixes": True, "select": ["F", "W", "E", "I", "UP", "PL", "T201"], "ignore": ["E501"], + "format": {"docstring-code-format": True}, } } } @@ -1038,7 +1047,13 @@ class TestToolsAndExampleFromUserPrompts: ], ) @pytest.mark.parametrize("example_pipeline", ["Yes", "No"]) - def test_valid_tools_and_example(self, fake_kedro_cli, tools, example_pipeline): + def test_valid_tools_and_example( + self, fake_kedro_cli, tools, example_pipeline, mocker + ): + mocker.patch( + "kedro.framework.cli.starters._make_cookiecutter_args_and_fetch_template", + side_effect=mock_make_cookiecutter_args_and_fetch_template, + ) result = CliRunner().invoke( fake_kedro_cli, ["new"], @@ -1185,8 +1200,15 @@ class TestToolsAndExampleFromConfigFile: ], ) @pytest.mark.parametrize("example_pipeline", ["Yes", "No"]) - def test_valid_tools_and_example(self, fake_kedro_cli, tools, example_pipeline): + def test_valid_tools_and_example( + self, fake_kedro_cli, tools, example_pipeline, mocker + ): """Test project created from config.""" + mocker.patch( + "kedro.framework.cli.starters._make_cookiecutter_args_and_fetch_template", + side_effect=mock_make_cookiecutter_args_and_fetch_template, + ) + config = { "tools": tools, "project_name": "New Kedro Project", @@ -1398,7 +1420,11 @@ class TestToolsAndExampleFromCLI: ], ) @pytest.mark.parametrize("example_pipeline", ["Yes", "No"]) - def test_valid_tools_flag(self, fake_kedro_cli, tools, example_pipeline): + def test_valid_tools_flag(self, fake_kedro_cli, tools, example_pipeline, mocker): + mocker.patch( + "kedro.framework.cli.starters._make_cookiecutter_args_and_fetch_template", + side_effect=mock_make_cookiecutter_args_and_fetch_template, + ) result = CliRunner().invoke( fake_kedro_cli, ["new", "--tools", tools, "--example", example_pipeline], diff --git a/tests/io/test_data_catalog.py b/tests/io/test_data_catalog.py index ad1d699b7f..9f424c04a5 100644 --- a/tests/io/test_data_catalog.py +++ b/tests/io/test_data_catalog.py @@ -577,7 +577,7 @@ def dummy_load(obj_path, *args, **kwargs): def test_idempotent_catalog(self, sane_config): """Test that data catalog instantiations are idempotent""" - _ = DataCatalog.from_config(**sane_config) # NOQA + _ = DataCatalog.from_config(**sane_config) catalog = DataCatalog.from_config(**sane_config) assert catalog diff --git a/tests/runner/test_parallel_runner.py b/tests/runner/test_parallel_runner.py index fb967b2cb8..c53a836f17 100644 --- a/tests/runner/test_parallel_runner.py +++ b/tests/runner/test_parallel_runner.py @@ -251,7 +251,7 @@ def _describe(self) -> dict[str, Any]: return {} -ParallelRunnerManager.register("LoggingDataset", LoggingDataset) # noqa: no-member +ParallelRunnerManager.register("LoggingDataset", LoggingDataset) @pytest.mark.parametrize("is_async", [False, True]) @@ -263,7 +263,6 @@ def test_dont_release_inputs_and_outputs(self, is_async): pipeline = modular_pipeline( [node(identity, "in", "middle"), node(identity, "middle", "out")] ) - # noqa: no-member catalog = DataCatalog( { "in": runner._manager.LoggingDataset(log, "in", "stuff"), @@ -287,7 +286,6 @@ def test_release_at_earliest_opportunity(self, is_async): node(sink, "second", None), ] ) - # noqa: no-member catalog = DataCatalog( { "first": runner._manager.LoggingDataset(log, "first"), diff --git a/tools/print_env.sh b/tools/print_env.sh index 0a559a6d25..e91d8fb54c 100755 --- a/tools/print_env.sh +++ 
b/tools/print_env.sh @@ -17,7 +17,7 @@ eval_command CONDA "conda info 2>/dev/null || echo \"Conda not found\"" eval_command PYTHON "which python && python -V" eval_command PIP "python -m pip -V" eval_command PYTEST "python -m pytest --version" -eval_command BLACK "python -m black --version" +eval_command RUFF "ruff --version" eval_command BEHAVE "python -m behave --version" eval_command PRE-COMMIT "python -m pre_commit --version" eval_command SPARK "python -c \\
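The `S` (flake8-bandit) rules newly selected in `pyproject.toml` are what drive the `# noqa: S603`, `S607` and `S101` markers added throughout this diff. A minimal sketch of the pattern at the `subprocess` call sites, assuming `git` is available on `PATH`:

```python
import subprocess

# S603 flags any subprocess call in case its arguments contain untrusted
# input; S607 flags launching a process via a partial executable path
# ("git" rather than an absolute path). Both are deliberate here, so the
# diagnostics are silenced by rule code, mirroring session.py above.
sha = subprocess.check_output(
    ["git", "rev-parse", "--short", "HEAD"],  # noqa: S603, S607
    stderr=subprocess.STDOUT,
)

# S101 (use of assert) is ignored for tests/ via per-file-ignores, since
# pytest relies on bare asserts; outside tests it needs a suppression.
assert sha.decode().strip()  # noqa: S101
```
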