Add an option that skips all tests if the scenario contains only xfail/skip tests
cbeauchesne authored Jan 3, 2025
1 parent 1c17b00 commit 1573ab3
Showing 13 changed files with 95 additions and 14 deletions.
1 change: 1 addition & 0 deletions .github/workflows/ci.yml
@@ -110,6 +110,7 @@ jobs:
build_proxy_image: ${{ contains(github.event.pull_request.labels.*.name, 'build-proxy-image') }}
build_lib_injection_app_images: ${{ contains(github.event.pull_request.labels.*.name, 'build-lib-injection-app-images') }}
_experimental_parametric_job_count: ${{ matrix.version == 'dev' && 2 || 1 }} # test both use cases
skip_empty_scenarios: true

system_tests_docker_mode:
name: Ruby Docker Mode
6 changes: 6 additions & 0 deletions .github/workflows/run-end-to-end.yml
@@ -47,6 +47,11 @@ on:
default: false
required: false
type: boolean
skip_empty_scenarios:
description: "Skip scenarios that contains only xfail or irrelevant tests"
default: false
required: false
type: boolean

env:
REGISTRY: ghcr.io
@@ -61,6 +66,7 @@
env:
SYSTEM_TESTS_REPORT_ENVIRONMENT: ${{ inputs.ci_environment }}
SYSTEM_TESTS_REPORT_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SYSTEM_TESTS_SKIP_EMPTY_SCENARIO: ${{ inputs.skip_empty_scenarios }}
steps:
- name: Checkout
uses: actions/checkout@v4
6 changes: 6 additions & 0 deletions .github/workflows/run-graphql.yml
@@ -26,6 +26,11 @@ on:
default: 'custom'
required: false
type: string
skip_empty_scenarios:
description: "Skip scenarios that contains only xfail or irrelevant tests"
default: false
required: false
type: boolean

env:
REGISTRY: ghcr.io
@@ -43,6 +48,7 @@
env:
SYSTEM_TESTS_REPORT_ENVIRONMENT: ${{ inputs.ci_environment }}
SYSTEM_TESTS_REPORT_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SYSTEM_TESTS_SKIP_EMPTY_SCENARIO: ${{ inputs.skip_empty_scenarios }}

steps:
- name: Checkout
8 changes: 8 additions & 0 deletions .github/workflows/run-open-telemetry.yml
@@ -17,11 +17,18 @@ on:
default: "[]"
required: false
type: string
skip_empty_scenarios:
description: "Skip scenarios that contains only xfail or irrelevant tests"
default: false
required: false
type: boolean

jobs:
open-telemetry-manual:
if: inputs.library == 'java'
runs-on: ubuntu-latest
env:
SYSTEM_TESTS_SKIP_EMPTY_SCENARIO: ${{ inputs.skip_empty_scenarios }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -94,6 +101,7 @@ jobs:
env:
TEST_LIBRARY: ${{ inputs.library }}_otel
WEBLOG_VARIANT: ${{ matrix.weblog }}
SYSTEM_TESTS_SKIP_EMPTY_SCENARIO: ${{ inputs.skip_empty_scenarios }}
steps:
- name: Checkout
uses: actions/checkout@v4
8 changes: 8 additions & 0 deletions .github/workflows/system-tests.yml
@@ -51,6 +51,11 @@ on:
default: false
required: false
type: boolean
skip_empty_scenarios:
description: "Skip scenarios that contains only xfail or irrelevant tests"
default: false
required: false
type: boolean
_experimental_parametric_job_count:
description: "*EXPERIMENTAL* : How many jobs should be used to run PARAMETRIC scenario"
default: 1
@@ -92,6 +97,7 @@ jobs:
binaries_artifact: ${{ inputs.binaries_artifact }}
ci_environment: ${{ inputs.ci_environment }}
build_proxy_image: ${{ inputs.build_proxy_image }}
skip_empty_scenarios: ${{ inputs.skip_empty_scenarios }}

lib-injection:
needs:
@@ -122,6 +128,7 @@
weblogs: ${{ needs.compute_parameters.outputs.endtoend_weblogs }}
binaries_artifact: ${{ inputs.binaries_artifact }}
ci_environment: ${{ inputs.ci_environment }}
skip_empty_scenarios: ${{ inputs.skip_empty_scenarios }}

open-telemetry:
needs:
@@ -133,6 +140,7 @@
library: ${{ inputs.library }}
weblogs: ${{ needs.compute_parameters.outputs.opentelemetry_weblogs }}
build_proxy_image: ${{ inputs.build_proxy_image }}
skip_empty_scenarios: ${{ inputs.skip_empty_scenarios }}

external-processing:
needs:
46 changes: 43 additions & 3 deletions conftest.py
@@ -36,6 +36,11 @@ def pytest_addoption(parser):
"--force-execute", "-F", action="append", default=[], help="Item to execute, even if they are skipped"
)
parser.addoption("--scenario-report", action="store_true", help="Produce a report on nodeids and their scenario")
parser.addoption(
"--skip-empty-scenario",
action="store_true",
help="Skip scenario if it contains only tests marked as xfail or irrelevant",
)

parser.addoption("--force-dd-trace-debug", action="store_true", help="Set DD_TRACE_DEBUG to true")
parser.addoption("--force-dd-iast-debug", action="store_true", help="Set DD_IAST_DEBUG_ENABLED to true")
@@ -122,6 +127,12 @@ def pytest_configure(config):
if not config.option.report_run_url and "SYSTEM_TESTS_REPORT_RUN_URL" in os.environ:
config.option.report_run_url = os.environ["SYSTEM_TESTS_REPORT_RUN_URL"]

if (
not config.option.skip_empty_scenario
and os.environ.get("SYSTEM_TESTS_SKIP_EMPTY_SCENARIO", "").lower() == "true"
):
config.option.skip_empty_scenario = True

# First of all, we must get the current scenario
for name in dir(scenarios):
if name.upper() == config.option.scenario:
@@ -272,6 +283,7 @@ def pytest_collection_modifyitems(session, config, items: list[pytest.Item]):
def iter_markers(self, name=None):
return (x[1] for x in self.iter_markers_with_node(name=name) if x[1].name not in ("skip", "skipif", "xfail"))

must_pass_item_count = 0
for item in items:
# if the item has explicit scenario markers, we use them
# otherwise we use markers declared on its parents
@@ -302,11 +314,19 @@ def iter_markers(self, name=None):
# including parent's markers) to exclude the skip, skipif and xfail markers.
item.iter_markers = types.MethodType(iter_markers, item)

if _item_must_pass(item):
must_pass_item_count += 1

else:
logger.debug(f"{item.nodeid} is not included in {context.scenario}")
deselected.append(item)
items[:] = selected
config.hook.pytest_deselected(items=deselected)

if must_pass_item_count == 0 and session.config.option.skip_empty_scenario:
items[:] = []
config.hook.pytest_deselected(items=items)
else:
items[:] = selected
config.hook.pytest_deselected(items=deselected)

if config.option.scenario_report:
with open(f"{context.scenario.host_log_folder}/scenarios.json", "w", encoding="utf-8") as f:
@@ -317,6 +337,22 @@ def pytest_deselected(items):
_deselected_items.extend(items)


def _item_must_pass(item) -> bool:
"""Returns True if the item must pass to be considered as a success"""

if any(item.iter_markers("skip")):
return False

if any(item.iter_markers("xfail")):
return False

for marker in item.iter_markers("skipif"):
if all(marker.args[0]):
return False

return True


def _item_is_skipped(item):
return any(item.iter_markers("skip"))

@@ -382,7 +418,7 @@ def pytest_collection_finish(session: pytest.Session):
if not session.config.option.replay:
setup_properties.dump(context.scenario.host_log_folder)

context.scenario.post_setup()
context.scenario.post_setup(session)


def pytest_runtest_call(item):
@@ -412,6 +448,10 @@ def pytest_json_modifyreport(json_report):
def pytest_sessionfinish(session, exitstatus):
logger.info("Executing pytest_sessionfinish")

if session.config.option.skip_empty_scenario and exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED:
exitstatus = pytest.ExitCode.OK
session.exitstatus = pytest.ExitCode.OK

context.scenario.pytest_sessionfinish(session, exitstatus)

if session.config.option.collectonly or session.config.option.replay:
2 changes: 1 addition & 1 deletion docs/edit/scenarios.md
@@ -27,7 +27,7 @@ class CustomScenario(Scenario):

return warmups

def post_setup(self):
def post_setup(self, session):
""" called after setup functions, and before test functions """

def pytest_sessionfinish(self, session, exitstatus):
3 changes: 3 additions & 0 deletions docs/execute/skip-empty-scenario.md
@@ -0,0 +1,3 @@
The `--skip-empty-scenario` option deselects all tests if the current scenario contains only tests marked as xfail or skipped (`bug`, `flaky`, `missing_feature`, `irrelevant`).

This option can also be activated with the environment variable `SYSTEM_TESTS_SKIP_EMPTY_SCENARIO=True`.
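
As a rough sketch of the behaviour, consider a hypothetical test module in which every collected item is skipped or expected to fail. Plain pytest markers are used here for illustration, standing in for the `bug`, `flaky`, `missing_feature` and `irrelevant` markers mentioned above:

```python
import pytest


# Hypothetical module: nothing here has to pass, so the scenario is "empty".
@pytest.mark.xfail(reason="known bug, fix not released yet")
def test_known_bug():
    raise AssertionError("still broken")


@pytest.mark.skip(reason="irrelevant for this library")
def test_irrelevant_feature():
    assert True


@pytest.mark.skipif(True, reason="feature missing on this weblog")
def test_missing_feature():
    assert True
```

With `--skip-empty-scenario` (or `SYSTEM_TESTS_SKIP_EMPTY_SCENARIO=True`), such a scenario is deselected entirely: the end-to-end interface timeouts are forced to zero and the session exits with `pytest.ExitCode.OK` instead of `NO_TESTS_COLLECTED`.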
2 changes: 1 addition & 1 deletion utils/_context/_scenarios/core.py
@@ -137,7 +137,7 @@ def get_warmups(self):
lambda: logger.stdout(f"Logs folder: ./{self.host_log_folder}"),
]

def post_setup(self):
def post_setup(self, session: pytest.Session):
"""Called after test setup"""

def pytest_sessionfinish(self, session, exitstatus):
2 changes: 1 addition & 1 deletion utils/_context/_scenarios/docker_ssi.py
@@ -184,7 +184,7 @@ def print_installed_components(self):
for conf in self.configuration:
logger.stdout(f"{conf}: {self.configuration[conf]}")

def post_setup(self):
def post_setup(self, session): # noqa: ARG002
logger.stdout("--- Waiting for all traces and telemetry to be sent to test agent ---")
data = None
attempts = 0
21 changes: 15 additions & 6 deletions utils/_context/_scenarios/endtoend.py
@@ -481,15 +481,18 @@ def _wait_for_app_readiness(self):
raise ValueError("Datadog agent not ready")
logger.debug("Agent ready")

def post_setup(self):
def post_setup(self, session: pytest.Session):
# if no tests are run, skip interface timeouts
is_empty_test_run = session.config.option.skip_empty_scenario and len(session.items) == 0

try:
self._wait_and_stop_containers()
self._wait_and_stop_containers(force_interface_timout_to_zero=is_empty_test_run)
finally:
self.close_targets()

interfaces.library_dotnet_managed.load_data()

def _wait_and_stop_containers(self):
def _wait_and_stop_containers(self, *, force_interface_timout_to_zero: bool):
if self.replay:
logger.terminal.write_sep("-", "Load all data from logs")
logger.terminal.flush()
@@ -507,7 +510,9 @@ def _wait_and_stop_containers(self):
interfaces.backend.load_data_from_logs()

else:
self._wait_interface(interfaces.library, self.library_interface_timeout)
self._wait_interface(
interfaces.library, 0 if force_interface_timout_to_zero else self.library_interface_timeout
)

if self.library in ("nodejs",):
from utils import weblog # TODO better interface
@@ -531,11 +536,15 @@ def _wait_and_stop_containers(self):
container.stop()
container.interface.check_deserialization_errors()

self._wait_interface(interfaces.agent, self.agent_interface_timeout)
self._wait_interface(
interfaces.agent, 0 if force_interface_timout_to_zero else self.agent_interface_timeout
)
self.agent_container.stop()
interfaces.agent.check_deserialization_errors()

self._wait_interface(interfaces.backend, self.backend_interface_timeout)
self._wait_interface(
interfaces.backend, 0 if force_interface_timout_to_zero else self.backend_interface_timeout
)

def _wait_interface(self, interface, timeout):
logger.terminal.write_sep("-", f"Wait for {interface} ({timeout}s)")
2 changes: 1 addition & 1 deletion utils/_context/_scenarios/external_processing.py
@@ -77,7 +77,7 @@ def get_warmups(self) -> list:

return warmups

def post_setup(self):
def post_setup(self, session: pytest.Session): # noqa: ARG002
try:
self._wait_and_stop_containers()
finally:
2 changes: 1 addition & 1 deletion utils/_context/_scenarios/open_telemetry.py
@@ -134,7 +134,7 @@ def _wait_for_app_readiness(self):
raise ValueError("Open telemetry interface not ready")
logger.debug("Open telemetry ready")

def post_setup(self):
def post_setup(self, session: pytest.Session): # noqa: ARG002
if self.use_proxy:
self._wait_interface(interfaces.open_telemetry, 5)
self._wait_interface(interfaces.backend, self.backend_interface_timeout)
