Skip to content

Commit

Permalink
[rocm6.4_internal_testing] remove xfail from 'batch_norm_with_update' (
Browse files Browse the repository at this point in the history
…#1821)

remove `xfail` from `batch_norm_with_update` op in `test_grad` and
`test_vmap_autograd_grad`
these tests pass now
Fixes https://ontrack-internal.amd.com/browse/SWDEV-472564
cherry-picked from rocm6.3_internal_testing PR
#1776
  • Loading branch information
dnikolaev-amd authored and AMD AMD committed Jan 9, 2025
1 parent 89f9b8d commit 0f1d47d
Showing 1 changed file with 0 additions and 14 deletions.
14 changes: 0 additions & 14 deletions test/functorch/test_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,13 +438,6 @@ class TestOperators(TestCase):
), # Works on ROCm
xfail("torch.ops.aten._flash_attention_forward"),
xfail("torch.ops.aten._efficient_attention_forward"),
# RuntimeError: Expected contiguous tensor, but got
# non-contiguous tensor for argument #2 'grad_output'
decorate(
"_batch_norm_with_update",
decorator=expectedFailureIf(TEST_WITH_ROCM),
device_type="cuda",
),
}
),
)
Expand Down Expand Up @@ -2394,13 +2387,6 @@ def fn(input, weight, bias):
skip("sparse.sampled_addmm", ""),
skip("sparse.mm", "reduce"),
skip("native_layer_norm", "", device_type="cpu"),
# RuntimeError: Expected contiguous tensor, but got
# non-contiguous tensor for argument #2 'grad_output'
decorate(
"_batch_norm_with_update",
decorator=expectedFailureIf(TEST_WITH_ROCM),
device_type="cuda",
),
},
)
@opsToleranceOverride(
Expand Down

0 comments on commit 0f1d47d

Please sign in to comment.