From f05f36bbc7b5a2d68e2df1eb1fd725a8e6857522 Mon Sep 17 00:00:00 2001
From: ramcherukuri
Date: Fri, 11 Oct 2024 19:43:00 +0000
Subject: [PATCH] change tolerance for test_cublas_addmm

---
 test/test_matmul_cuda.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_matmul_cuda.py b/test/test_matmul_cuda.py
index 302fe3638f02c0..99527e62e3b165 100644
--- a/test/test_matmul_cuda.py
+++ b/test/test_matmul_cuda.py
@@ -110,7 +110,7 @@ def cublas_addmm(self, size: int, dtype: torch.dtype, reduced_precision: bool =
     @onlyCUDA
     @skipIfRocmVersionLessThan((5, 2))
     # imported 'tol' as 'xtol' to avoid aliasing in code above
-    @toleranceOverride({torch.float16: xtol(atol=1e-1, rtol=1e-1),
+    @toleranceOverride({torch.float16: xtol(atol=4e-1, rtol=1e-1),
                         torch.bfloat16: xtol(atol=1e-1, rtol=1e-1),
                         torch.float32: xtol(atol=1e-1, rtol=1e-1)})
    @dtypes(torch.float16, torch.bfloat16, torch.float32)
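
For context (not part of the patch): a minimal sketch of how loosening the float16 absolute tolerance from 1e-1 to 4e-1 changes what the test accepts. The `toleranceOverride`/`xtol` decorator supplies atol/rtol values like these to the test's tensor comparisons; the example below uses `torch.testing.assert_close` directly to illustrate the bound, and the specific tensor values are illustrative assumptions.

```python
import torch

# Illustrative values (not from the test): a 0.3 discrepancy near zero,
# where the rtol term contributes nothing and only atol matters.
expected = torch.zeros(1, dtype=torch.float16)
actual = torch.full((1,), 0.3, dtype=torch.float16)

# Allowed error is atol + rtol * |expected|.
# With the old float16 tolerance (atol=1e-1), this comparison would fail:
# torch.testing.assert_close(actual, expected, atol=1e-1, rtol=1e-1)  # raises AssertionError
# With the new tolerance from the patch (atol=4e-1), it passes:
torch.testing.assert_close(actual, expected, atol=4e-1, rtol=1e-1)
```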