add lerp cpu support for half (#105607)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105607
Approved by: https://github.com/albanD
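
For context, a minimal usage sketch (not part of this PR's diff) of what the change enables: calling `torch.lerp` on CPU tensors with dtype `torch.half`, which the previous test expectations treated as an error case on CPU (only `torch.bfloat16` was exercised among the low-precision dtypes). The tensor shapes, values, and the float32 reference pattern below are illustrative assumptions modeled on `test_lerp_lowp_cpu`, not code from this PR.

```python
import torch

# Sketch of the newly enabled path: lerp on CPU half tensors.
# Before this change, the CPU low-precision lerp test only covered bfloat16,
# and the foreach test expected a RuntimeError for half on CPU.
start = torch.full((4,), 0.0, dtype=torch.half)  # CPU tensors by default
end = torch.full((4,), 0.1, dtype=torch.half)
weight = 0.5

out = torch.lerp(start, end, weight)

# Reference computed in float32 and cast back, mirroring the pattern
# used by the low-precision lerp tests.
ref = torch.lerp(start.float(), end.float(), weight).to(torch.half)
torch.testing.assert_close(out, ref)
```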
diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py
index 0785666..6c044cc 100644
--- a/test/test_binary_ufuncs.py
+++ b/test/test_binary_ufuncs.py
@@ -3410,7 +3410,6 @@
@onlyCUDA
@dtypes(torch.half, torch.bfloat16)
def test_lerp_lowp(self, device, dtype):
- ref_dtype = torch.float
xvals = (0.0, -30000.0)
yvals = (0.1, -20000.0)
xs = [torch.full((4,), xval, device=device, dtype=dtype) for xval in xvals]
@@ -3425,7 +3424,7 @@
self.assertEqual(actual, expected, atol=0.0, rtol=0.0)
@onlyCPU
- @dtypes(torch.bfloat16)
+ @dtypes(torch.half, torch.bfloat16)
def test_lerp_lowp_cpu(self, device, dtype):
xvals = (0.0, -30000.0)
yvals = (0.1, -20000.0)
diff --git a/test/test_foreach.py b/test/test_foreach.py
index 8ba6925..f76d16b 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -809,7 +809,7 @@
kwargs["weight"] = args[1]
ref_kwargs["weight"] = args[1]
- if dtype in integral_types() or dtype == torch.bool or (not self.is_cuda and dtype == torch.half):
+ if dtype in integral_types() or dtype == torch.bool:
with self.assertRaises(RuntimeError):
wrapped_op(inputs, self.is_cuda, is_fastpath, **kwargs)
return
diff --git a/test/test_mps.py b/test/test_mps.py
index ee8500b..0da3f7b 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -10471,7 +10471,7 @@
'nn.functional.huber_loss',
'true_divide', 'kron',
'gradient', 'var', 'std', 'ldexp',
- 'linalg.vector_norm',
+ 'linalg.vector_norm', 'lerp',
'addr', 'var_mean',
'var_mean_unbiased',
'acosh', 'asinh', 'asin',