[MPS] Add `random_` overload (#98333)
The new overload simply calls `random_(from=0, to=None)`.
Also fix the optional upper-bound calculation for all dtypes except int64:
as documented in https://pytorch.org/docs/stable/generated/torch.Tensor.random_.html,
the `from` boundary is inclusive but `to` is exclusive, i.e. when `to` is
omitted it should default to `128` for `torch.int8` and to `2` for
`torch.bool`.
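For illustration only, a minimal Python sketch of that rule (the actual fix lives in the MPS backend code; the helper name `default_to` is made up for this example and is not part of the PR):

```python
import torch

def default_to(dtype):
    # Exclusive upper bound used when `to` is omitted (sketch, not the real kernel code).
    if dtype == torch.bool:
        return 2                           # random_() should produce {0, 1}
    if not dtype.is_floating_point:
        # `to` is exclusive, so it is one past the dtype's maximum, e.g. 128 for int8.
        # (int64 needs separate handling, since max + 1 does not fit into int64.)
        return torch.iinfo(dtype).max + 1
    # Floating-point dtypes are bounded by the largest integer the mantissa
    # can represent exactly (e.g. 2**24 for float32); omitted here for brevity.
    raise NotImplementedError

assert default_to(torch.int8) == 128
assert default_to(torch.uint8) == 256
assert default_to(torch.bool) == 2
```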
Add a test for `torch.Tensor.random_`.
Fixes https://github.com/pytorch/pytorch/issues/98118
Pull Request resolved: https://github.com/pytorch/pytorch/pull/98333
Approved by: https://github.com/kulinseth
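For reference, a short usage sketch of the new overload (assumes a machine with an MPS-capable device):

```python
import torch

x = torch.empty(10, dtype=torch.int8, device='mps')
x.random_()   # equivalent to random_(from=0, to=None): int8 values in [0, 127]

b = torch.empty(10, dtype=torch.bool, device='mps')
b.random_()   # values in {False, True}
```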
diff --git a/test/test_mps.py b/test/test_mps.py
index 9f6a00a..2b4b0d3 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -7174,7 +7174,7 @@
         self.assertTrue(current_alloc_after > current_alloc_before)
         self.assertTrue(driver_alloc_after > driver_alloc_before)
 
-    # Test random_.to and random_.from
+    # Test random_, random_.to and random_.from
     def test_random(self):
         def helper(shape, low, high, dtype=torch.int32):
 
@@ -7182,8 +7182,8 @@
 
             # We can't check reliably the mean and std.
             # Just make sure we don't return constant values
-            self.assertNotEqual(mps_out.to('cpu').float().mean(), 0.)
-            self.assertNotEqual(mps_out.to('cpu').float().std(), 0.)
+            self.assertNotEqual(mps_out.float().mean().item(), 0.)
+            self.assertNotEqual(mps_out.float().std().item(), 0.)
 
         helper([100, 100], 0, 10)
         helper([100, 100], 23, 89)
@@ -7191,6 +7191,12 @@
         helper([100, 100], 23, 89, dtype=torch.int64)
         helper([100, 100], 0, 2, dtype=torch.bool)
 
+        # Test random_
+        for dtype in [torch.bool, torch.int8, torch.uint8, torch.int32, torch.float16, torch.float32]:
+            x = torch.empty(10, 10, dtype=dtype, device='mps')
+            x.random_()
+            self.assertNotEqual(x.max().item(), 0)
+
     # Test exponential
     def test_exponential(self):
         def helper(shape, lamda, dtype=torch.float32):