86990 range mps support (#91075)

Fixes #86990

- Added range_mps_out to RangeFactories.mm
- Updated native_functions.yaml
- Added tests in test_mps.py

I did observe that despite [the documentation for torch.range](https://pytorch.org/docs/stable/generated/torch.range.html), the existing implementations do not adjust their return type based off the arguments passed to them. The MPS implementation provided here behaves the same way as the existing CPU and CUDA implementations in this regard, hence the conversion to float32 in the test cases.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/91075
Approved by: https://github.com/kulinseth, https://github.com/DenisVieriu97
diff --git a/test/test_mps.py b/test/test_mps.py
index fa788f3..d23027e 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -5925,6 +5925,13 @@
         y_cpu = torch.arange(0, 0, 1, out=out_cpu)
         self.assertEqual(y_mps, y_cpu)
 
+    # Test range
+    def test_range(self):
+        self.assertEqual(np.arange(11, dtype=np.float32), torch.range(0, 10, device='mps'))
+        self.assertEqual(np.arange(7, 0, -1, dtype=np.float32), torch.range(7, 1, -1, device='mps'))
+        self.assertEqual(np.array([1.0000, 1.3000, 1.6000, 1.9000], dtype=np.float32), torch.range(1, 2, .3, device='mps'))
+        self.assertEqual(np.arange(6.3, dtype=np.float32), torch.arange(0, 6.3, device='mps'))
+
     # Test softmax
     def test_softmax(self):
         def helper(shape, dim, channels_last=False):