# Owner(s): ["oncall: distributed"]
from copy import deepcopy

import torch
import torch.distributed.checkpoint as dist_cp
from torch.distributed._tensor import init_device_mesh
from torch.distributed.checkpoint.default_planner import (
    DefaultLoadPlanner,
    DefaultSavePlanner,
)
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
    DTensorTestBase,
    MLPModule,
    skip_if_lt_x_gpu,
    with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir


class TestTpCheckpoint(DTensorTestBase):
    @with_comms
    @skip_if_lt_x_gpu(2)
    @with_temp_dir
    def test_tp_checkpoint(self):
        CHECKPOINT_DIR = self.temp_dir
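
        # Build a 1-D device mesh spanning all ranks; the tensor-parallel
        # sharding below happens across this mesh.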
        mesh_shape = (self.world_size,)
        tp_mesh = init_device_mesh(self.device_type, mesh_shape)
        # Create the model and move it to the GPU with device id rank.
        model = MLPModule(self.device_type).cuda(self.rank)
        # Parallelize the module based on the given parallel style.
        model = parallelize_module(model, tp_mesh, PairwiseParallel())
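        # PairwiseParallel shards each pair of Linear layers colwise then
        # rowwise, so the parameters in the state_dict are DTensors over tp_mesh.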
        optimizer = torch.optim.SGD(model.parameters(), lr=0.25)
        original_state_dict = deepcopy(model.state_dict())
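
        # Save the original (pre-optimizer-step) state_dict to CHECKPOINT_DIR.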
        dist_cp.save_state_dict(
            state_dict=original_state_dict,
            storage_writer=dist_cp.FileSystemWriter(CHECKPOINT_DIR),
            planner=DefaultSavePlanner(),
        )

        # Run one forward/backward/optimizer step so that model.state_dict()
        # diverges from original_state_dict.
        torch.manual_seed(0)
        inp = torch.rand(20, 10).cuda(self.rank)
        output = model(inp)
        output.sum().backward()
        optimizer.step()
        state_dict = model.state_dict()

        # Ensure the current model parameters differ from original_state_dict
        # before loading from the checkpoint.
        for param1, param2 in zip(original_state_dict.values(), state_dict.values()):
            self.assertNotEqual(param1.to_local(), param2.to_local())
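
        # dist_cp.load_state_dict reads the checkpoint and copies the saved
        # values in place into the tensors of the passed-in state_dict.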
        dist_cp.load_state_dict(
            state_dict=state_dict,
            storage_reader=dist_cp.FileSystemReader(CHECKPOINT_DIR),
            planner=DefaultLoadPlanner(),
        )

        # Now that the checkpoint has been loaded, the current model parameters
        # should match original_state_dict again.
        for param1, param2 in zip(original_state_dict.values(), state_dict.values()):
            self.assertEqual(param1.to_local(), param2.to_local())


if __name__ == "__main__":
    run_tests()