Fix test_elastic_training_agent.py for torch version 2.4 and above. #1531

Open · wants to merge 2 commits into base: master

155 changes: 50 additions & 105 deletions dlrover/python/elastic_agent/torch/training.py
@@ -774,6 +774,26 @@ def _get_free_port(self):
             free_port = find_free_port_in_range(20000, 30000)
         return free_port
 
+    def _get_ranks(
+        self,
+        role_infos: List[_RoleInstanceInfo],
+        role_idx: int,
+        start_idx: int = 0,
+        end_idx: int = -1,
+    ) -> Tuple[int, List[int]]:
+        if end_idx == -1:
+            end_idx = len(role_infos)
+        prefix_sum = 0
+        total_sum = 0
+        for idx in range(start_idx, end_idx):
+            if role_idx > idx:
+                prefix_sum += role_infos[idx].local_world_size
+            total_sum += role_infos[idx].local_world_size
+        return (
+            total_sum,
+            list(range(prefix_sum, prefix_sum + role_infos[role_idx].local_world_size)),
+        )
+
     # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
     #  `torch.distributed.elastic.metrics.prof`.
     @prof
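
The `_get_ranks` helper added above vendors the rank bookkeeping that older torchelastic agents provided: it sums `local_world_size` over a slice of role infos to get the world size, and uses the prefix sum of the entries before `role_idx` as this node's first global rank. Below is a minimal, self-contained sketch of the same arithmetic; `FakeRoleInfo` and `get_ranks` are illustrative stand-ins (not `_RoleInstanceInfo` or the dlrover method), shown only to make the returned values concrete.

```python
# Illustrative sketch only: FakeRoleInfo stands in for _RoleInstanceInfo
# with just the one field that _get_ranks reads.
from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class FakeRoleInfo:
    local_world_size: int


def get_ranks(
    role_infos: List[FakeRoleInfo],
    role_idx: int,
    start_idx: int = 0,
    end_idx: int = -1,
) -> Tuple[int, List[int]]:
    # Same arithmetic as the vendored helper: total workers in the slice,
    # plus the contiguous global ranks owned by role_infos[role_idx].
    if end_idx == -1:
        end_idx = len(role_infos)
    prefix_sum = 0
    total_sum = 0
    for idx in range(start_idx, end_idx):
        if role_idx > idx:
            prefix_sum += role_infos[idx].local_world_size
        total_sum += role_infos[idx].local_world_size
    return total_sum, list(
        range(prefix_sum, prefix_sum + role_infos[role_idx].local_world_size)
    )


# Three nodes with 2, 4 and 2 local workers: node 1 owns global ranks 2..5.
infos = [FakeRoleInfo(2), FakeRoleInfo(4), FakeRoleInfo(2)]
print(get_ranks(infos, 1))  # (8, [2, 3, 4, 5])
```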
@@ -809,112 +829,37 @@ def _assign_worker_ranks(
             role_infos.append(role_info)
         group_rank = nodes.index(node_id)
 
-        if version_less_than_240():
-            my_role_info = role_infos[group_rank]
-            worker_world_size, worker_global_ranks = self._get_ranks(
-                role_infos, group_rank
-            )
-            role_infos = sorted(
-                role_infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare)
-            )
-            (
-                role_start_idx,
-                role_end_idx,
-            ) = _RoleInstanceInfo.find_role_boundaries(
-                role_infos, my_role_info.role
-            )
-            role_pos = next(
-                idx
-                for idx, role_info in enumerate(role_infos)
-                if _RoleInstanceInfo.compare(role_info, my_role_info) == 0
-            )
-            role_world_size, role_ranks = self._get_ranks(
-                role_infos, role_pos, role_start_idx, role_end_idx + 1
-            )
-            workers = []
-            for ind in range(spec.local_world_size):
-                worker = Worker(
-                    local_rank=ind,
-                    global_rank=worker_global_ranks[ind],
-                    role_rank=role_ranks[ind],
-                    world_size=worker_world_size,
-                    role_world_size=role_world_size,
-                )
-                workers.append(worker)
-        else:
-            group_world_size = len(world)
-
-            ROLE_INFO_PREFIX = "torchelastic/role_info/"
-            ASSIGNED_RANKS_PREFIX = "torchelastic/assigned_ranks/"
-
-            agent_role_info = _RoleInstanceInfo(
-                spec.role, group_rank, spec.local_world_size
-            )
-            self._store.set(
-                f"{ROLE_INFO_PREFIX}{group_rank}", agent_role_info.serialize()
-            )
-
-            if group_rank == 0:
-                role_infos_bytes = self._store.multi_get(
-                    [
-                        f"torchelastic/role_info/{i}"
-                        for i in range(group_world_size)
-                    ]
-                )
-                role_infos = [
-                    _RoleInstanceInfo.deserialize(info_bytes)
-                    for info_bytes in role_infos_bytes
-                ]
-
-                role_sizes: DefaultDict[str, int] = defaultdict(lambda: 0)
-                global_size = 0
-                for role_info in role_infos:
-                    role_sizes[role_info.role] += role_info.local_world_size
-                    global_size += role_info.local_world_size
-
-                base_global_rank = 0
-                role_ranks = defaultdict(lambda: 0)
-
-                keys = []
-                values = []
-                for i, role_info in enumerate(role_infos):
-                    keys.append(f"{ASSIGNED_RANKS_PREFIX}{i}")
-                    values.append(
-                        json.dumps(
-                            [
-                                base_global_rank,
-                                global_size,
-                                role_ranks[role_info.role],
-                                role_sizes[role_info.role],
-                            ]
-                        )
-                    )
-
-                    base_global_rank += role_info.local_world_size
-                    role_ranks[role_info.role] += role_info.local_world_size
-
-                self._store.multi_set(keys, values)
-
-            # get will block until the data is available in the store.
-            (
-                base_global_rank,
-                global_world_size,
-                base_role_rank,
-                role_world_size,
-            ) = json.loads(
-                self._store.get(f"{ASSIGNED_RANKS_PREFIX}{group_rank}")
+        my_role_info = role_infos[group_rank]
+        worker_world_size, worker_global_ranks = self._get_ranks(
+            role_infos, group_rank
+        )
+        role_infos = sorted(
+            role_infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare)
+        )
+        (
+            role_start_idx,
+            role_end_idx,
+        ) = _RoleInstanceInfo.find_role_boundaries(
+            role_infos, my_role_info.role
+        )
+        role_pos = next(
+            idx
+            for idx, role_info in enumerate(role_infos)
+            if _RoleInstanceInfo.compare(role_info, my_role_info) == 0
+        )
+        role_world_size, role_ranks = self._get_ranks(
+            role_infos, role_pos, role_start_idx, role_end_idx + 1
+        )
+        workers = []
+        for ind in range(spec.local_world_size):
+            worker = Worker(
+                local_rank=ind,
+                global_rank=worker_global_ranks[ind],
+                role_rank=role_ranks[ind],
+                world_size=worker_world_size,
+                role_world_size=role_world_size,
             )
-
-            workers = []
-            for local_rank in range(spec.local_world_size):
-                worker = Worker(
-                    local_rank=local_rank,
-                    global_rank=base_global_rank + local_rank,
-                    role_rank=base_role_rank + local_rank,
-                    world_size=global_world_size,
-                    role_world_size=role_world_size,
-                )
-                workers.append(worker)
+            workers.append(worker)
         return workers
 
     def _initialize_workers(self, worker_group, max_errors=3):
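
With the `version_less_than_240()` branch removed, `_assign_worker_ranks` no longer switches implementations by torch release: ranks are always computed locally from the rendezvous world through the vendored `_get_ranks`, instead of the store-based exchange that newer torch releases use upstream (the path deleted above). The version gates themselves (`version_less_than_230`, `version_less_than_240`) come from `dlrover.trainer.torch.utils`; the sketch below shows how such a gate is typically written by comparing `torch.__version__`. It is an assumption for illustration, not dlrover's actual helper.

```python
# Hypothetical sketch of a torch-version gate (assumed implementation; the
# real helpers live in dlrover.trainer.torch.utils).
from packaging import version

import torch


def torch_older_than(target: str) -> bool:
    """Return True if the installed torch release predates `target`."""
    # Drop any local build suffix such as "+cu121" before comparing.
    installed = version.parse(torch.__version__.split("+")[0])
    return installed < version.parse(target)


if __name__ == "__main__":
    print(torch_older_than("2.4.0"))  # e.g. False on torch 2.4 and above
```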
22 changes: 16 additions & 6 deletions dlrover/python/tests/test_elastic_training_agent.py
@@ -91,6 +91,7 @@
     node_health_check,
 )
 from dlrover.python.tests.test_utils import start_local_master
+from dlrover.trainer.torch.utils import version_less_than_230
 
 _metric_context = JobMetricContext.singleton_instance()
 _dlrover_context = Context.singleton_instance()
@@ -129,6 +130,10 @@ def setUp(self) -> None:
         )
         self.rdzv_handler.join_timeout = 5
 
+        if version_less_than_230():
+            logs_dict = {"redirects": self.config.redirects, "tee": self.config.tee}
+        else:
+            logs_dict = {}
         self.spec = WorkerSpec(
             role=self.config.role,
             local_world_size=self.config.nproc_per_node,
@@ -137,10 +142,9 @@
             rdzv_handler=self.rdzv_handler,
             max_restarts=self.config.max_restarts,
             monitor_interval=self.config.monitor_interval,
-            redirects=self.config.redirects,
-            tee=self.config.tee,
             master_addr=master_addr,
             local_addr=self.config.local_addr,
+            **logs_dict
         )
         JobConstant.TRAINING_AGENT_LOOP_DEFAULT_INTERVAL = 1
 
@@ -493,6 +497,10 @@ def setUp(self) -> None:
         )
         self.rdzv_handler.join_timeout = 5
 
+        if version_less_than_230():
+            logs_dict = {"redirects": self.config.redirects, "tee": self.config.tee}
+        else:
+            logs_dict = {}
         self.spec = WorkerSpec(
             role=self.config.role,
             local_world_size=self.config.nproc_per_node,
@@ -501,10 +509,9 @@
             rdzv_handler=self.rdzv_handler,
             max_restarts=self.config.max_restarts,
             monitor_interval=self.config.monitor_interval,
-            redirects=self.config.redirects,
-            tee=self.config.tee,
             master_addr=master_addr,
             local_addr=self.config.local_addr,
+            **logs_dict
        )
         JobConstant.TRAINING_AGENT_LOOP_DEFAULT_INTERVAL = 1
 
@@ -1028,6 +1035,10 @@ def setUp(self) -> None:
         )
         self.rdzv_handler.join_timeout = 5
 
+        if version_less_than_230():
+            logs_dict = {"redirects": self.config.redirects, "tee": self.config.tee}
+        else:
+            logs_dict = {}
         self.spec = WorkerSpec(
             role=self.config.role,
             local_world_size=self.config.nproc_per_node,
@@ -1036,10 +1047,9 @@
             rdzv_handler=self.rdzv_handler,
             max_restarts=self.config.max_restarts,
             monitor_interval=self.config.monitor_interval,
-            redirects=self.config.redirects,
-            tee=self.config.tee,
             master_addr=master_addr,
             local_addr=self.config.local_addr,
+            **logs_dict
         )
         JobConstant.TRAINING_AGENT_LOOP_DEFAULT_INTERVAL = 1
 
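
All three `setUp` methods receive the same fix: `redirects` and `tee` are only valid `WorkerSpec` keyword arguments on older torch releases, while newer releases handle log redirection outside of `WorkerSpec` (hence the `version_less_than_230` gate), so passing them unconditionally breaks spec construction on torch 2.4 and above. The tests therefore build the extra kwargs conditionally and splat them into the constructor. A small sketch of that pattern follows; `legacy_log_kwargs` is a hypothetical helper, the tests build `logs_dict` inline.

```python
# Sketch of the conditional-kwargs pattern used in the tests above.
from typing import Any, Dict

from dlrover.trainer.torch.utils import version_less_than_230


def legacy_log_kwargs(redirects: Any, tee: Any) -> Dict[str, Any]:
    """Return redirects/tee kwargs only where WorkerSpec still accepts them."""
    if version_less_than_230():
        return {"redirects": redirects, "tee": tee}
    return {}


# Usage (illustrative):
#   spec = WorkerSpec(..., **legacy_log_kwargs(config.redirects, config.tee))
```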