[monarch] make test_python_actor.py work on mac #269

Status: Open. Wants to merge 1 commit into base branch gh/suo/31/base.

23 changes: 22 additions & 1 deletion python/tests/test_python_actors.py

@@ -29,7 +29,12 @@
 )
 from monarch.debugger import init_debugging
 
-from monarch.mesh_controller import spawn_tensor_engine
+from monarch._rust_bindings import has_tensor_engine
+
+if has_tensor_engine():
+    from monarch.mesh_controller import spawn_tensor_engine
+else:
+    spawn_tensor_engine = None
 
 from monarch.proc_mesh import local_proc_mesh, proc_mesh
 from monarch.rdma import RDMABuffer
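
The new import block probes for the tensor engine once at import time and falls back to a None sentinel, so the test module still imports on machines (such as Macs) where the engine is not built. The same two skip conditions recur in every hunk below; one possible follow-up, sketched here only and not part of this diff, would hoist them into reusable pytest markers:

# Sketch only, not part of this PR: reusable skip markers built from
# the same conditions the diff repeats inline below. The marker names
# needs_cuda and needs_tensor_engine are hypothetical.
import pytest
import torch

from monarch._rust_bindings import has_tensor_engine

needs_cuda = pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="CUDA not available",
)
needs_tensor_engine = pytest.mark.skipif(
    not has_tensor_engine(),
    reason="Tensor engine not available",
)

Each guarded test would then carry a one-line @needs_cuda or @needs_tensor_engine decorator instead of repeating the four-line skipif block.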

@@ -114,6 +119,10 @@ async def get_buffer(self):
         return self.buffer
 
 
+@pytest.mark.skipif(
+    not torch.cuda.is_available(),
+    reason="CUDA not available",
+)
 async def test_proc_mesh_rdma():
     proc = await proc_mesh(gpus=1)
     server = await proc.spawn("server", ParameterServer)
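
A note on semantics: pytest evaluates a skipif condition at collection time, before the test body runs, so on a CUDA-less Mac the call to proc_mesh(gpus=1) above is never reached. A self-contained illustration of the same guard on a hypothetical test:

# Illustration only; the test below is hypothetical, not from this PR.
import pytest
import torch

@pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="CUDA not available",
)
def test_needs_gpu():
    # Never executed on machines without CUDA; pytest reports the test
    # as skipped with the reason string above.
    x = torch.randn(8, device="cuda")
    assert x.is_cuda

Running the file with pytest -rs lists each skipped test together with its reason.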

@@ -282,6 +291,10 @@ async def update_weights(self):
         ), f"{torch.sum(self.generator.weight.data)=}, {self.step=}"
 
 
+@pytest.mark.skipif(
+    not torch.cuda.is_available(),
+    reason="CUDA not available",
+)
 async def test_gpu_trainer_generator():
     trainer_proc = await proc_mesh(gpus=1)
     gen_proc = await proc_mesh(gpus=1)

@@ -311,6 +324,10 @@ async def test_sync_actor():
     assert r == 5
 
 
+@pytest.mark.skipif(
+    not torch.cuda.is_available(),
+    reason="CUDA not available",
+)
 def test_gpu_trainer_generator_sync() -> None:
     trainer_proc = proc_mesh(gpus=1).get()
     gen_proc = proc_mesh(gpus=1).get()

@@ -391,6 +408,10 @@ def check(module, path):
     check(bindings, "monarch._rust_bindings")
 
 
+@pytest.mark.skipif(
+    not has_tensor_engine(),
+    reason="Tensor engine not available",
+)
 @pytest.mark.skipif(
     torch.cuda.device_count() < 2,
     reason="Not enough GPUs, this test requires at least 2 GPUs",
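
The final hunk stacks the new tensor-engine guard on top of an existing two-GPU guard. Stacked skipif markers combine disjunctively: the test is skipped if any condition is true and runs only when all of them are false. A minimal sketch of that behavior, using hypothetical always-true and always-false conditions:

# Sketch: stacked skipif markers combine as OR. Both tests here are
# hypothetical and exist only to show the semantics.
import pytest

@pytest.mark.skipif(True, reason="this guard always trips, so the test is skipped")
@pytest.mark.skipif(False, reason="this guard never trips on its own")
def test_skipped_by_one_guard():
    raise AssertionError("unreachable: one tripped guard is enough to skip")

@pytest.mark.skipif(False, reason="never trips")
@pytest.mark.skipif(False, reason="never trips either")
def test_runs_when_all_guards_are_false():
    assert True

On a Mac, then, the guarded test is skipped by both conditions at once: there is no tensor engine, and torch.cuda.device_count() returns 0, which is less than 2.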