Python benchmark for transform logging over time #7039

Merged (3 commits) on Aug 5, 2024
12 changes: 11 additions & 1 deletion pixi.toml
@@ -114,6 +114,9 @@ rerun = "cargo run --package rerun-cli --no-default-features --features native_viewer"
# Compile `rerun-cli` without the web-viewer.
rerun-build = "cargo build --package rerun-cli --no-default-features --features native_viewer"

# Compile `rerun-cli` in release mode, without the web-viewer.
rerun-build-release = "cargo build --package rerun-cli --release --no-default-features --features native_viewer"

# Compile and run the rerun viewer with --release.
#
# You can also give an argument for what to view (e.g. an .rrd file).
@@ -270,6 +273,10 @@ js-build-all = { cmd = "yarn --cwd rerun_js workspaces run build", depends_on = [
py-build-common = { cmd = "PIP_REQUIRE_VIRTUALENV=0 RERUN_ALLOW_MISSING_BIN=1 maturin develop --manifest-path rerun_py/Cargo.toml --extras=tests", depends_on = [
"rerun-build", # We need to build rerun-cli since it is bundled in the python package.
] }
py-build-common-release = { cmd = "PIP_REQUIRE_VIRTUALENV=0 RERUN_ALLOW_MISSING_BIN=1 maturin develop --release --manifest-path rerun_py/Cargo.toml --extras=tests", depends_on = [
"rerun-build-release", # We need to build rerun-cli since it is bundled in the python package.
] }


# Build the `rerun-notebook` package.
py-build-notebook = { cmd = "pip install -e rerun_notebook", depends_on = [
@@ -282,11 +289,14 @@ py-build-notebook = { cmd = "pip install -e rerun_notebook", depends_on = [
# Dedicated alias for building the python bindings for the `py` environment.
py-build = "pixi run -e py py-build-common"

# Dedicated alias for building the python bindings in release mode for the `py` environment.
py-build-release = "pixi run -e py py-build-common-release"
Member commented:

Not sure if there's anything we can do about it, but as an observation, this is very similar to `pixi run py-build --release`, which almost works, but still depends on `rerun-build` instead of `rerun-build-release`.

Member (author) replied:

Yeah, that's the reason I didn't go that way: there doesn't seem to be a way to pass arguments through to dependent commands.
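
To make the observation concrete, a minimal sketch using the task definitions from this diff; how pixi forwards trailing arguments is taken from the comments above, not re-verified here:

```toml
# Running `pixi run py-build --release` would append `--release` to the
# maturin command inside `py-build-common`, but its `depends_on` list
# still points at the debug `rerun-build` task, so the bundled rerun-cli
# binary would not be a release build.
py-build = "pixi run -e py py-build-common"

# The explicit alias added in this PR avoids that by chaining through
# release-mode tasks all the way down:
# py-build-release -> py-build-common-release -> rerun-build-release
py-build-release = "pixi run -e py py-build-common-release"
```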


# Helper alias to run the python interpreter in the context of the python environment
rrpy = "python"

py-bench = { cmd = "python -m pytest -c rerun_py/pyproject.toml --benchmark-only", depends_on = [
"py-build",
"py-build-release",
] }

py-plot-dashboard = { cmd = "python tests/python/plot_dashboard_stress/main.py", depends_on = [
2 changes: 1 addition & 1 deletion tests/python/log_benchmark/__init__.py
@@ -16,7 +16,7 @@ class Point3DInput:
label: str = "some label"

@classmethod
def prepare(cls, seed: int, num_points: int):
def prepare(cls, seed: int, num_points: int) -> "Point3DInput":
rng = np.random.default_rng(seed=seed)

return cls(
80 changes: 74 additions & 6 deletions tests/python/log_benchmark/test_log_benchmark.py
@@ -2,14 +2,17 @@

from __future__ import annotations

from typing import Any

import numpy as np
import numpy.typing as npt
import pytest
import rerun as rr

from . import Point3DInput


def log_points3d_large_batch(data: Point3DInput):
def log_points3d_large_batch(data: Point3DInput) -> None:
# create a new, empty memory sink for the current recording
rr.memory_recording()

@@ -20,13 +23,13 @@ def test_bench_points3d_large_batch(benchmark, num_points):


@pytest.mark.parametrize("num_points", [50_000_000])
def test_bench_points3d_large_batch(benchmark, num_points):
def test_bench_points3d_large_batch(benchmark: Any, num_points: int) -> None:
rr.init("rerun_example_benchmark_points3d_large_batch")
data = Point3DInput.prepare(42, num_points)
benchmark(log_points3d_large_batch, data)


def log_points3d_many_individual(data: Point3DInput):
def log_points3d_many_individual(data: Point3DInput) -> None:
# create a new, empty memory sink for the current recording
rr.memory_recording()

@@ -38,13 +41,13 @@ def test_bench_points3d_many_individual(benchmark, num_points):


@pytest.mark.parametrize("num_points", [100_000])
def test_bench_points3d_many_individual(benchmark, num_points):
def test_bench_points3d_many_individual(benchmark: Any, num_points: int) -> None:
rr.init("rerun_example_benchmark_points3d_many_individual")
data = Point3DInput.prepare(1337, num_points)
benchmark(log_points3d_many_individual, data)


def log_image(image: np.ndarray, num_log_calls):
def log_image(image: np.ndarray, num_log_calls: int) -> None:
# create a new, empty memory sink for the current recording
rr.memory_recording()

@@ -56,8 +59,73 @@
["image_dimension", "image_channels", "num_log_calls"],
[pytest.param(16_384, 4, 4, id="16384^2px-4channels-4calls")],
)
def test_bench_image(benchmark, image_dimension, image_channels, num_log_calls):
def test_bench_image(benchmark: Any, image_dimension: int, image_channels: int, num_log_calls: int) -> None:
rr.init("rerun_example_benchmark_image")

image = np.zeros((image_dimension, image_dimension, image_channels), dtype=np.uint8)
benchmark(log_image, image, num_log_calls)


def test_bench_transforms_over_time_individual(
rand_trans: npt.NDArray, rand_quats: npt.NDArray, rand_scales: npt.NDArray
) -> None:
# create a new, empty memory sink for the current recording
rr.memory_recording()

num_transforms = rand_trans.shape[0]
for i in range(num_transforms):
rr.set_time_sequence("frame", i)
rr.log(
"test_transform",
rr.Transform3D(translation=rand_trans[i], rotation=rr.Quaternion(xyzw=rand_quats[i]), scale=rand_scales[i]),
)


def test_bench_transforms_over_time_batched(
rand_trans: npt.NDArray, rand_quats: npt.NDArray, rand_scales: npt.NDArray, num_transforms_per_batch: int
) -> None:
# create a new, empty memory sink for the current recording
rr.memory_recording()

num_transforms = rand_trans.shape[0]
num_log_calls = num_transforms // num_transforms_per_batch
for i in range(num_log_calls):
start = i * num_transforms_per_batch
end = (i + 1) * num_transforms_per_batch
times = np.arange(start, end)

rr.log_temporal_batch(
"test_transform",
times=[rr.TimeSequenceBatch("frame", times)],
components=[
rr.Transform3D.indicator(),
rr.components.Translation3DBatch(rand_trans[start:end]),
rr.components.RotationQuatBatch(rand_quats[start:end]),
rr.components.Scale3DBatch(rand_scales[start:end]),
],
)


@pytest.mark.parametrize(
["num_transforms", "num_transforms_per_batch"],
[
pytest.param(10_000, 1),
pytest.param(10_000, 100),
pytest.param(10_000, 1_000),
],
)
def test_bench_transforms_over_time(benchmark: Any, num_transforms: int, num_transforms_per_batch: int) -> None:
rr.init("rerun_example_benchmark_transforms_individual")

rand_trans = np.array(np.random.rand(num_transforms, 3), dtype=np.float32)
rand_quats = np.array(np.random.rand(num_transforms, 4), dtype=np.float32)
rand_scales = np.array(np.random.rand(num_transforms, 3), dtype=np.float32)

if num_transforms_per_batch > 1:
benchmark(
test_bench_transforms_over_time_batched, rand_trans, rand_quats, rand_scales, num_transforms_per_batch
)
else:
benchmark(test_bench_transforms_over_time_individual, rand_trans, rand_quats, rand_scales)
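
With the pixi.toml changes above in place, the new transform benchmarks should run alongside the existing ones via the `py-bench` task (presumably `pixi run -e py py-bench`, matching the other `py` environment aliases), which now builds the Python bindings through `py-build-release` before invoking `python -m pytest -c rerun_py/pyproject.toml --benchmark-only`.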