Add llamatune checks with quantized values #814

Merged

Changes from all commits (26 commits)

4b55dbb
Validate the configuration produced by LlamaTune inverse_transform()
motus Jul 17, 2024
57fde9c
Merge branch 'main' into sergiym/opt/llamatune_inv
motus Jul 22, 2024
b58236f
Update azure credentials to be more flexible (#787)
eujing Jul 19, 2024
8034458
Fix mypy 1.11 warnings (#809)
motus Jul 22, 2024
ca898ed
Fix the coercion of scores to floats in the optimizer (#789)
motus Jul 22, 2024
f57d480
Merge remote-tracking branch 'sergiy/sergiym/opt/llamatune_inv' into …
bpkroth Jul 22, 2024
13fb61f
comments
bpkroth Jul 22, 2024
f02411a
comments
bpkroth Jul 22, 2024
b4d5007
apply the q_scaler
bpkroth Jul 22, 2024
e0e3b30
more tweaks
bpkroth Jul 22, 2024
1a44544
more fixups
bpkroth Jul 22, 2024
ceebf90
reorg to make pylint happier
bpkroth Jul 23, 2024
abcfafa
add a todo comment
bpkroth Jul 23, 2024
c7167de
format
bpkroth Jul 23, 2024
cdd71e1
add quantized values to the default tunable groups fixture
bpkroth Jul 23, 2024
60bd4f0
Also validate the other side
bpkroth Jul 23, 2024
ef06e8c
Add explicit tests for quantized values with llamatune
bpkroth Jul 23, 2024
dd83779
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Jul 31, 2024
8ec0d76
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Aug 1, 2024
aff965d
Merge branch 'main' into add-llamatune-checks-with-quantized-values
bpkroth Aug 8, 2024
a2dbb2b
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Aug 15, 2024
b56ba23
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Aug 19, 2024
b3850c3
Add back the quantization for LlamaTune unit tests (#59)
motus Aug 19, 2024
b502b28
Update mlos_bench/mlos_bench/optimizers/convert_configspace.py
motus Aug 19, 2024
7583d43
Update mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_te…
motus Aug 19, 2024
ce0fd04
Update mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
motus Aug 19, 2024
35 changes: 2 additions & 33 deletions mlos_bench/mlos_bench/optimizers/convert_configspace.py
@@ -20,13 +20,13 @@
Normal,
Uniform,
)
from ConfigSpace.functional import quantize
from ConfigSpace.hyperparameters import NumericalHyperparameter
from ConfigSpace.types import NotSet

from mlos_bench.tunables.tunable import Tunable, TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import try_parse_val
from mlos_core.spaces.converters.util import monkey_patch_quantization

_LOG = logging.getLogger(__name__)

@@ -49,37 +49,6 @@ def _normalize_weights(weights: List[float]) -> List[float]:
return [w / total for w in weights]


def _monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
"""
Monkey-patch quantization into the Hyperparameter.

Parameters
----------
hp : NumericalHyperparameter
ConfigSpace hyperparameter to patch.
quantization_bins : int
Number of bins to quantize the hyperparameter into.
"""
if quantization_bins <= 1:
raise ValueError(f"{quantization_bins=} :: must be greater than 1.")

# Temporary workaround to dropped quantization support in ConfigSpace 1.0
# See Also: https://github.com/automl/ConfigSpace/issues/390
if not hasattr(hp, "sample_value_mlos_orig"):
setattr(hp, "sample_value_mlos_orig", hp.sample_value)

assert hasattr(hp, "sample_value_mlos_orig")
setattr(
hp,
"sample_value",
lambda size=None, **kwargs: quantize(
hp.sample_value_mlos_orig(size, **kwargs),
bounds=(hp.lower, hp.upper),
bins=quantization_bins,
).astype(type(hp.default_value)),
)


def _tunable_to_configspace(
tunable: Tunable,
group_name: Optional[str] = None,
@@ -171,7 +140,7 @@ def _tunable_to_configspace(
if tunable.quantization_bins:
# Temporary workaround to dropped quantization support in ConfigSpace 1.0
# See Also: https://github.com/automl/ConfigSpace/issues/390
_monkey_patch_quantization(range_hp, tunable.quantization_bins)
monkey_patch_quantization(range_hp, tunable.quantization_bins)

if not tunable.special:
return ConfigurationSpace({tunable.name: range_hp})
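For context, a minimal sketch (not part of the diff) of the snapping behavior the patched sample_value() relies on; it assumes ConfigSpace 1.x, where ConfigSpace.functional.quantize maps raw samples onto an evenly spaced grid:

# Illustrative only: mirrors the call made inside the monkey-patched sampler.
import numpy as np
from ConfigSpace.functional import quantize

raw = np.array([3.7, 42.0, 97.2])
# 11 bins over [0, 100] snap each sample to the grid {0, 10, 20, ..., 100},
# so the values above land on 0, 40, and 100 respectively.
snapped = quantize(raw, bounds=(0.0, 100.0), bins=11)
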
mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py
@@ -17,13 +17,13 @@

from mlos_bench.optimizers.convert_configspace import (
TunableValueKind,
_monkey_patch_quantization,
_tunable_to_configspace,
special_param_names,
tunable_groups_to_configspace,
)
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_core.spaces.converters.util import monkey_patch_quantization

# pylint: disable=redefined-outer-name

@@ -103,7 +103,7 @@ def configuration_space() -> ConfigurationSpace:
)
hp = spaces["kernel_sched_latency_ns"]
assert isinstance(hp, NumericalHyperparameter)
_monkey_patch_quantization(hp, quantization_bins=10)
monkey_patch_quantization(hp, quantization_bins=11)
return spaces


39 changes: 39 additions & 0 deletions mlos_core/mlos_core/spaces/converters/util.py
@@ -0,0 +1,39 @@
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""Helper functions for config space converters."""

from ConfigSpace.functional import quantize
from ConfigSpace.hyperparameters import NumericalHyperparameter


def monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
"""
Monkey-patch quantization into the Hyperparameter.

Parameters
----------
hp : NumericalHyperparameter
ConfigSpace hyperparameter to patch.
quantization_bins : int
Number of bins to quantize the hyperparameter into.
"""
if quantization_bins <= 1:
raise ValueError(f"{quantization_bins=} :: must be greater than 1.")

# Temporary workaround to dropped quantization support in ConfigSpace 1.0
# See Also: https://github.com/automl/ConfigSpace/issues/390
if not hasattr(hp, "sample_value_mlos_orig"):
setattr(hp, "sample_value_mlos_orig", hp.sample_value)

assert hasattr(hp, "sample_value_mlos_orig")
setattr(
hp,
"sample_value",
lambda size=None, **kwargs: quantize(
hp.sample_value_mlos_orig(size, **kwargs),
bounds=(hp.lower, hp.upper),
bins=quantization_bins,
).astype(type(hp.default_value)),
)
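
A short usage sketch of the relocated helper, assuming a ConfigSpace 1.x numerical hyperparameter (the grid mirrors the unit tests further down):

# Illustrative usage, not part of the diff.
from ConfigSpace import UniformIntegerHyperparameter

from mlos_core.spaces.converters.util import monkey_patch_quantization

hp = UniformIntegerHyperparameter("kernel_sched_latency_ns", lower=0, upper=100)
monkey_patch_quantization(hp, quantization_bins=11)
# Every sample now lands on one of the 11 grid points 0, 10, ..., 100.
assert set(hp.sample_value(100)).issubset(set(range(0, 101, 10)))
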
30 changes: 29 additions & 1 deletion mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
@@ -13,11 +13,18 @@
import pytest

from mlos_core.spaces.adapters import LlamaTuneAdapter
from mlos_core.spaces.converters.util import monkey_patch_quantization

# Explicitly test quantized values with llamatune space adapter.
# TODO: Add log scale sampling tests as well.

def construct_parameter_space(

def construct_parameter_space( # pylint: disable=too-many-arguments
*,
n_continuous_params: int = 0,
n_quantized_continuous_params: int = 0,
n_integer_params: int = 0,
n_quantized_integer_params: int = 0,
n_categorical_params: int = 0,
seed: int = 1234,
) -> CS.ConfigurationSpace:
@@ -26,8 +33,16 @@ def construct_parameter_space(

for idx in range(n_continuous_params):
input_space.add(CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64))
for idx in range(n_quantized_continuous_params):
param_int = CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64)
monkey_patch_quantization(param_int, 6)
input_space.add(param_int)
for idx in range(n_integer_params):
input_space.add(CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=-1, upper=256))
for idx in range(n_quantized_integer_params):
param_float = CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=0, upper=256)
monkey_patch_quantization(param_float, 17)
input_space.add(param_float)
for idx in range(n_categorical_params):
input_space.add(
CS.CategoricalHyperparameter(
@@ -49,6 +64,13 @@ def construct_parameter_space(
{"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{
"n_quantized_continuous_params": int(
num_target_space_dims * num_orig_space_factor
)
},
# Mix of all three types
{
"n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
@@ -358,6 +380,12 @@ def test_max_unique_values_per_param() -> None:
{"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{
"n_quantized_continuous_params": int(
num_target_space_dims * num_orig_space_factor
)
},
# Mix of all three types
{
"n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
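An illustrative sketch (not in the diff) of what the new quantized parametrizations exercise, reusing the construct_parameter_space helper defined above:

# Illustrative only; hyperparameter names follow the f"int_{idx}" pattern above.
space = construct_parameter_space(n_quantized_integer_params=4, seed=1234)
hp = space["int_0"]
# 17 bins over [0, 256] give a grid with spacing 16, so at most 17 distinct samples.
assert len(set(hp.sample_value(1000))) <= 17
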
mlos_core/mlos_core/tests/spaces/converters/util_test.py
@@ -8,8 +8,8 @@
from ConfigSpace import UniformFloatHyperparameter, UniformIntegerHyperparameter
from numpy.random import RandomState

from mlos_bench.optimizers.convert_configspace import _monkey_patch_quantization
from mlos_bench.tests import SEED
from mlos_core.spaces.converters.util import monkey_patch_quantization
from mlos_core.tests import SEED


def test_configspace_quant_int() -> None:
Expand All @@ -20,7 +20,7 @@ def test_configspace_quant_int() -> None:
# Before patching: expect that at least one value is not quantized.
assert not set(hp.sample_value(100)).issubset(quantized_values)

_monkey_patch_quantization(hp, 11)
monkey_patch_quantization(hp, 11)
# After patching: *all* values must belong to the set of quantized values.
assert hp.sample_value() in quantized_values # check scalar type
assert set(hp.sample_value(100)).issubset(quantized_values) # batch version
@@ -35,7 +35,7 @@ def test_configspace_quant_float() -> None:
assert not set(hp.sample_value(100)).issubset(quantized_values)

# 5 is a nice number of bins to avoid floating point errors.
_monkey_patch_quantization(hp, 5)
monkey_patch_quantization(hp, 5)
# After patching: *all* values must belong to the set of quantized values.
assert hp.sample_value() in quantized_values # check scalar type
assert set(hp.sample_value(100)).issubset(quantized_values) # batch version
@@ -49,18 +49,18 @@ def test_configspace_quant_repatch() -> None:
# Before patching: expect that at least one value is not quantized.
assert not set(hp.sample_value(100)).issubset(quantized_values)

_monkey_patch_quantization(hp, 11)
monkey_patch_quantization(hp, 11)
# After patching: *all* values must belong to the set of quantized values.
samples = hp.sample_value(100, seed=RandomState(SEED))
assert set(samples).issubset(quantized_values)

# Patch the same hyperparameter again and check that the results are the same.
_monkey_patch_quantization(hp, 11)
monkey_patch_quantization(hp, 11)
# After patching: *all* values must belong to the set of quantized values.
assert all(samples == hp.sample_value(100, seed=RandomState(SEED)))

# Repatch with the higher number of bins and make sure we get new values.
_monkey_patch_quantization(hp, 21)
monkey_patch_quantization(hp, 21)
samples_set = set(hp.sample_value(100, seed=RandomState(SEED)))
quantized_values_new = set(range(5, 96, 10))
assert samples_set.issubset(set(range(0, 101, 5)))
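Repatching is idempotent because monkey_patch_quantization stashes the original sampler in sample_value_mlos_orig only once and always wraps that original: patching again with the same bin count reproduces the same samples, while a different bin count simply swaps in a new grid.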