Commit 83df948

Implemented passing settings to the hyperparameter tuner, improved the hyperparameter test, and improved defaults
1 parent 9196266 · commit 83df948

File tree (3 files changed: +77 −38 lines)

kernel_tuner/backends/hypertuner.py
kernel_tuner/core.py
test/test_hyper.py
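
The user-facing change: settings for the hyperparameter tuner can now be passed through the existing `compiler_options` argument of `tune_hyper_params`. A minimal sketch based on the updated test below; the import path and the `hyper_params` values are assumptions for illustration, and the `compiler_options` keys shown are the ones the backend now recognizes:

```python
# Sketch only: the import path and hyper_params values are assumed, not taken from this commit.
from kernel_tuner.hyper import tune_hyper_params

# hyperparameters of the strategy being tuned (illustrative values)
hyper_params = {"popsize": [10, 20], "maxiter": [50, 100]}

# settings forwarded to the hyperparameter tuner backend via compiler_options
compiler_options = {
    "gpus": ["A100", "MI250X"],          # GPUs to include in the generated experiment
    "override": {                        # extra settings for the generated experiment file
        "experimental_groups_defaults": {
            "repeats": 1,
            "samples": 1,
            "minimum_fraction_of_budget_valid": 0.01,
        },
    },
}

result, env = tune_hyper_params("genetic_algorithm", hyper_params, iterations=1,
                                compiler_options=compiler_options, verbose=True, cache=None)
```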

kernel_tuner/backends/hypertuner.py

Lines changed: 52 additions & 35 deletions
```diff
@@ -35,37 +35,18 @@ class HypertunerFunctions(Backend):
     """Class for executing hyperparameter tuning."""
     units = {}
 
-    def __init__(self, iterations):
+    def __init__(self, iterations, compiler_options=None):
         self.iterations = iterations
+        self.compiler_options = compiler_options
         self.observers = [ScoreObserver(self)]
         self.name = platform.processor()
         self.max_threads = 1024
         self.last_score = None
 
-        # set the environment options
-        env = dict()
-        env["iterations"] = self.iterations
-        self.env = env
-
-        # check for the methodology package
-        if methodology_available is not True:
-            raise ImportError("Unable to import the autotuning methodology, run `pip install autotuning_methodology`.")
-
-    def ready_argument_list(self, arguments):
-        arglist = super().ready_argument_list(arguments)
-        if arglist is None:
-            arglist = []
-        return arglist
-
-    def compile(self, kernel_instance):
-        super().compile(kernel_instance)
-        path = Path(__file__).parent.parent.parent / "hyperparamtuning"
-        path.mkdir(exist_ok=True)
-
-        # TODO get applications & GPUs args from benchmark
-        gpus = ["A100", "A4000", "MI250X"]
+        # set the defaults
+        self.gpus = ["A100", "A4000", "MI250X"]
         folder = "../autotuning_methodology/benchmark_hub/kernels"
-        applications = [
+        self.applications = [
             {
                 "name": "dedispersion_milo",
                 "folder": folder,
@@ -91,6 +72,51 @@ def compile(self, kernel_instance):
                 "objective_performance_keys": ["time"]
             }
         ]
+        # any additional settings
+        self.override = {
+            "experimental_groups_defaults": {
+                "repeats": 25,
+                "samples": self.iterations,
+                "minimum_fraction_of_budget_valid": 0.01,
+            },
+            "statistics_settings": {
+                "cutoff_percentile": 0.95,
+                "cutoff_percentile_start": 0.01,
+                "cutoff_type": "time",
+                "objective_time_keys": [
+                    "all"
+                ]
+            }
+        }
+
+        # override the defaults with compiler options if provided
+        if self.compiler_options is not None:
+            if "gpus" in self.compiler_options:
+                self.gpus = self.compiler_options["gpus"]
+            if "applications" in self.compiler_options:
+                self.applications = self.compiler_options["applications"]
+            if "override" in self.compiler_options:
+                self.override = self.compiler_options["override"]
+
+        # set the environment options
+        env = dict()
+        env["iterations"] = self.iterations
+        self.env = env
+
+        # check for the methodology package
+        if methodology_available is not True:
+            raise ImportError("Unable to import the autotuning methodology, run `pip install autotuning_methodology`.")
+
+    def ready_argument_list(self, arguments):
+        arglist = super().ready_argument_list(arguments)
+        if arglist is None:
+            arglist = []
+        return arglist
+
+    def compile(self, kernel_instance):
+        super().compile(kernel_instance)
+        path = Path(__file__).parent.parent.parent / "hyperparamtuning"
+        path.mkdir(exist_ok=True)
 
         # strategy settings
         strategy: str = kernel_instance.arguments[0]
@@ -104,18 +130,9 @@ def compile(self, kernel_instance):
            'search_method_hyperparameters': hyperparams
        }]
 
-        # any additional settings
-        override = {
-            "experimental_groups_defaults": {
-                "repeats": 25,
-                "samples": self.iterations,
-                "minimum_fraction_of_budget_valid": 0.01,
-            }
-        }
-
         name = kernel_instance.name if len(kernel_instance.name) > 0 else kernel_instance.kernel_source.kernel_name
-        experiments_filepath = generate_experiment_file(name, path, searchspace_strategies, applications, gpus,
-                                                        override=override, generate_unique_file=True, overwrite_existing_file=True)
+        experiments_filepath = generate_experiment_file(name, path, searchspace_strategies, self.applications, self.gpus,
+                                                        override=self.override, generate_unique_file=True, overwrite_existing_file=True)
         return str(experiments_filepath)
 
     def start_event(self):
```
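
With this change, `HypertunerFunctions.__init__` accepts three optional keys in `compiler_options`, each of which replaces the corresponding built-in default: `gpus`, `applications`, and `override`. A sketch of the expected shapes, with values copied from the defaults in the diff above (the application entry lists only the fields visible in this hunk; the methodology package may expect more):

```python
# Shape of the settings dict accepted via compiler_options.
# Every key is optional; anything omitted keeps the default set in __init__.
settings = {
    "gpus": ["A100", "A4000", "MI250X"],     # GPU names written into the experiment file
    "applications": [                        # benchmark kernels to tune the strategy on
        {
            "name": "dedispersion_milo",
            "folder": "../autotuning_methodology/benchmark_hub/kernels",
            "objective_performance_keys": ["time"],
            # further per-application fields may be required by autotuning_methodology
        },
    ],
    "override": {                            # forwarded as generate_experiment_file(override=...)
        "experimental_groups_defaults": {
            "repeats": 25,
            "samples": 1,                    # the backend default uses self.iterations here
            "minimum_fraction_of_budget_valid": 0.01,
        },
        "statistics_settings": {
            "cutoff_percentile": 0.95,
            "cutoff_percentile_start": 0.01,
            "cutoff_type": "time",
            "objective_time_keys": ["all"],
        },
    },
}
```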

kernel_tuner/core.py

Lines changed: 5 additions & 2 deletions
```diff
@@ -324,10 +324,13 @@ def __init__(
                 observers=observers,
             )
         elif lang.upper() == "HYPERTUNER":
-            dev = HypertunerFunctions(iterations=iterations)
+            dev = HypertunerFunctions(
+                iterations=iterations,
+                compiler_options=compiler_options
+            )
             self.requires_warmup = False
         else:
-            raise ValueError("Sorry, support for languages other than CUDA, OpenCL, HIP, C, and Fortran is not implemented yet")
+            raise NotImplementedError("Sorry, support for languages other than CUDA, OpenCL, HIP, C, and Fortran is not implemented yet")
         self.dev = dev
 
         # look for NVMLObserver and TegraObserver in observers, if present, enable special tunable parameters through nvml/tegra
```
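
This hunk threads the `compiler_options` already available in core.py's device-interface constructor into the new backend, and changes the unsupported-language error from `ValueError` to `NotImplementedError`, which callers catching the old type should note. A minimal sketch of constructing the backend directly, mirroring what core.py now does for `lang="Hypertuner"` (it assumes the autotuning_methodology package is installed; otherwise `__init__` raises ImportError):

```python
# Sketch only: direct construction of the hypertuner backend, as core.py now does.
# Requires `pip install autotuning_methodology`.
from kernel_tuner.backends.hypertuner import HypertunerFunctions

settings = {"gpus": ["A100", "MI250X"]}   # any of "gpus", "applications", "override"
dev = HypertunerFunctions(iterations=1, compiler_options=settings)

print(dev.gpus)   # ["A100", "MI250X"], the default GPU list replaced by the setting
print(dev.env)    # {"iterations": 1}
```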

test/test_hyper.py

Lines changed: 20 additions & 1 deletion
```diff
@@ -15,6 +15,25 @@ def test_hyper(env):
 
     target_strategy = "genetic_algorithm"
 
-    result, env = tune_hyper_params(target_strategy, hyper_params, iterations=1, verbose=True, cache=None)
+    compiler_options = {
+        "gpus": ["A100", "MI250X"],
+        "override": {
+            "experimental_groups_defaults": {
+                "repeats": 1,
+                "samples": 1,
+                "minimum_fraction_of_budget_valid": 0.01,
+            },
+            "statistics_settings": {
+                "cutoff_percentile": 0.90,
+                "cutoff_percentile_start": 0.01,
+                "cutoff_type": "time",
+                "objective_time_keys": [
+                    "all"
+                ]
+            }
+        }
+    }
+
+    result, env = tune_hyper_params(target_strategy, hyper_params, iterations=1, compiler_options=compiler_options, verbose=True, cache=None)
     assert len(result) == 2
     assert 'best_config' in env
```
