
Commit 9504820: Formatting changes

1 parent 3b7faea

5 files changed: +77 additions, -56 deletions

amlb/benchmark.py

Lines changed: 0 additions & 1 deletion
@@ -656,7 +656,6 @@ def handle_unfulfilled(message, on_auto="warn"):
 
 
 class BenchmarkTask:
-
     def __init__(self, benchmark: Benchmark, task_def, fold):
         """
 

amlb/resources.py

Lines changed: 11 additions & 6 deletions
@@ -212,7 +212,10 @@ def benchmark_definition(self, name: str, defaults: TaskConstraint | None = None
         return self._benchmark_definition(name, self.config, defaults)
 
     def _benchmark_definition(
-        self, name: str, config_: Namespace, defaults: TaskConstraint | None = None
+        self,
+        name: str,
+        config_: Namespace,
+        defaults_for_task: TaskConstraint | None = None,
     ):
         """
         :param name: name of the benchmark as defined by resources/benchmarks/{name}.yaml, the path to a user-defined benchmark description file or a study id.
@@ -222,8 +225,9 @@ def _benchmark_definition(
         file_defaults, tasks, benchmark_path, benchmark_name = benchmark_load(
             name, config_.benchmarks.definition_dir
         )
-        if defaults is not None:
-            defaults = Namespace(**dataclasses.asdict(defaults))
+        defaults = None
+        if defaults_for_task is not None:
+            defaults = Namespace(**dataclasses.asdict(defaults_for_task))
         defaults = Namespace.merge(
             defaults, file_defaults, Namespace(name="__defaults__")
         )
@@ -261,7 +265,6 @@ def _add_task_defaults(task: Namespace, config_: Namespace):
         if task["metric"] is None:
             task["metric"] = None
 
-
         if task["ec2_instance_type"] is None:
             task["ec2_instance_type"] = Resources.lookup_ec2_instance_type(
                 config_, task.cores
@@ -311,8 +314,10 @@ def lookup_suitable_instance_size(cores_to_size: Namespace, cores: int) -> str:
        if cores <= 0 or cores > max(supported_cores):
            return cores_to_size.default
 
-        cores = next((c for c in sorted(supported_cores) if c >= cores), "default")
-        return cores_to_size[str(cores)]
+        best_match = next(
+            (str(c) for c in sorted(supported_cores) if c >= cores), "default"
+        )
+        return cores_to_size[best_match]
 
     @staticmethod
     def generate_task_identifier(task: Namespace) -> str | None:
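
The lookup_suitable_instance_size refactor keeps the original behavior: choose the smallest supported core count that still covers the request, and fall back to the default size when the request is out of range. A minimal standalone sketch of that logic, using a plain dict in place of amlb's Namespace and hypothetical instance-type names:

    # Hypothetical cores-to-size map; real values come from the amlb config.
    cores_to_size = {
        "default": "m5.large",
        "2": "m5.large",
        "4": "m5.xlarge",
        "8": "m5.2xlarge",
    }

    def lookup_suitable_instance_size(cores_to_size: dict, cores: int) -> str:
        supported_cores = [int(c) for c in cores_to_size if c.isdigit()]
        if cores <= 0 or cores > max(supported_cores):
            return cores_to_size["default"]
        # Smallest supported core count that still covers the request.
        best_match = next(
            (str(c) for c in sorted(supported_cores) if c >= cores), "default"
        )
        return cores_to_size[best_match]

    assert lookup_suitable_instance_size(cores_to_size, 3) == "m5.xlarge"
    assert lookup_suitable_instance_size(cores_to_size, 16) == "m5.large"  # out of range -> default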

frameworks/FEDOT/__init__.py

Lines changed: 5 additions & 3 deletions
@@ -26,8 +26,10 @@ def run_fedot_tabular(dataset: Dataset, config: TaskConfig):
         __file__, "exec.py", input_data=data, dataset=dataset, config=config
     )
 
+
 def run_fedot_timeseries(dataset: Dataset, config: TaskConfig):
     from frameworks.shared.caller import run_in_venv
+
     dataset = deepcopy(dataset)
 
     data = dict(
@@ -43,6 +45,6 @@ def run_fedot_timeseries(dataset: Dataset, config: TaskConfig):
         repeated_item_id=dataset.repeated_item_id,
     )
 
-    return run_in_venv(__file__, "exec_ts.py",
-                       input_data=data, dataset=dataset, config=config)
-
+    return run_in_venv(
+        __file__, "exec_ts.py", input_data=data, dataset=dataset, config=config
+    )

frameworks/FEDOT/exec.py

Lines changed: 13 additions & 11 deletions
@@ -13,11 +13,13 @@
 def run(dataset, config):
     log.info("\n**** FEDOT ****\n")
 
-    is_classification = config.type == 'classification'
+    is_classification = config.type == "classification"
     scoring_metric = get_fedot_metrics(config)
 
     training_params = {"preset": "best_quality", "n_jobs": config.cores}
-    training_params.update({k: v for k, v in config.framework_params.items() if not k.startswith('_')})
+    training_params.update(
+        {k: v for k, v in config.framework_params.items() if not k.startswith("_")}
+    )
     n_jobs = training_params["n_jobs"]
 
     log.info(f"Running FEDOT with a maximum time of {config.max_runtime_seconds}s on {n_jobs} cores, \
@@ -62,15 +64,15 @@ def run(dataset, config):
 
 def get_fedot_metrics(config):
     metrics_mapping = dict(
-        acc='accuracy',
-        auc='roc_auc',
-        f1='f1',
-        logloss='neg_log_loss',
-        mae='mae',
-        mse='mse',
-        msle='msle',
-        r2='r2',
-        rmse='rmse',
+        acc="accuracy",
+        auc="roc_auc",
+        f1="f1",
+        logloss="neg_log_loss",
+        mae="mae",
+        mse="mse",
+        msle="msle",
+        r2="r2",
+        rmse="rmse",
     )
     scoring_metric = metrics_mapping.get(config.metric, None)
 
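
The dict comprehension being reflowed here implements the repo's underscore convention: framework_params keys that start with "_" (e.g. "_save_artifacts", read separately in save_artifacts below) are benchmark-level options and are filtered out before the remaining keys are forwarded to FEDOT as training parameters. A small sketch of that filter, with made-up parameter values:

    framework_params = {"preset": "fast_train", "_save_artifacts": ["models"]}
    training_params = {"preset": "best_quality", "n_jobs": 4}
    # Keep only user-facing keys; user-supplied values override the defaults.
    training_params.update(
        {k: v for k, v in framework_params.items() if not k.startswith("_")}
    )
    assert training_params == {"preset": "fast_train", "n_jobs": 4}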

frameworks/FEDOT/exec_ts.py

Lines changed: 48 additions & 35 deletions
@@ -22,22 +22,28 @@ def run(dataset, config):
     scoring_metric = get_fedot_metrics(config)
 
     training_params = {"preset": "best_quality", "n_jobs": config.cores}
-    training_params.update({k: v for k, v in config.framework_params.items() if not k.startswith('_')})
+    training_params.update(
+        {k: v for k, v in config.framework_params.items() if not k.startswith("_")}
+    )
     n_jobs = training_params["n_jobs"]
 
     log.info(f"Running FEDOT with a maximum time of {config.max_runtime_seconds}s on {n_jobs} cores, \
            optimizing {scoring_metric}")
 
     task = Task(
         TaskTypesEnum.ts_forecasting,
-        TsForecastingParams(forecast_length=dataset.forecast_horizon_in_steps)
+        TsForecastingParams(forecast_length=dataset.forecast_horizon_in_steps),
     )
 
     train_df, test_df = load_timeseries_dataset(dataset)
     id_column = dataset.id_column
 
-    max_runtime_minutes_per_ts = config.max_runtime_seconds / 60 / train_df[id_column].nunique()
-    log.info(f'Fitting FEDOT with a maximum time of {max_runtime_minutes_per_ts}min per series')
+    max_runtime_minutes_per_ts = (
+        config.max_runtime_seconds / 60 / train_df[id_column].nunique()
+    )
+    log.info(
+        f"Fitting FEDOT with a maximum time of {max_runtime_minutes_per_ts}min per series"
+    )
 
     training_duration, predict_duration = 0, 0
     models_count = 0
@@ -51,10 +57,12 @@ def run(dataset, config):
             features=train_series,
             target=train_series,
             task=task,
-            data_type=DataTypesEnum.ts
+            data_type=DataTypesEnum.ts,
         )
 
-        test_sub_df = test_df[test_df[id_column] == label].drop(columns=[id_column], axis=1)
+        test_sub_df = test_df[test_df[id_column] == label].drop(
+            columns=[id_column], axis=1
+        )
         horizon = len(test_sub_df[dataset.target])
 
         fedot = Fedot(
@@ -63,8 +71,9 @@
             timeout=max_runtime_minutes_per_ts,
             metric=scoring_metric,
             seed=config.seed,
-            max_pipeline_fit_time=max_runtime_minutes_per_ts / 5,  # fit at least 5 pipelines
-            **training_params
+            max_pipeline_fit_time=max_runtime_minutes_per_ts
+            / 5,  # fit at least 5 pipelines
+            **training_params,
         )
 
         with Timer() as training:
@@ -75,7 +84,7 @@
             try:
                 prediction = fedot.forecast(train_input, horizon=horizon)
             except Exception as e:
-                log.info(f'Pipeline crashed due to {e}. Using no-op forecasting')
+                log.info(f"Pipeline crashed due to {e}. Using no-op forecasting")
                 prediction = np.full(horizon, train_series[-1])
 
         predict_duration += predict.duration
@@ -92,25 +101,27 @@
         optional_columns[str(quantile)] = all_series_predictions
 
     save_artifacts(fedot, config)
-    return result(output_file=config.output_predictions_file,
-                  predictions=all_series_predictions,
-                  truth=truth_only,
-                  target_is_encoded=False,
-                  models_count=models_count,
-                  training_duration=training_duration,
-                  predict_duration=predict_duration,
-                  optional_columns=pd.DataFrame(optional_columns))
+    return result(
+        output_file=config.output_predictions_file,
+        predictions=all_series_predictions,
+        truth=truth_only,
+        target_is_encoded=False,
+        models_count=models_count,
+        training_duration=training_duration,
+        predict_duration=predict_duration,
+        optional_columns=pd.DataFrame(optional_columns),
+    )
 
 
 def get_fedot_metrics(config):
     metrics_mapping = dict(
-        mape='mape',
-        smape='smape',
-        mase='mase',
-        mse='mse',
-        rmse='rmse',
-        mae='mae',
-        r2='r2',
+        mape="mape",
+        smape="smape",
+        mase="mase",
+        mse="mse",
+        rmse="rmse",
+        mae="mae",
+        r2="r2",
     )
     scoring_metric = metrics_mapping.get(config.metric, None)
 
@@ -121,27 +132,29 @@ def get_fedot_metrics(config):
 
 
 def save_artifacts(automl, config):
-
-    artifacts = config.framework_params.get('_save_artifacts', [])
-    if 'models' in artifacts:
+    artifacts = config.framework_params.get("_save_artifacts", [])
+    if "models" in artifacts:
         try:
-            models_dir = output_subdir('models', config)
-            models_file = os.path.join(models_dir, 'model.json')
+            models_dir = output_subdir("models", config)
+            models_file = os.path.join(models_dir, "model.json")
             automl.current_pipeline.save(models_file)
         except Exception as e:
             log.info(f"Error when saving 'models': {e}.", exc_info=True)
 
-    if 'info' in artifacts:
+    if "info" in artifacts:
         try:
             info_dir = output_subdir("info", config)
             if automl.history:
-                automl.history.save(os.path.join(info_dir, 'history.json'))
+                automl.history.save(os.path.join(info_dir, "history.json"))
             else:
-                log.info(f"There is no optimization history info to save.")
+                log.info("There is no optimization history info to save.")
         except Exception as e:
-            log.info(f"Error when saving info about optimisation history: {e}.", exc_info=True)
+            log.info(
+                f"Error when saving info about optimisation history: {e}.",
+                exc_info=True,
+            )
 
-    if 'leaderboard' in artifacts:
+    if "leaderboard" in artifacts:
         try:
             leaderboard_dir = output_subdir("leaderboard", config)
             if automl.history:
@@ -151,5 +164,5 @@ def save_artifacts(automl, config):
             log.info(f"Error when saving 'leaderboard': {e}.", exc_info=True)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     call_run(run)
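
Two details in this diff are easy to miss among the quote and line-wrapping changes: the total runtime budget is split evenly across the unique series ids, with a single pipeline fit capped at a fifth of the per-series budget (the "fit at least 5 pipelines" comment), and a crashed forecast falls back to repeating the last observed value. A sketch of both, with made-up numbers:

    import numpy as np

    # Per-series budget: total seconds -> minutes, split across unique series ids.
    max_runtime_seconds = 3600
    n_series = 12
    max_runtime_minutes_per_ts = max_runtime_seconds / 60 / n_series  # 5.0 min
    max_pipeline_fit_time = max_runtime_minutes_per_ts / 5  # 1.0 min per pipeline

    # No-op fallback used when fedot.forecast raises: repeat the last observation.
    train_series = np.array([4.2, 4.7, 5.1])
    horizon = 4
    prediction = np.full(horizon, train_series[-1])  # [5.1, 5.1, 5.1, 5.1]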
