@@ -22,22 +22,28 @@ def run(dataset, config):
2222 scoring_metric = get_fedot_metrics (config )
2323
2424 training_params = {"preset" : "best_quality" , "n_jobs" : config .cores }
25- training_params .update ({k : v for k , v in config .framework_params .items () if not k .startswith ('_' )})
25+ training_params .update (
26+ {k : v for k , v in config .framework_params .items () if not k .startswith ("_" )}
27+ )
2628 n_jobs = training_params ["n_jobs" ]
2729
2830 log .info (f"Running FEDOT with a maximum time of { config .max_runtime_seconds } s on { n_jobs } cores, \
2931 optimizing { scoring_metric } " )
3032
3133 task = Task (
3234 TaskTypesEnum .ts_forecasting ,
33- TsForecastingParams (forecast_length = dataset .forecast_horizon_in_steps )
35+ TsForecastingParams (forecast_length = dataset .forecast_horizon_in_steps ),
3436 )
3537
3638 train_df , test_df = load_timeseries_dataset (dataset )
3739 id_column = dataset .id_column
3840
39- max_runtime_minutes_per_ts = config .max_runtime_seconds / 60 / train_df [id_column ].nunique ()
40- log .info (f'Fitting FEDOT with a maximum time of { max_runtime_minutes_per_ts } min per series' )
41+ max_runtime_minutes_per_ts = (
42+ config .max_runtime_seconds / 60 / train_df [id_column ].nunique ()
43+ )
44+ log .info (
45+ f"Fitting FEDOT with a maximum time of { max_runtime_minutes_per_ts } min per series"
46+ )
4147
4248 training_duration , predict_duration = 0 , 0
4349 models_count = 0
@@ -51,10 +57,12 @@ def run(dataset, config):
5157 features = train_series ,
5258 target = train_series ,
5359 task = task ,
54- data_type = DataTypesEnum .ts
60+ data_type = DataTypesEnum .ts ,
5561 )
5662
57- test_sub_df = test_df [test_df [id_column ] == label ].drop (columns = [id_column ], axis = 1 )
63+ test_sub_df = test_df [test_df [id_column ] == label ].drop (
64+ columns = [id_column ], axis = 1
65+ )
5866 horizon = len (test_sub_df [dataset .target ])
5967
6068 fedot = Fedot (
@@ -63,8 +71,9 @@ def run(dataset, config):
6371 timeout = max_runtime_minutes_per_ts ,
6472 metric = scoring_metric ,
6573 seed = config .seed ,
66- max_pipeline_fit_time = max_runtime_minutes_per_ts / 5 , # fit at least 5 pipelines
67- ** training_params
74+ max_pipeline_fit_time = max_runtime_minutes_per_ts
75+ / 5 , # fit at least 5 pipelines
76+ ** training_params ,
6877 )
6978
7079 with Timer () as training :
@@ -75,7 +84,7 @@ def run(dataset, config):
7584 try :
7685 prediction = fedot .forecast (train_input , horizon = horizon )
7786 except Exception as e :
78- log .info (f' Pipeline crashed due to { e } . Using no-op forecasting' )
87+ log .info (f" Pipeline crashed due to { e } . Using no-op forecasting" )
7988 prediction = np .full (horizon , train_series [- 1 ])
8089
8190 predict_duration += predict .duration
@@ -92,25 +101,27 @@ def run(dataset, config):
92101 optional_columns [str (quantile )] = all_series_predictions
93102
94103 save_artifacts (fedot , config )
95- return result (output_file = config .output_predictions_file ,
96- predictions = all_series_predictions ,
97- truth = truth_only ,
98- target_is_encoded = False ,
99- models_count = models_count ,
100- training_duration = training_duration ,
101- predict_duration = predict_duration ,
102- optional_columns = pd .DataFrame (optional_columns ))
104+ return result (
105+ output_file = config .output_predictions_file ,
106+ predictions = all_series_predictions ,
107+ truth = truth_only ,
108+ target_is_encoded = False ,
109+ models_count = models_count ,
110+ training_duration = training_duration ,
111+ predict_duration = predict_duration ,
112+ optional_columns = pd .DataFrame (optional_columns ),
113+ )
103114
104115
105116def get_fedot_metrics (config ):
106117 metrics_mapping = dict (
107- mape = ' mape' ,
108- smape = ' smape' ,
109- mase = ' mase' ,
110- mse = ' mse' ,
111- rmse = ' rmse' ,
112- mae = ' mae' ,
113- r2 = 'r2' ,
118+ mape = " mape" ,
119+ smape = " smape" ,
120+ mase = " mase" ,
121+ mse = " mse" ,
122+ rmse = " rmse" ,
123+ mae = " mae" ,
124+ r2 = "r2" ,
114125 )
115126 scoring_metric = metrics_mapping .get (config .metric , None )
116127
@@ -121,27 +132,29 @@ def get_fedot_metrics(config):
121132
122133
123134def save_artifacts (automl , config ):
124-
125- artifacts = config .framework_params .get ('_save_artifacts' , [])
126- if 'models' in artifacts :
135+ artifacts = config .framework_params .get ("_save_artifacts" , [])
136+ if "models" in artifacts :
127137 try :
128- models_dir = output_subdir (' models' , config )
129- models_file = os .path .join (models_dir , ' model.json' )
138+ models_dir = output_subdir (" models" , config )
139+ models_file = os .path .join (models_dir , " model.json" )
130140 automl .current_pipeline .save (models_file )
131141 except Exception as e :
132142 log .info (f"Error when saving 'models': { e } ." , exc_info = True )
133143
134- if ' info' in artifacts :
144+ if " info" in artifacts :
135145 try :
136146 info_dir = output_subdir ("info" , config )
137147 if automl .history :
138- automl .history .save (os .path .join (info_dir , ' history.json' ))
148+ automl .history .save (os .path .join (info_dir , " history.json" ))
139149 else :
140- log .info (f "There is no optimization history info to save." )
150+ log .info ("There is no optimization history info to save." )
141151 except Exception as e :
142- log .info (f"Error when saving info about optimisation history: { e } ." , exc_info = True )
152+ log .info (
153+ f"Error when saving info about optimisation history: { e } ." ,
154+ exc_info = True ,
155+ )
143156
144- if ' leaderboard' in artifacts :
157+ if " leaderboard" in artifacts :
145158 try :
146159 leaderboard_dir = output_subdir ("leaderboard" , config )
147160 if automl .history :
@@ -151,5 +164,5 @@ def save_artifacts(automl, config):
151164 log .info (f"Error when saving 'leaderboard': { e } ." , exc_info = True )
152165
153166
# Script entry point: delegate to the benchmark harness, which parses the
# dataset/config payload and invokes run() defined above.
if __name__ == "__main__":
    call_run(run)
0 commit comments