From d583cb28ed0e3a857c7dde35baf3d00b87bad428 Mon Sep 17 00:00:00 2001
From: Lopa10ko
Date: Mon, 25 Mar 2024 13:36:19 +0300
Subject: [PATCH] fix: change format strings to f-strings

---
 frameworks/FEDOT/exec.py    | 6 +++---
 frameworks/FEDOT/exec_ts.py | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/frameworks/FEDOT/exec.py b/frameworks/FEDOT/exec.py
index 33e877320..da6c6652a 100644
--- a/frameworks/FEDOT/exec.py
+++ b/frameworks/FEDOT/exec.py
@@ -20,8 +20,8 @@ def run(dataset, config):
     training_params.update({k: v for k, v in config.framework_params.items() if not k.startswith('_')})
     n_jobs = training_params["n_jobs"]
 
-    log.info('Running FEDOT with a maximum time of %ss on %s cores, optimizing %s.',
-             config.max_runtime_seconds, n_jobs, scoring_metric)
+    log.info(f"Running FEDOT with a maximum time of {config.max_runtime_seconds}s "
+             f"on {n_jobs} cores, optimizing {scoring_metric}.")
     runtime_min = config.max_runtime_seconds / 60
 
     fedot = Fedot(problem=config.type, timeout=runtime_min, metric=scoring_metric, seed=config.seed,
@@ -64,7 +64,7 @@ def get_fedot_metrics(config):
 
     scoring_metric = metrics_mapping.get(config.metric, None)
 
     if scoring_metric is None:
-        log.warning("Performance metric %s not supported.", config.metric)
+        log.warning(f"Performance metric {config.metric} not supported.")
 
     return scoring_metric
diff --git a/frameworks/FEDOT/exec_ts.py b/frameworks/FEDOT/exec_ts.py
index d00a23e49..36d771b80 100644
--- a/frameworks/FEDOT/exec_ts.py
+++ b/frameworks/FEDOT/exec_ts.py
@@ -23,8 +23,8 @@ def run(dataset, config):
     training_params.update({k: v for k, v in config.framework_params.items() if not k.startswith('_')})
     n_jobs = training_params["n_jobs"]
 
-    log.info('Running FEDOT with a maximum time of %ss on %s cores, optimizing %s.',
-             config.max_runtime_seconds, n_jobs, scoring_metric)
+    log.info(f"Running FEDOT with a maximum time of {config.max_runtime_seconds}s "
+             f"on {n_jobs} cores, optimizing {scoring_metric}.")
     runtime_min = config.max_runtime_seconds / 60
 
     task = Task(
@@ -111,7 +111,7 @@ def get_fedot_metrics(config):
 
     scoring_metric = metrics_mapping.get(config.metric, None)
 
     if scoring_metric is None:
-        log.warning("Performance metric %s not supported.", config.metric)
+        log.warning(f"Performance metric {config.metric} not supported.")
 
     return scoring_metric