Commit

fix: change format strings to f-strings

Lopa10ko committed Mar 25, 2024
1 parent 11c13ce commit d583cb2

Showing 2 changed files with 10 additions and 6 deletions.
6 changes: 3 additions & 3 deletions frameworks/FEDOT/exec.py
@@ -20,8 +20,8 @@ def run(dataset, config):
     training_params.update({k: v for k, v in config.framework_params.items() if not k.startswith('_')})
     n_jobs = training_params["n_jobs"]

-    log.info('Running FEDOT with a maximum time of %ss on %s cores, optimizing %s.',
-             config.max_runtime_seconds, n_jobs, scoring_metric)
+    log.info(f"Running FEDOT with a maximum time of {config.max_runtime_seconds}s on {n_jobs} cores, \
+optimizing {scoring_metric}")
     runtime_min = config.max_runtime_seconds / 60

     fedot = Fedot(problem=config.type, timeout=runtime_min, metric=scoring_metric, seed=config.seed,
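
For reference, a minimal standalone sketch of the new call, with hypothetical values standing in for config.max_runtime_seconds, n_jobs, and scoring_metric. The backslash joins the two physical lines into a single string literal, so the continuation line has to stay flush left: any leading whitespace there would be embedded in the logged message.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("fedot.bench")  # hypothetical logger name

max_runtime_seconds, n_jobs, scoring_metric = 3600, 8, "rmse"  # hypothetical values

# The continuation starts at column 0 on purpose; indenting it would
# insert that indentation into the middle of the message.
log.info(f"Running FEDOT with a maximum time of {max_runtime_seconds}s on {n_jobs} cores, \
optimizing {scoring_metric}")
# -> INFO:fedot.bench:Running FEDOT with a maximum time of 3600s on 8 cores, optimizing rmse
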
@@ -64,7 +64,7 @@ def get_fedot_metrics(config):
     scoring_metric = metrics_mapping.get(config.metric, None)

     if scoring_metric is None:
-        log.warning("Performance metric %s not supported.", config.metric)
+        log.warning(f"Performance metric {config.metric} not supported.")

     return scoring_metric
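
One semantic difference worth noting, shown in a small sketch (hypothetical logger and metric value): the old %-style call defers string interpolation until the record passes the level check, while an f-string is evaluated eagerly, before log.info or log.warning is even entered. The emitted text is identical; only the cost of suppressed records differs.

import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("fedot.bench")  # hypothetical logger name
metric = "f1"                           # hypothetical unsupported metric

# %-style: suppressed at WARNING level, and the message is never built.
log.info("Performance metric %s not supported.", metric)

# f-string: also suppressed, but the string was already constructed.
log.info(f"Performance metric {metric} not supported.")

# At WARNING and above, both styles log the same message.
log.warning(f"Performance metric {metric} not supported.")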

10 changes: 7 additions & 3 deletions frameworks/FEDOT/exec_ts.py
@@ -23,8 +23,8 @@ def run(dataset, config):
     training_params.update({k: v for k, v in config.framework_params.items() if not k.startswith('_')})
     n_jobs = training_params["n_jobs"]

-    log.info('Running FEDOT with a maximum time of %ss on %s cores, optimizing %s.',
-             config.max_runtime_seconds, n_jobs, scoring_metric)
+    log.info(f"Running FEDOT with a maximum time of {config.max_runtime_seconds}s on {n_jobs} cores, \
+optimizing {scoring_metric}")
     runtime_min = config.max_runtime_seconds / 60

     task = Task(
@@ -41,6 +41,10 @@ def run(dataset, config):
     truth_only = test_df[dataset.target].values
     predictions = []

+
+    for label, ts in train_df.groupby(id_column, sort=False):
+        train_series = ts[dataset.target].to_numpy()
+
     for label in train_df[id_column].unique():
         train_sub_df = train_df[train_df[id_column] == label].drop(columns=[id_column], axis=1)
         train_series = np.array(train_sub_df[dataset.target])
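
The surrounding hunk also shows two ways of iterating the per-series training data. A minimal sketch under assumed column names (the real ones come from dataset.id_column and dataset.target in exec_ts.py) contrasting the added groupby loop with the existing unique-label filter:

import numpy as np
import pandas as pd

id_column, target = "item_id", "value"  # hypothetical column names
train_df = pd.DataFrame({id_column: ["a", "a", "b"], target: [1.0, 2.0, 3.0]})

# Added loop: one pass over the frame, labels in first-seen order.
for label, ts in train_df.groupby(id_column, sort=False):
    train_series = ts[target].to_numpy()

# Existing loop: a full boolean-mask scan of the frame for every label.
for label in train_df[id_column].unique():
    train_sub_df = train_df[train_df[id_column] == label].drop(columns=[id_column])
    train_series = np.array(train_sub_df[target])

Both loops yield the same series per label; groupby just avoids rescanning the whole frame once per label, which matters as the number of series grows.
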
@@ -111,7 +115,7 @@ def get_fedot_metrics(config):
     scoring_metric = metrics_mapping.get(config.metric, None)

     if scoring_metric is None:
-        log.warning("Performance metric %s not supported.", config.metric)
+        log.warning(f"Performance metric {config.metric} not supported.")

     return scoring_metric

