From 684669070d28fbd71783dbadde2d7c31f337addc Mon Sep 17 00:00:00 2001 From: elaubsch Date: Thu, 11 Jan 2024 12:52:41 -0800 Subject: [PATCH 1/2] Remove alpha parameter from dotnet loss --- deepcell_spots/training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepcell_spots/training.py b/deepcell_spots/training.py index caafb63..3b250ee 100644 --- a/deepcell_spots/training.py +++ b/deepcell_spots/training.py @@ -137,7 +137,7 @@ def train_model_dot(model, print('Training on {} GPUs'.format(num_gpus)) losses = dotnet_losses.DotNetLosses( - sigma=sigma, alpha=alpha, gamma=gamma, focal=focal) + sigma=sigma, gamma=gamma, focal=focal) loss = { 'offset_regression': losses.regression_loss, From 1c62ba94592c176bc2a4888049f3a7cd733a849d Mon Sep 17 00:00:00 2001 From: elaubsch Date: Thu, 11 Jan 2024 12:57:43 -0800 Subject: [PATCH 2/2] Remove alpha from input params and docstring --- deepcell_spots/training.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/deepcell_spots/training.py b/deepcell_spots/training.py index 3b250ee..ba8391e 100644 --- a/deepcell_spots/training.py +++ b/deepcell_spots/training.py @@ -59,7 +59,6 @@ def train_model_dot(model, model_name=None, focal=False, sigma=3.0, - alpha=0.25, gamma=0.5, lr_sched=rate_scheduler(lr=0.01, decay=0.95), rotation_range=0, @@ -87,7 +86,6 @@ def train_model_dot(model, model_name (str): Name of the model (and name of output file). focal (bool): If true, uses focal loss. sigma (float): The point where the loss changes from L2 to L1. - alpha (float): Scale the focal weight with alpha. gamma (float): Parameter for focal loss (Take the power of the focal weight with gamma.). optimizer (object): Pre-initialized optimizer object (SGD, Adam, etc.).