update params & docstring
Borda committed Jan 7, 2019
1 parent bcf3895 commit a6dad15
Showing 34 changed files with 544 additions and 552 deletions.
8 changes: 4 additions & 4 deletions experiments_ovary_centres/gui_annot_center_correction.py
@@ -87,7 +87,7 @@ def arg_parse_params():
help='path to file with complete info', default=None)
params = vars(parser.parse_args())
for k in (k for k in params if 'path' in k):
- if params[k] is None:
+ if not params[k]:
continue
params[k] = os.path.abspath(os.path.expanduser(params[k]))
p = os.path.dirname(params[k]) if '*' in params[k] else params[k]
@@ -139,7 +139,7 @@ def set_false_positive(df_points, mask_eggs):


def set_false_negative(df_points, mask_eggs):
- points = df_points[['X', 'Y']].as_matrix().astype(int)
+ points = df_points[['X', 'Y']].values.astype(int)
for lb in (lb for lb in np.unique(mask_eggs) if lb != 0):
mask = (mask_eggs == lb)
labels = mask[points[:, 1], points[:, 0]]
@@ -255,7 +255,7 @@ def add_point_correction(x, y, changing=1, limit_dist=DICT_LIMIT_CORRECT):
:param int limit_dist:
"""
global df_center_labeled
- points = df_center_labeled[['X', 'Y']].as_matrix()
+ points = df_center_labeled[['X', 'Y']].values
dists = spatial.distance.cdist(np.array(points), np.array([[x, y]]),
metric='euclidean')
if np.min(dists) < limit_dist:
@@ -281,7 +281,7 @@ def remove_point(x, y, limit_dist=DICT_LIMIT_REMOVE):
:param int limit_dist:
"""
global df_center_labeled
- points = df_center_labeled[['X', 'Y']].as_matrix()
+ points = df_center_labeled[['X', 'Y']].values
dists = spatial.distance.cdist(np.array(points), np.array([[x, y]]),
metric='euclidean')
if np.min(dists) < limit_dist:
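The hunks in this file swap the pandas DataFrame.as_matrix() call, deprecated in pandas 0.23 and removed in 1.0, for the .values attribute, and relax an "is None" path check. A minimal sketch of the pandas change, using an illustrative DataFrame rather than the project's annotation data:

import pandas as pd

df_points = pd.DataFrame({'X': [10.5, 20.1], 'Y': [5.2, 7.9]})

# old call removed by this commit (fails on pandas >= 1.0):
#   points = df_points[['X', 'Y']].as_matrix().astype(int)
# replacement kept by this commit:
points = df_points[['X', 'Y']].values.astype(int)
print(points)  # [[10  5]
               #  [20  7]]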
20 changes: 9 additions & 11 deletions experiments_ovary_centres/run_center_candidate_training.py
@@ -201,7 +201,7 @@ def find_match_images_segms_centers(path_pattern_imgs, path_pattern_segms,
list_paths = [path_pattern_imgs, path_pattern_segms, path_pattern_center]
df_paths = tl_data.find_files_match_names_across_dirs(list_paths)

- if path_pattern_center is None:
+ if not path_pattern_center:
df_paths.columns = ['path_image', 'path_segm']
df_paths['path_centers'] = ''
else:
@@ -330,10 +330,10 @@ def export_show_image_points_labels(path_out, img_name, img, seg, points,
img = img / float(np.max(img)) if np.max(img) > 1 else img
tl_visu.draw_image_segm_points(axarr[0], img, points, labels,
seg_contour=seg_centers,
- dict_label_marker=dict_label_marker)
+ lut_label_marker=dict_label_marker)
tl_visu.draw_image_segm_points(axarr[1], seg, points, labels, slic,
seg_contour=seg_centers,
- dict_label_marker=dict_label_marker)
+ lut_label_marker=dict_label_marker)
fig.tight_layout()
fig.savefig(os.path.join(path_out, img_name + fig_suffix + '.png'),
bbox_inches='tight', pad_inches=0)
@@ -444,15 +444,13 @@ def wrapper_draw_export_slic_centers(args):
return export_show_image_points_labels(*args)


- def dataset_load_images_segms_compute_features(params, df_paths,
- nb_jobs=NB_THREADS):
+ def dataset_load_images_segms_compute_features(params, df_paths, nb_jobs=NB_THREADS):
""" create whole dataset composed from loading input data, computing features
- and label points by label wether its positive or negative center candidate
+ and label points by label whether its positive or negative center candidate
- :param {str: str} paths:
- :param {str: any} params:
- :param df_paths: DF
- :param int nb_jobs:
+ :param {str: any} params: parameters
+ :param DF df_paths: DataFrame
+ :param int nb_jobs: parallel
:return {str: ...}:
"""
dict_imgs, dict_segms, dict_center = dict(), dict(), dict()
@@ -746,7 +744,7 @@ def main_train(params):
# feature norm & train classification
nb_holdout = int(np.ceil(len(sizes) * CROSS_VAL_LEAVE_OUT_SEARCH))
cv = seg_clf.CrossValidatePSetsOut(sizes, nb_holdout)
- classif, params['path_classif'] = seg_clf.create_classif_train_export(
+ classif, params['path_classif'] = seg_clf.create_classif_search_train_export(
params['classif'], features, labels, cross_val=cv, params=params,
feature_names=feature_names, nb_search_iter=params['nb_classif_search'],
pca_coef=params.get('pca_coef', None), nb_jobs=params['nb_jobs'],
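Several hunks in this commit, starting with the path_pattern_center check above, relax "x is None" tests to "not x". A small sketch of the behavioural difference, with illustrative values: an empty string now counts as missing too, which the original identity check did not cover.

for value in (None, '', '*.csv'):
    missing_before = value is None   # check removed by the commit
    missing_after = not value        # check added by the commit
    print(repr(value), missing_before, missing_after)
# None     True  True
# ''       False True
# '*.csv'  False False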
4 changes: 2 additions & 2 deletions experiments_ovary_centres/run_center_prediction.py
@@ -65,7 +65,7 @@ def load_compute_detect_centers(idx_row, params, classif=None, path_classif='',
_, row = idx_row
dict_center = dict(row)

- if classif is None:
+ if not classif:
dict_classif = seg_clf.load_classifier(path_classif)
classif = dict_classif['clf_pipeline']

@@ -131,7 +131,7 @@ def get_csv_triplets(path_csv, path_csv_out, path_imgs, path_segs,
def main(params):
""" PIPELINE for new detections
- :param {str: str} paths:
+ :param {str: str} params:
"""
params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)

4 changes: 2 additions & 2 deletions experiments_ovary_detect/run_export_user-annot-segm.py
@@ -98,7 +98,7 @@ def figure_draw_img_centre_segm(fig, img, centres, segm,
:param int subfig_size:
:return obj:
"""
- if fig is None:
+ if not fig:
norm_size = np.array(img.shape[:2]) / float(np.max(img.shape))
fig, ax = plt.subplots(figsize=(norm_size[::-1] * subfig_size))
ax.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
@@ -131,7 +131,7 @@ def figure_draw_annot_csv(fig, img, row_slice, subfig_size=FIGURE_SIZE):
:param int subfig_size:
:return obj:
"""
- if fig is None:
+ if not fig:
norm_size = np.array(img.shape[:2]) / float(np.max(img.shape))
fig, ax = plt.subplots(figsize=(norm_size[::-1] * subfig_size))
ax.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
14 changes: 7 additions & 7 deletions experiments_ovary_detect/run_ovary_egg-segmentation.py
@@ -161,7 +161,7 @@ def arg_parse_params(params):
params.update(data)
params.update(arg_params)
for k in (k for k in arg_params if 'path' in k):
- if arg_params[k] is None:
+ if not arg_params[k]:
continue
params[k] = tl_data.update_path(arg_params[k], absolute=True)
assert os.path.exists(params[k]), 'missing: %s' % params[k]
@@ -380,7 +380,7 @@ def segment_fit_ellipse(seg, centers, fn_preproc_points,
lb = i + 1
ellipse = EllipseModel()
ellipse.estimate(points)
- if ellipse is None:
+ if not ellipse:
continue
logging.debug('ellipse params: %s', repr(ellipse.params))
segm = ell_fit.add_overlap_ellipse(segm, ellipse.params, lb, thr_overlap)
@@ -415,7 +415,7 @@ def segment_fit_ellipse_ransac(seg, centers, fn_preproc_points, nb_inliers=0.6,
min_samples=nb_min,
residual_threshold=15,
max_trials=250)
- if ransac_model is None:
+ if not ransac_model:
continue
logging.debug('ellipse params: %s', repr(ransac_model.params))
segm = ell_fit.add_overlap_ellipse(segm, ransac_model.params, lb,
@@ -458,7 +458,7 @@ def segment_fit_ellipse_ransac_segm(seg, centers, fn_preproc_points,
min_samples=nb_inliers,
residual_threshold=25,
max_trials=250)
- if ransac_model is None:
+ if not ransac_model:
continue
logging.debug('ellipse params: %s', repr(ransac_model.params))
segm = ell_fit.add_overlap_ellipse(segm, ransac_model.params, lb,
@@ -533,7 +533,7 @@ def segment_rg2sp_greedy(slic, seg, centers, labels_fg_prob, path_model,
shape_model['name'], coef_shape, coef_pairwise,
prob_label_trans, greedy_tol=1e-1, allow_obj_swap=allow_obj_swap,
dict_thresholds=dict_thresholds, nb_iter=1000,
- dict_debug_history=dict_debug)
+ debug_history=dict_debug)

if dict_debug is not None:
nb_iter = len(dict_debug['energy'])
@@ -563,7 +563,7 @@ def segment_rg2sp_graphcut(slic, seg, centers, labels_fg_prob, path_model,
shape_model['name'], coef_shape, coef_pairwise, prob_label_trans,
optim_global=True, allow_obj_swap=allow_obj_swap,
dict_thresholds=dict_thresholds, nb_iter=250,
- dict_debug_history=dict_debug)
+ debug_history=dict_debug)

if dict_debug is not None:
nb_iter = len(dict_debug['energy'])
@@ -706,7 +706,7 @@ def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
return name
# img = seg / float(seg.max())
slic = seg_spx.segment_slic_img2d(img_rgb, sp_size=params['slic_size'],
- rltv_compact=params['slic_regul'])
+ relative_compact=params['slic_regul'])

path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)
2 changes: 1 addition & 1 deletion experiments_ovary_detect/run_ovary_segm_evaluation.py
@@ -226,7 +226,7 @@ def evaluate_folder(path_dir, dict_paths, export_visual=EXPORT_VUSIALISATION):
for _, row in df_paths.iterrows():
expert_visual(row, name, path_out=dict_paths['results'])

- if dict_paths['annots'] is None:
+ if not dict_paths['annots']:
logging.info('no Annotation given')
return {'method': name, 'count': 0}

2 changes: 1 addition & 1 deletion experiments_segmentation/run_compute_stat_annot_segm.py
@@ -91,7 +91,7 @@ def aparse_params(dict_paths):
for k in dict_paths:
assert os.path.isdir(os.path.dirname(dict_paths[k])), \
'missing: (%s) "%s"' % (k, os.path.dirname(dict_paths[k]))
- if args['drop_labels'] is None:
+ if not args['drop_labels']:
args['drop_labels'] = []
return dict_paths, args

4 changes: 2 additions & 2 deletions experiments_segmentation/run_segm_slic_classif_graphcut.py
@@ -200,7 +200,7 @@ def _path_out_img(params, dir_name, name):
# if img.ndim == 2:
# img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
slic = seg_spx.segment_slic_img2d(img, sp_size=params['slic_size'],
- rltv_compact=params['slic_regul'])
+ relative_compact=params['slic_regul'])
img = tl_data.convert_img_color_from_rgb(img, params.get('clr_space', 'rgb'))
logging.debug('computed SLIC with %i labels', slic.max())
if show_debug_imgs:
@@ -563,7 +563,7 @@ def load_train_classifier(params, features, labels, feature_names, sizes,
if k.startswith('path_') or k.startswith('gc_')})
logging.debug('loaded PARAMETERS: %s', repr(params))
else:
- classif, path_classif = seg_clf.create_classif_train_export(
+ classif, path_classif = seg_clf.create_classif_search_train_export(
params['classif'], features, labels, cross_val=cv, params=params,
feature_names=feature_names, pca_coef=params['pca_coef'],
eval_metric=params.get('classif_metric', 'f1'),
18 changes: 8 additions & 10 deletions handling_annotations/run_image_color_quantization.py
@@ -62,7 +62,7 @@ def see_images_color_info(path_images, px_thr=THRESHOLD_INVALID_PIXELS):
""" look to the folder on all images and estimate most frequent colours
:param [str] path_images: list of images
- :param px_th: float, percentage of nb clr pixels to be assumed as important
+ :param float px_th: percentage of nb clr pixels to be assumed as important
:return {}:
"""
if not os.path.isdir(os.path.dirname(path_images)):
@@ -102,26 +102,24 @@ def perform_quantize_image(path_image, list_colors, method='color'):
# plt.show()


- def quantize_folder_images(path_images, list_colors=None, method='color',
+ def quantize_folder_images(path_images, colors=None, method='color',
px_threshold=THRESHOLD_INVALID_PIXELS, nb_jobs=1):
""" perform single or multi thread image quantisation
- :param str method:
- :param float px_threshold:
:param str path_images:, input directory and image pattern for loading
- :param list_colors: [(int, int, int)], list of possible colours
+ :param colors: [(int, int, int)], list of possible colours
:param str method: interpolation method
- :param float px_threshold:
- :param nb_jobs: int
+ :param float px_threshold: pixel threshold
+ :param int nb_jobs: number of jobs
"""
path_imgs = sorted(glob.glob(path_images))
logging.info('found %i images', len(path_imgs))
- if list_colors is None:
+ if colors is None:
dict_colors = see_images_color_info(path_images, px_thr=px_threshold)
- list_colors = [c for c in dict_colors]
+ colors = [c for c in dict_colors]

_wrapper_quantize_img = partial(perform_quantize_image,
- method=method, list_colors=list_colors)
+ method=method, list_colors=colors)
iterate = tl_expt.WrapExecuteSequence(_wrapper_quantize_img, path_imgs,
nb_jobs=nb_jobs,
desc='quantize images')
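The rename in this file only touches the public keyword (list_colors becomes colors); the wrapped perform_quantize_image worker keeps its own list_colors parameter, which is why the functools.partial call above still binds list_colors=colors. A rough sketch of that pattern with hypothetical stand-in functions, not the repository's actual API:

from functools import partial

def perform_quantize(path_image, list_colors, method='color'):
    # stand-in for the real per-image worker
    return path_image, list_colors, method

colors = [(0, 0, 0), (255, 255, 255)]
_wrapper_quantize = partial(perform_quantize, method='color', list_colors=colors)

for path in ('img_1.png', 'img_2.png'):
    print(_wrapper_quantize(path))
# ('img_1.png', [(0, 0, 0), (255, 255, 255)], 'color')
# ('img_2.png', [(0, 0, 0), (255, 255, 255)], 'color')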
6 changes: 3 additions & 3 deletions handling_annotations/run_image_convert_label_color.py
@@ -128,9 +128,9 @@ def convert_folder_images(path_images, path_out, path_json=None, nb_jobs=1):
""" perform single or multi thread image quantisation
:param [str] path_images: list of input images
- :param path_out: output directory
- :param path_json: path to json file
- :param int nb_jobs: int
+ :param str path_out: output directory
+ :param str path_json: path to json file
+ :param int int nb_jobs:
"""
assert os.path.isdir(os.path.dirname(path_images)), \
'input folder does not exist'
2 changes: 1 addition & 1 deletion handling_annotations/run_segm_annot_inpaint.py
@@ -53,7 +53,7 @@ def parse_arg_params():
def perform_img_inpaint(path_img, labels):
""" perform the quantization together with loading and exporting
- :param path_img: str
+ :param str path_img: image path
"""
logging.debug('repaint labels %s for image: "%s"', repr(labels), path_img)
img = np.array(tl_data.io.imread(path_img), dtype=np.float)