# tracking.py (forked from jordan-g/Zebrafish-Tracking)
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import cv2
import traceback
import scipy.ndimage
import scipy.stats
from scipy import interpolate
import os
import re
import itertools
import time
import pdb
import psutil
from scipy import sparse
import multiprocessing
from multiprocessing import sharedctypes
from functools import partial
from itertools import chain
from moviepy.video.io.ffmpeg_reader import *
from skimage.morphology import skeletonize, thin
from collections import deque
from open_media import open_image, open_video
import utilities
import analysis
try:
    xrange
except NameError:
    # Python 3 compatibility: xrange was removed in Python 3
    xrange = range
# Headfixed tail tracking global variables
fitted_tail = []
tail_funcs = None
tail_brightness = None
background_brightness = None
tail_length = None
cv2.setNumThreads(0) # Avoids crashes when using multiprocessing with OpenCV
# --- Nick's Added Functions --- #
def subtract_background_from_frames_extended(frames, background, threshold_value = 3, morph = True, kernel_size = [3, 3], n_iterations = 1):
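    '''
    Background-subtract a list of frames by thresholding each frame's absolute difference
    from the background image, keeping only the largest contour (assumed to be the fish),
    and copying the original pixel values inside that contour onto a white frame.
    Returns the list of background-subtracted frames.
    '''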
background_subtracted_frames = []
for frame in frames:
        absolute_difference_between_background_and_frame = calculate_absolute_difference_between_background_and_frame(frame, background)
        threshold_frame = apply_threshold_to_frame(absolute_difference_between_background_and_frame, threshold_value = threshold_value)
fish_contour_frame = extract_fish_contour_from_threshold_frame(threshold_frame, morph = morph, kernel_size = kernel_size, n_iterations = n_iterations)
background_subtracted_frames.append(crop_fish_from_frame_using_fish_contour(frame, fish_contour_frame))
return background_subtracted_frames
def extract_background_extended(video_path, num_backgrounds = 1, threshold_value = 10, morph = True, kernel_size = [3, 3], n_iterations = 1, save_background = False):
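    '''
    Compute a background image for the video by averaging each pixel over the frames in
    which the fish is absent. Brightest-pixel backgrounds (one per chunk of the video) are
    computed first and used to locate the fish contour in every frame; pixels inside that
    contour are excluded from the running average. Optionally saves the result as
    '<video name>_background.tif' next to the video, and returns it as a uint8 array.
    '''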
if not os.path.isfile(video_path):
print('Error! Video: {0} does not exist. Check to make sure the video path has been entered correctly.'.format(video_path))
return
background_array = calculate_backgrounds_as_brightest_pixel_value(video_path, num_backgrounds = num_backgrounds)
try:
capture = cv2.VideoCapture(video_path)
except:
        print('Error! Could not open video: {0}.'.format(video_path))
return
# video_total_frames = get_video_info(video_path)[1]
video_total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
background_chunk_index = int(video_total_frames / num_backgrounds)
for frame_num in range(video_total_frames):
print('Extracting running average background using fish contours. Processing frame number: {0}/{1}.'.format(frame_num + 1, video_total_frames), end = '\r')
success, frame = capture.read()
if success:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32)
if frame_num == 0:
frame_sum = np.zeros(np.shape(frame))
contour_sum = np.zeros(np.shape(frame))
n_background = int(frame_num/background_chunk_index)
try:
background_subtracted_frame = calculate_absolute_difference_between_background_and_frame(frame, background_array[n_background])
except:
background_subtracted_frame = calculate_absolute_difference_between_background_and_frame(frame, background_array[n_background - 1])
threshold_frame = apply_threshold_to_frame(background_subtracted_frame, threshold_value = threshold_value)
fish_contour_frame = extract_fish_contour_from_threshold_frame(threshold_frame, morph = morph, kernel_size = kernel_size, n_iterations = n_iterations)
fish_contour_frame = -fish_contour_frame/255 + 1
frame_sum += frame * fish_contour_frame
contour_sum += fish_contour_frame
print('Extracting running average background using fish contours. Processing frame number: {0}/{1}.'.format(frame_num + 1, video_total_frames))
background = frame_sum / contour_sum
capture.release()
if save_background:
background_path = '{0}_background.tif'.format(video_path[:-4])
cv2.imwrite(background_path, background.astype(np.uint8))
return background.astype(np.uint8)
def calculate_backgrounds_as_brightest_pixel_value(video_path, num_backgrounds = 1):
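    '''
    Compute a list of background images by tracking, for each pixel, the brightest value
    seen so far in the video. A snapshot of this running maximum is stored once per chunk
    of the video (total frames / num_backgrounds), so later backgrounds incorporate more
    frames. Returns the list of float32 background arrays.
    '''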
if not os.path.isfile(video_path):
print('Error! Video: {0} does not exist. Check to make sure the video path has been entered correctly.'.format(video_path))
return
try:
capture = cv2.VideoCapture(video_path)
except:
print('Error! Could not open video.')
return
background_array = []
# video_total_frames = get_video_info(video_path)[1]
video_total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
background_chunk_index = int(video_total_frames / num_backgrounds)
if num_backgrounds >= video_total_frames:
print('Error! Number of backgrounds requested exceeds the total number of frames in the video.')
return
for frame_num in range(video_total_frames):
print('Calculating background. Processing frame number: {0}/{1}.'.format(frame_num + 1, video_total_frames), end = '\r')
success, frame = capture.read()
if success:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if frame_num == 0:
background = frame.copy().astype(np.float32)
mask = np.less(background, frame)
background[mask] = frame[mask]
if frame_num > 0 and frame_num % background_chunk_index == 0:
                background_array.append(background.copy()) # copy so later updates don't modify the stored background
elif len(background_array) < num_backgrounds:
if (frame_num + 1) == video_total_frames:
                    background_array.append(background.copy()) # copy so later updates don't modify the stored background
print('Calculating background. Processing frame number: {0}/{1}.'.format(frame_num + 1, video_total_frames))
capture.release()
return background_array
def calculate_absolute_difference_between_background_and_frame(frame, background):
background_subtracted_frame = cv2.absdiff(frame, background)
return background_subtracted_frame
def apply_threshold_to_frame(frame, threshold_value = 10, inverted = False):
if inverted:
threshold_type = cv2.THRESH_BINARY_INV
else:
threshold_type = cv2.THRESH_BINARY
threshold_frame = cv2.threshold(frame.astype(np.uint8), threshold_value, 255, threshold_type)[1]
return threshold_frame
def extract_fish_contour_from_threshold_frame(threshold_frame, morph = False, kernel_size = [3, 3], n_iterations = 1):
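    '''
    Given a thresholded frame, optionally apply a morphological close (dilate then erode)
    to fill small gaps, find the largest contour by area (assumed to be the fish), and
    return a frame in which that contour is filled with white (255) on a black background.
    '''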
if morph:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size[0], kernel_size[1]))
threshold_frame = cv2.dilate(threshold_frame, kernel, iterations = n_iterations)
threshold_frame = cv2.erode(threshold_frame, kernel, iterations = n_iterations)
    contours = cv2.findContours(threshold_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2] # [-2] gets the contours in both OpenCV 3 and OpenCV 4
max_contour = np.concatenate(max(contours, key = cv2.contourArea))
fish_contour = np.zeros(np.shape(threshold_frame))
for i in range(len(max_contour)):
fish_contour[max_contour[i][1]][max_contour[i][0]] = threshold_frame[max_contour[i][1]][max_contour[i][0]]
fish_contour = cv2.fillPoly(fish_contour, pts = [max_contour], color = (255,255,255))
return fish_contour
def crop_fish_from_frame_using_fish_contour(frame, fish_contour):
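    '''
    Copy the original pixel values of the frame wherever the fish contour mask is nonzero
    onto an otherwise white (255) frame, and return the result.
    '''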
fish_frame = np.ones(np.shape(fish_contour)) * 255
fish_contour_values = np.where(fish_contour)
for i in range(np.shape(fish_contour_values)[1]):
fish_frame[fish_contour_values[0][i]][fish_contour_values[1][i]] = frame[fish_contour_values[0][i]][fish_contour_values[1][i]]
return fish_frame
def get_tail_threshold_frame(frame, tail_threshold, inverted = True, morph = True, kernel_size = [3, 3], n_iterations = 1):
threshold_frame = apply_threshold_to_frame(frame, threshold_value = tail_threshold, inverted = inverted)
tail_threshold_frame = extract_fish_contour_from_threshold_frame(threshold_frame, morph = morph, kernel_size = kernel_size, n_iterations = n_iterations)
return tail_threshold_frame
def get_body_threshold_frame(frame, body_threshold, inverted = True, morph = True, kernel_size = [3, 3], n_iterations = 1):
threshold_frame = apply_threshold_to_frame(frame, threshold_value = body_threshold, inverted = inverted)
body_threshold_frame = extract_fish_contour_from_threshold_frame(threshold_frame, morph = morph, kernel_size = kernel_size, n_iterations = n_iterations)
return body_threshold_frame
def extract_body_position_extended(frame, eyes_threshold_value, body_threshold_value, threshold_value = None, threshold_step = 1, erode = False, kernel_size = [3, 3], n_iterations = 1):
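    '''
    Estimate the body centre by raising the threshold (starting from the eyes threshold)
    until exactly three blobs are found -- presumably the two eyes and the swim bladder --
    and returning the average (y, x) centroid of those blobs. If no suitable threshold
    below the body threshold yields three blobs, returns None.
    '''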
    if threshold_value is None:
threshold_value = eyes_threshold_value
body_position = None
try:
if threshold_value < body_threshold_value:
threshold_frame = get_threshold_frame(frame, threshold_value)
if erode:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size[0], kernel_size[1]))
threshold_frame = cv2.erode(threshold_frame, kernel, iterations = n_iterations)
            contours = cv2.findContours(threshold_frame.astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2] # [-2] gets the contours in both OpenCV 3 and OpenCV 4
if len(contours) == 3:
moments = [cv2.moments(contours[i]) for i in range(len(contours))]
if np.min(np.array([moments[i]['m00'] for i in range(len(moments))])) < 1:
threshold_value += threshold_step
body_position = extract_body_position_extended(frame = frame, eyes_threshold_value = eyes_threshold_value, body_threshold_value = body_threshold_value, threshold_value = threshold_value, threshold_step = threshold_step)
elif body_position is None:
body_position = np.array([np.average([moments[i]['m01'] / moments[i]['m00'] for i in range(len(moments))]), np.average([moments[i]['m10'] / moments[i]['m00'] for i in range(len(moments))])])
else:
threshold_value += threshold_step
body_position = extract_body_position_extended(frame = frame, eyes_threshold_value = eyes_threshold_value, body_threshold_value = body_threshold_value, threshold_value = threshold_value, threshold_step = threshold_step)
else:
body_position = None
except:
body_position = None
return body_position
def show_image(image):
print(np.shape(image), np.max(image), np.min(image))
cv2.imshow("Image Preview", image.astype(np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()
# --- Background subtraction --- #
def subtract_background_from_frames(frames, background, bg_sub_threshold, dark_background=False):
'''
Subtract a background image from an array of frames.
Arguments:
frames (ndarray) : (t, y, x) array of frames.
background (ndarray) : (y, x) background image array.
        bg_sub_threshold (int) : Threshold on the difference between a pixel and its background value below
                                 which the pixel is considered a background pixel (and set to white/black).
dark_background (bool) : Whether the video has a dark background and light fish. If so, background pixels
will be set to black rather than white.
Returns:
bg_sub_frames (ndarray) : The background-subtracted frames.
'''
# create a mask that is True wherever a pixel value is sufficiently close to the background pixel value
background_mask = (frames - background < bg_sub_threshold) | (frames - background > 255 - bg_sub_threshold)
# subtract the background from the frames
bg_sub_frames = frames - background.astype(float)
# add back the mean background intensity (so the background-subtracted
# frames have similar brightness to the original frames)
bg_sub_frames += np.mean(background)
# set the brightness of the background appropriately
if dark_background:
bg_value = 0
else:
bg_value = 255
bg_sub_frames[background_mask] = bg_value
# make sure the background-subtracted frames are in the range [0, 255]
bg_sub_frames[bg_sub_frames < 0] = 0
bg_sub_frames[bg_sub_frames > 255] = 255
return bg_sub_frames.astype(np.uint8)
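# Illustrative usage (a sketch, not from the original code): `frames` is assumed to be a
# (t, y, x) array of grayscale frames and `background` a (y, x) background image, e.g.
# computed with extract_background_extended:
#   bg_sub_frames = subtract_background_from_frames(frames, background, bg_sub_threshold=10)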
# --- Tracking --- #
def open_and_track_video(video_path, background_path, params, tracking_dir, video_number=0, progress_signal=None):
'''
Open and perform tracking on the provided video.
Arguments:
        video_path (str)           : Path to the video.
        background_path (str/None) : Path to a background image to subtract, or None.
params (dict) : Dictionary of tracking parameters.
tracking_dir (str) : Directory in which to save tracking data.
video_number (int) : If tracking a batch of videos, which number this video is.
progress_signal (QSignal) : Signal to use to update the GUI with tracking progress.
'''
# start a timer for recording how long tracking takes
start_time = time.time()
# extract parameters
subtract_background = params['subtract_background']
if params['backgrounds'] is not None:
background = params['backgrounds'][video_number]
elif background_path is not None:
background = cv2.imread(background_path, cv2.IMREAD_GRAYSCALE)
else:
background = None
crop_params = params['crop_params']
n_tail_points = params['n_tail_points']
save_video = params['save_video']
tracking_video_fps = params['tracking_video_fps']
use_multiprocessing = params['use_multiprocessing']
n_crops = len(params['crop_params'])
bg_sub_threshold = params['bg_sub_threshold']
tracking_type = params['type']
dark_background = params['dark_background']
# initialize a counter for the number of frames that have been tracked
n_frames_tracked = 0
if progress_signal:
# send a progress update signal to the controller
percent_complete = 0
progress_signal.emit(video_number, percent_complete)
# create a video capture object that we can re-use
try:
capture = cv2.VideoCapture(video_path)
except:
print("Error: Could not open video.")
return
# get video info
# fps, n_frames_total = get_video_info(video_path)
n_frames_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
fps = capture.get(cv2.CAP_PROP_FPS)
print("Total number of frames to track: {}.".format(n_frames_total))
if tracking_video_fps == 0:
# set tracking video fps to be the same as the original video
tracking_video_fps = fps
if subtract_background and background is None:
print("Calculating background...")
# calculate the background
if n_frames_total > 1000:
frame_nums = utilities.split_evenly(n_frames_total, 1000)
else:
frame_nums = list(range(n_frames_total))
# background = open_video(video_path, frame_nums, return_frames=False, calc_background=True, capture=capture, dark_background=dark_background)
background = extract_background_extended(video_path, num_backgrounds = 10, threshold_value = 8, save_background = True)
# initialize tracking data arrays
tail_coords_array = np.zeros((n_crops, n_frames_total, 2, n_tail_points)) + np.nan
spline_coords_array = np.zeros((n_crops, n_frames_total, 2, n_tail_points)) + np.nan
heading_angle_array = np.zeros((n_crops, n_frames_total, 1)) + np.nan
body_position_array = np.zeros((n_crops, n_frames_total, 2)) + np.nan
eye_coords_array = np.zeros((n_crops, n_frames_total, 2, 2)) + np.nan
# set number of frames to load into memory at a time
big_chunk_size = 500
# split frame numbers into big chunks - we keep only one big chunk of frames in memory at a time
big_split_frame_nums = utilities.split_list_into_chunks(range(n_frames_total), big_chunk_size)
if use_multiprocessing:
# create a pool of workers
pool = multiprocessing.Pool(None)
# create the directory for saving tracking data if it doesn't exist
if not os.path.exists(tracking_dir):
os.makedirs(tracking_dir)
for i in range(len(big_split_frame_nums)):
print("Tracking frames {} to {}...".format(big_split_frame_nums[i][0], big_split_frame_nums[i][-1]))
# get the frame numbers to process
frame_nums = big_split_frame_nums[i]
# boolean indicating whether to have the capture object seek to the starting frame
# this only needs to be done at the beginning to seek to frame 0
seek_to_starting_frame = i == 0
print("Opening frames...")
# load this big chunk of frames
frames = open_video(video_path, frame_nums, capture=capture, seek_to_starting_frame=seek_to_starting_frame)
if i == 0 and params['save_video']:
# create the video writer, for saving a video with tracking overlaid
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
new_video_path = os.path.join(tracking_dir, "{}_tracked_video.avi".format(os.path.splitext(os.path.basename(video_path))[0]))
writer = cv2.VideoWriter(new_video_path, 0, tracking_video_fps,
(frames[0].shape[1], frames[0].shape[0]), True)
print("Tracking frames...")
if use_multiprocessing:
# split the frames into small chunks - we let multiple processes deal with a chunk at a time
small_chunk_size = 100
split_frames = utilities.yield_chunks_from_array(frames, small_chunk_size)
# get the pool of workers to track the chunks of frames
result_list = []
for result in pool.imap(partial(track_frames, params, background, frame_nums), split_frames, 20):
result_list.append(result)
# increase the number of tracked frames counter
n_frames_tracked += small_chunk_size
if progress_signal:
# send a progress update signal to the controller
percent_complete = 100.0*n_frames_tracked/n_frames_total
progress_signal.emit(video_number, percent_complete)
# get the number of frame chunks that have been processed
n_chunks = len(result_list)
# add tracking results to tracking data arrays
tail_coords_array[:, frame_nums, :, :] = np.concatenate([result_list[i][0] for i in range(n_chunks)], axis=1)
spline_coords_array[:, frame_nums, :, :] = np.concatenate([result_list[i][1] for i in range(n_chunks)], axis=1)
heading_angle_array[:, frame_nums, :] = np.concatenate([result_list[i][2] for i in range(n_chunks)], axis=1)
body_position_array[:, frame_nums, :] = np.concatenate([result_list[i][3] for i in range(n_chunks)], axis=1)
eye_coords_array[:, frame_nums, :, :] = np.concatenate([result_list[i][4] for i in range(n_chunks)], axis=1)
else:
# track the big chunk of frames and add results to tracking data arrays
(tail_coords_small_array, spline_coords_small_array,
heading_angle_small_array, body_position_small_array, eye_coords_small_array) = track_frames(params, background, frame_nums, frames)
# increase the number of tracked frames counter
n_frames_tracked += len(frame_nums)
if progress_signal:
# send a progress update signal to the controller
percent_complete = 100.0*n_frames_tracked/n_frames_total
progress_signal.emit(video_number, percent_complete)
# add tracking results to tracking data arrays
tail_coords_array[:, frame_nums, :, :] = tail_coords_small_array
spline_coords_array[:, frame_nums, :, :] = spline_coords_small_array
heading_angle_array[:, frame_nums, :] = heading_angle_small_array
body_position_array[:, frame_nums, :] = body_position_small_array
eye_coords_array[:, frame_nums, :, :] = eye_coords_small_array
# convert tracking coordinates from cropped frame space to original frame space
for k in range(n_crops):
tail_coords_array[k] = get_absolute_coords(tail_coords_array[k], params['crop_params'][k]['offset'])
spline_coords_array[k] = get_absolute_coords(spline_coords_array[k], params['crop_params'][k]['offset'])
body_position_array[k] = get_absolute_coords(body_position_array[k], params['crop_params'][k]['offset'])
eye_coords_array[k] = get_absolute_coords(eye_coords_array[k], params['crop_params'][k]['offset'])
if params['save_video']:
print("Adding frames to tracking video...")
for k in range(len(frames)):
# get the frame & the frame number
frame = frames[k]
frame_num = frame_nums[k]
# create a dictionary with the tracking results for this frame
results = {'tail_coords' : tail_coords_array[:, frame_num, :, :],
'spline_coords': spline_coords_array[:, frame_num, :, :],
'eye_coords' : eye_coords_array[:, frame_num, :, :],
'heading_angle': heading_angle_array[:, frame_num, :],
'body_position': body_position_array[:, frame_num, :]}
# overlay the tracking data onto the frame
tracked_frame = add_tracking_to_frame(frame, results, n_crops=n_crops)
# write to the new video file
writer.write(tracked_frame)
if params['save_video']:
print("Video created: {}.".format(new_video_path))
# release the video writer
writer.release()
# make a tracking params dictionary for this video
tracking_params = params.copy()
tracking_params['video_num'] = video_number
if tracking_type == "freeswimming":
# set tracking variables to None if they weren't used
if not params['track_eyes']:
eye_coords_array = None
if not params['track_tail']:
tail_coords_array = None
spline_coords_array = None
# calculate the tail angles (in degrees)
tail_angle_array = analysis.calculate_freeswimming_tail_angles(heading_angle_array, body_position_array, tail_coords_array)
# save tail angles, body position & heading angle as CSV files
        # for tail angles, rows are video frames, columns are points along the tail
# for body position, rows are video frames, columns are x & y coordinates
# for heading angle, rows are video frames
if n_crops > 1:
for k in range(n_crops):
np.savetxt(os.path.join(tracking_dir, "{}_tail_angles_crop_{}.csv".format(os.path.splitext(os.path.basename(video_path))[0], k)), tail_angle_array[k], fmt="%.4f", delimiter=",")
np.savetxt(os.path.join(tracking_dir, "{}_body_position_crop_{}.csv".format(os.path.splitext(os.path.basename(video_path))[0], k)), body_position_array[k], fmt="%.4f", delimiter=",")
np.savetxt(os.path.join(tracking_dir, "{}_heading_angle_crop_{}.csv".format(os.path.splitext(os.path.basename(video_path))[0], k)), heading_angle_array[k], fmt="%.4f", delimiter=",")
else:
np.savetxt(os.path.join(tracking_dir, "{}_tail_angles.csv".format(os.path.splitext(os.path.basename(video_path))[0])), tail_angle_array[0], fmt="%.4f", delimiter=",")
np.savetxt(os.path.join(tracking_dir, "{}_body_position.csv".format(os.path.splitext(os.path.basename(video_path))[0])), body_position_array[0], fmt="%.4f", delimiter=",")
np.savetxt(os.path.join(tracking_dir, "{}_heading_angle.csv".format(os.path.splitext(os.path.basename(video_path))[0])), heading_angle_array[0], fmt="%.4f", delimiter=",")
else:
# set tracking variables to None if they weren't used
eye_coords_array = None
body_position_array = None
# calculate the tail angles (in degrees)
tail_angle_array = analysis.calculate_headfixed_tail_angles(params['heading_angle'], tail_coords_array)
# save tail angles as CSV files -- rows are points along the tail, columns are video frames
if n_crops > 1:
for k in range(n_crops):
np.savetxt(os.path.join(tracking_dir, "{}_tail_angles_crop_{}.csv".format(os.path.splitext(os.path.basename(video_path))[0], k)), tail_angle_array[k], fmt="%.4f", delimiter=",")
else:
np.savetxt(os.path.join(tracking_dir, "{}_tail_angles.csv".format(os.path.splitext(os.path.basename(video_path))[0])), tail_angle_array[0], fmt="%.4f", delimiter=",")
# save the tracking data
np.savez(os.path.join(tracking_dir, "{}_tracking.npz".format(os.path.splitext(os.path.basename(video_path))[0])),
tail_coords=tail_coords_array, spline_coords=spline_coords_array,
heading_angle=heading_angle_array, body_position=body_position_array,
eye_coords=eye_coords_array, params=tracking_params)
if use_multiprocessing:
# close the pool of workers
pool.close()
pool.join()
# close the video capture object
capture.release()
# stop the timer
end_time = time.time()
# print the total tracking time
print("Finished tracking. Total time: {}s.".format(end_time - start_time))
def open_and_track_video_batch(params, tracking_dir, progress_signal=None):
'''
Open and perform tracking on a batch of videos.
Arguments:
params (dict) : Dictionary of tracking parameters, including the video paths.
tracking_dir (str) : Directory in which to save tracking data.
progress_signal (QSignal) : Signal to use to update the GUI with tracking progress.
'''
# extract video paths
video_paths = params['video_paths']
# track each video with the same parameters
for i in range(len(video_paths)):
open_and_track_video(video_paths[i], None, params, tracking_dir, i, progress_signal)
def track_frames(params, background, frame_nums, frames):
'''
Perform tracking on the provided frames.
Arguments:
params (dict) : Dictionary of tracking parameters.
        background (ndarray/None) : Background to subtract.
        frame_nums (list)         : Frame numbers (within the full video) of the provided frames.
        frames (ndarray)          : Frames to perform tracking on.
Returns:
tail_coords_array (ndarray) : Array containing coordinates of points along the tail.
Dimensions are (# of crops, # of frames, 2, # tail points).
spline_coords_array (ndarray) : Array containing coordinates of points along a spline fitted to the tail.
Dimensions are (# of crops, # of frames, 2, # tail points).
heading_angle_array (ndarray) : Array containing the heading angle of the zebrafish.
Dimensions are (# of crops, # of frames, 1).
body_position_array (ndarray) : Array containing the body center coordinates of the zebrafish.
Dimensions are (# of crops, # of frames, 2).
        eye_coords_array (ndarray)    : Array containing the coordinates of the eyes of the zebrafish.
Dimensions are (# of crops, # of frames, 2, 2).
'''
# extract parameters
crop_params = params['crop_params']
tracking_type = params['type']
n_tail_points = params['n_tail_points']
subtract_background = params['subtract_background']
bg_sub_threshold = params['bg_sub_threshold']
dark_background = params['dark_background']
# get number of frames & number of crops
n_frames = frames.shape[0]
n_crops = len(crop_params)
# initialize tracking data arrays
tail_coords_array = np.zeros((n_crops, n_frames, 2, n_tail_points)) + np.nan
spline_coords_array = np.zeros((n_crops, n_frames, 2, n_tail_points)) + np.nan
heading_angle_array = np.zeros((n_crops, n_frames, 1)) + np.nan
body_position_array = np.zeros((n_crops, n_frames, 2)) + np.nan
eye_coords_array = np.zeros((n_crops, n_frames, 2, 2)) + np.nan
# set booleans for head & tail tracking
track_head = tracking_type == "freeswimming"
track_tail = tracking_type == "headfixed" or params['track_tail'] == True
if subtract_background and background is not None:
# subtract the background
original_frames = frames.copy()
frames = subtract_background_from_frames(frames, background, bg_sub_threshold, dark_background=dark_background)
# frames = subtract_background_from_frames_extended(frames, background, threshold_value = 2)
else:
original_frames = frames
for frame_number in range(n_frames):
# get the frame
frame = frames[frame_number]
for k in range(n_crops):
# get the crop & offset
crop = crop_params[k]['crop']
offset = crop_params[k]['offset']
# crop the frame
cropped_frame = crop_frame(frame, offset, crop)
# track the frame
results, _, _ = track_cropped_frame(cropped_frame, frame_nums[frame_number], params, crop_params[k], original_frame=original_frames[frame_number])
# add coordinates to tracking data arrays
if results['tail_coords'] is not None:
tail_coords_array[k, frame_number, :, :results['tail_coords'].shape[1]] = results['tail_coords']
spline_coords_array[k, frame_number, :, :results['spline_coords'].shape[1]] = results['spline_coords']
heading_angle_array[k, frame_number, :] = results['heading_angle']
body_position_array[k, frame_number, :] = results['body_position']
eye_coords_array[k, frame_number, :, :] = results['eye_coords']
return tail_coords_array, spline_coords_array, heading_angle_array, body_position_array, eye_coords_array
def track_cropped_frame(frame, frame_num, params, crop_params, original_frame=None):
'''
Perform tracking on the provided frame.
Arguments:
frame (ndarray) : Frame to perform tracking on.
params (dict) : Dictionary of tracking parameters.
crop_params (dict) : Dictionary of extra tracking parameters for the cropped frame.
Returns:
        results (dict)          : Dictionary containing tracking results.
skeleton_frame (ndarray) : The result of skeletonizing the thresholded frame (used by the
GUI to preview the skeleton frame).
body_crop_coords (list) : List of coordinates of the crop around the tracked body position
that is used to track the eyes and tail (for freeswimming fish).
'''
if original_frame is None:
original_frame = frame
# extract tracking type
tracking_type = params['type']
if tracking_type == "freeswimming":
# extract parameters
body_crop = params['body_crop']
track_tail_bool = params['track_tail']
track_eyes_bool = params['track_eyes']
# only crop around the body if we're tracking the eyes and/or tail
crop_around_body = (track_eyes_bool or track_tail_bool) and body_crop is not None
# track the heading angle and body position
if crop_around_body:
heading_angle, body_position, rel_body_position, body_crop_coords, body_crop_frame, body_threshold_frame = track_body(frame, frame_num, params, crop_params, crop_around_body=True)
_, body_crop_original_frame = crop_frame_around_body(original_frame, body_position, body_crop)
else:
heading_angle, body_position = track_body(frame, frame_num, params, crop_params, crop_around_body=False)
rel_body_position = body_position
body_crop_coords = None
body_crop_frame = frame
body_crop_original_frame = original_frame
if track_eyes_bool:
# track the eyes
eye_coords = track_eyes(body_crop_frame, frame_num, params, crop_params)
if eye_coords is not None:
# update the heading angle based on the found eye coordinates
heading_angle = update_heading_angle_from_eye_coords(eye_coords, heading_angle, body_position)
if crop_around_body and body_crop_coords is not None and np.sum(body_crop_frame) > 0:
# convert the eye coords to be relative to the initial frame
eye_coords += body_crop_coords[:, 0][:, np.newaxis].astype(int)
else:
eye_coords = None
if track_tail_bool and body_position is not None:
# track the tail only if the body center was found
tail_coords, spline_coords, skeleton_frame = track_freeswimming_tail(body_crop_frame, frame_num, body_threshold_frame, params, crop_params, rel_body_position, heading_angle, original_frame=body_crop_original_frame)
if tail_coords is not None:
if crop_around_body and body_crop_coords is not None and np.sum(body_crop_frame) > 0:
# convert the tail coords to be relative to the initial frame
tail_coords += body_crop_coords[:, 0][:, np.newaxis].astype(int)
spline_coords += body_crop_coords[:, 0][:, np.newaxis].astype(int)
else:
tail_coords, spline_coords, skeleton_frame = [None]*3
elif tracking_type == "headfixed":
# set body, heading and eye position variables to None since we aren't interested in them
heading_angle, body_position, eye_coords = [None]*3
# track the tail
tail_coords, spline_coords = track_headfixed_tail(frame, params, crop_params)
skeleton_frame = None
body_crop_coords = None
# create a dictionary of results
results = { 'tail_coords' : tail_coords,
'spline_coords' : spline_coords,
'heading_angle' : heading_angle,
'body_position' : body_position,
'eye_coords' : eye_coords }
return results, skeleton_frame, body_crop_coords
# --- Body position & heading angle tracking --- #
def track_body(frame, frame_num, params, crop_params, crop_around_body=True):
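    '''
    Track the body in a cropped frame: threshold the frame at the body threshold, then fit
    an ellipse to the largest contour to estimate the heading angle and body position.
    Returns (heading_angle, body_position) when crop_around_body is False; otherwise also
    returns the body position relative to the crop, the body crop coordinates, the cropped
    frame, and the body threshold frame.
    '''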
# extract parameters
adjust_thresholds = params['adjust_thresholds']
body_threshold = crop_params['body_threshold']
eyes_threshold = crop_params['eyes_threshold']
body_crop = params['body_crop']
# create body threshold frame
body_threshold_frame = get_threshold_frame(frame, body_threshold, min_threshold=None, dilate=False)
# body_threshold_frame = get_body_threshold_frame(frame, body_threshold, kernel_size = [5, 5], n_iterations = 3)
# get heading angle & body position
heading_angle, body_position = get_heading_angle_and_body_position(body_threshold_frame, frame, eyes_threshold, body_threshold)
if crop_around_body:
# create array of body crop coordinates:
# [ y_start y_end ]
# [ x_start x_end ]
# crop the frame around the body
body_crop_coords, body_crop_frame = crop_frame_around_body(frame, body_position, body_crop)
if body_position is None:
rel_body_position = None
else:
# get body center position relative to the body crop
rel_body_position = body_position - body_crop_coords[:, 0]
return heading_angle, body_position, rel_body_position, body_crop_coords, body_crop_frame, body_threshold_frame
return heading_angle, body_position
def get_heading_angle_and_body_position(body_threshold_frame, frame, eyes_threshold, body_threshold):
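    '''
    Fit an ellipse to the largest contour in the body threshold frame to estimate the
    heading angle and body centre. The direction of the heading angle is disambiguated by
    comparing the mean brightness of the two halves of the ellipse (for a dark fish on a
    light background, the half containing the head/eyes is darker), and the centre is
    refined with extract_body_position_extended when possible. Returns (angle, position),
    or (None, None) on failure.
    '''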
# find contours in the thresholded frame
try:
image, contours, _ = cv2.findContours(body_threshold_frame.astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
except ValueError:
contours, _ = cv2.findContours(body_threshold_frame.astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
try:
if len(contours) > 0:
# choose the contour with the largest area as the body
body_contour = max(contours, key=cv2.contourArea)
if len(body_contour) >= 10:
# fit an ellipse and get the angle and center position
(x, y), (MA, ma), angle = cv2.fitEllipse(body_contour)
height = MA
half_width = ma
rad_angle = angle*np.pi/180.0
# create rotated rectangle mask from the center of the ellipse to one end of the major axis
# this rectangle covers the half of the ellipse that is in the direction of the heading angle
mask_1 = np.zeros(body_threshold_frame.shape)
point_1 = (x + half_width*np.cos(rad_angle), y + half_width*np.sin(rad_angle))
point_2 = (point_1[0] - height*np.sin(rad_angle), point_1[1] + height*np.cos(rad_angle))
point_3 = (x - half_width*np.cos(rad_angle), y - half_width*np.sin(rad_angle))
point_4 = (point_3[0] - height*np.sin(rad_angle), point_3[1] + height*np.cos(rad_angle))
cv2.fillConvexPoly(mask_1, np.array([point_1, point_2, point_4, point_3]).astype(int), 1)
# create rotated rectangle mask from the center of the ellipse to the other end of the major axis
# this rectangle covers the half of the ellipse that is in the opposite direction of the heading angle
mask_2 = np.zeros(body_threshold_frame.shape)
point_1 = (x + half_width*np.cos(rad_angle + np.pi), y + half_width*np.sin(rad_angle + np.pi))
point_2 = (point_1[0] - height*np.sin(rad_angle + np.pi), point_1[1] + height*np.cos(rad_angle + np.pi))
point_3 = (x - half_width*np.cos(rad_angle + np.pi), y - half_width*np.sin(rad_angle + np.pi))
point_4 = (point_3[0] - height*np.sin(rad_angle + np.pi), point_3[1] + height*np.cos(rad_angle + np.pi))
cv2.fillConvexPoly(mask_2, np.array([point_1, point_2, point_4, point_3]).astype(int), 1)
                # if the average brightness of the masked frame in the direction of the heading angle is larger than
                # that opposite of the heading angle (i.e. the heading angle points toward the tail), flip it
if np.mean(frame[mask_1.astype(bool)]) > np.mean(frame[mask_2.astype(bool)]):
angle += 180
# create an array for the center position
position = extract_body_position_extended(frame, eyes_threshold, body_threshold, threshold_step = 1)
if position is None:
position = np.array([y, x])
if position[0] < 0 or position[1] < 0 or 4*MA*ma < 100:
                    # discard the results if they are erroneous or if the body area is too small
return [None]*2
else:
return [None]*2
else:
return [None]*2
return angle, position
except:
return [None]*2
def update_heading_angle_from_eye_coords(eye_coords, body_heading_angle, body_position):
# get the heading angle based on eye coordinates
angle = 180.0 + np.arctan((eye_coords[0, 1] - eye_coords[0, 0])/(eye_coords[1, 1] - eye_coords[1, 0]))*180.0/np.pi
if body_heading_angle is not None:
# if the angle is too different from the heading angle found by fitting an ellipse to the body,
# try flipping it
if angle - body_heading_angle > 90:
angle -= 180
elif angle - body_heading_angle < -90:
angle += 180
# if it's still not within 90 degrees, just set it to the body threshold heading angle
if np.abs(angle - body_heading_angle) > 90:
angle = body_heading_angle
else:
# otherwise set the final angle to be a mix of this angle & the body threshold heading angle
angle = 0.7*body_heading_angle + 0.3*angle
return angle
def track_eyes(frame, frame_num, params, crop_params):
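    '''
    Threshold the (body-cropped) frame at the eyes threshold and locate the two eyes as the
    two largest blobs. If they are not found and adjust_thresholds is enabled, nearby
    threshold values (within +/- 4) are tried. Returns a 2x2 array whose rows are the y and
    x coordinates of the two eyes, or None if the eyes could not be found.
    '''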
# extract parameters
adjust_thresholds = params['adjust_thresholds']
eyes_threshold = crop_params['eyes_threshold']
# threshold the frame to extract the eyes
eyes_threshold_frame = get_threshold_frame(frame, eyes_threshold)
# get eye positions
eye_positions = get_eye_positions(eyes_threshold_frame)
if eye_positions is None and adjust_thresholds: # eyes not found; adjust the threshold & try again
# initialize counter
i = 0
# create a list of head thresholds to go through
eyes_thresholds = list(range(eyes_threshold-1, eyes_threshold-5, -1)) + list(range(eyes_threshold+1, eyes_threshold+5))
while eye_positions is None and i < 8:
# create a thresholded frame using new threshold
eyes_threshold_frame = get_threshold_frame(frame, eyes_thresholds[i])
# get eye positions
eye_positions = get_eye_positions(eyes_threshold_frame)
# increase counter
i += 1
return eye_positions
def get_eye_positions(eyes_threshold_image, prev_eye_coords=None):
# find contours
try:
image, contours, _ = cv2.findContours(eyes_threshold_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
except ValueError:
contours, _ = cv2.findContours(eyes_threshold_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if len(contours) < 2:
# too few contours found -- we need at least 2 (one for each eye)
return None
# choose the two contours with the largest areas as the eyes
eye_contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
# stop if the largest area is too small
if cv2.contourArea(eye_contours[0]) < 2:
return None
# get moments
moments = [cv2.moments(contour) for contour in eye_contours]
# initialize array to hold eye positions
positions = np.zeros((2, 2))
# get coordinates
for i in range(2):
M = moments[i]
if M['m00'] != 0:
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
positions[0, i] = cy
positions[1, i] = cx
else:
positions = None
return positions
# --- Freeswimming tail tracking --- #
def track_freeswimming_tail(frame, frame_num, body_threshold_frame, params, crop_params, body_position, heading_angle, original_frame=None):
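    '''
    Track the tail of a freely swimming fish in a (body-cropped) frame. The frame is
    thresholded at the tail threshold and tail points are extracted either with the
    alternative step-wise tracking (track_freeswimming_tail_alt) or with
    get_freeswimming_tail_coords. If no tail is found, the minimum tail-body distance and
    then nearby threshold values are adjusted and tracking is retried. Returns
    (tail_coords, spline_coords, skeleton_image), with the coordinate arrays set to None
    on failure.
    '''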
if original_frame is None:
original_frame = frame
# extract parameters
adjust_thresholds = params['adjust_thresholds']
min_tail_body_dist = params['min_tail_body_dist']
max_tail_body_dist = params['max_tail_body_dist']
n_tail_points = params['n_tail_points']
alt_tail_tracking = params['alt_tail_tracking']
tail_threshold = crop_params['tail_threshold']
# threshold the frame to extract the tail
tail_threshold_frame = get_threshold_frame(frame, tail_threshold, remove_noise=False)
# tail_threshold_frame = get_tail_threshold_frame(frame, tail_threshold, kernel_size = [5, 5], n_iterations = 2)
if alt_tail_tracking:
tail_coords, spline_coords, skeleton_image = track_freeswimming_tail_alt(frame, frame_num, tail_threshold_frame, body_threshold_frame, params, crop_params, body_position, heading_angle)
else:
# get tail coordinates
tail_coords, spline_coords, skeleton_image = get_freeswimming_tail_coords(tail_threshold_frame, body_position, heading_angle,
min_tail_body_dist, max_tail_body_dist,
n_tail_points, alt_tracking=alt_tail_tracking)
if tail_coords is None:
# try increasing the minimum tail-body distance
i = 1
            while tail_coords is None and i <= 5:
tail_coords, spline_coords, skeleton_image = get_freeswimming_tail_coords(tail_threshold_frame, body_position, heading_angle,
min_tail_body_dist+i, max_tail_body_dist,
n_tail_points, alt_tracking=alt_tail_tracking)
i += 1
if adjust_thresholds and tail_coords is None:
# initialize counter
i = 0
# create a list of tail thresholds to go through
tail_thresholds = list(range(tail_threshold-1, tail_threshold-5, -1)) + list(range(tail_threshold+1, tail_threshold+5))
while tail_coords is None and i < 8:
# create a thresholded frame using new threshold
tail_threshold_frame = get_threshold_frame(frame, tail_thresholds[i], remove_noise=False)
# get tail coordinates
tail_coords, spline_coords, skeleton_image = get_freeswimming_tail_coords(tail_threshold_frame, body_position, heading_angle,
min_tail_body_dist, max_tail_body_dist,
n_tail_points, alt_tracking=alt_tail_tracking)
# increase counter
i += 1
return tail_coords, spline_coords, skeleton_image
def track_freeswimming_tail_alt(frame, frame_num, tail_threshold_frame, body_threshold_frame, params, crop_params, body_position, heading_angle):
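    '''
    Alternative tail tracking: starting from the body position and an initial angle derived
    from the heading angle, repeatedly search for the next tail point within a given radius
    and angular range (find_next_coord), shrinking the search radius when no point is found.
    The resulting points are subsampled to n_tail_points and a cubic spline is fitted to
    them. Returns (tail_coords, spline_coords, skeleton_image).
    '''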
tail_threshold = crop_params['tail_threshold']
n_tail_points = params['n_tail_points']
radius = params['radius']
max_tail_value = params['max_tail_value']
angle_range = params['angle_range']
# get tail skeleton image
# skeleton_image = get_tail_skeleton_frame(tail_threshold_frame)
skeleton_image = get_tail_thinned_frame(tail_threshold_frame)
start_coord = np.array(body_position)
prev_coord = start_coord
angle = 180 - heading_angle
tail_coords = []
angles = []
i = 0
while prev_coord is not None and i < 200:
next_coord = find_next_coord(frame, prev_coord, angle, radius=radius, max_value=max_tail_value, angle_range=angle_range)
if next_coord is None:
r = radius-1
while next_coord is None and r > 1:
next_coord = find_next_coord(frame, prev_coord, angle, radius=r, max_value=max_tail_value, angle_range=angle_range)
r -= 1
if next_coord is not None:
angle = np.arctan2(np.array([next_coord[1] - prev_coord[1]]), np.array([next_coord[0] - prev_coord[0]]))[0]*180.0/np.pi
tail_coords.append(next_coord)
angles.append(angle)
prev_coord = next_coord
i += 1
if len(tail_coords) > 0:
tail_coords = np.array(tail_coords).T
n_tail_coords = tail_coords.shape[1]
else:
print("Error: Could not calculate tail spline.")
# print("Frame number: {0}".format(frame_num))
return [None]*2 + [skeleton_image]
if n_tail_coords > n_tail_points:
# get evenly-spaced tail indices
tail_nums = np.linspace(0, tail_coords.shape[1]-1, n_tail_points).astype(int)
# pick evenly-spaced points along the tail
tail_coords = tail_coords[:, tail_nums]
n_tail_coords = tail_coords.shape[1]
try:
        # parameterize the tail points by cumulative arc length (normalized to [0, 1])
t = np.zeros(n_tail_coords)
t[1:] = np.sqrt((tail_coords[1, 1:] - tail_coords[1, :-1])**2 + (tail_coords[0, 1:] - tail_coords[0, :-1])**2)
t = np.cumsum(t)
t /= t[-1]
nt = np.linspace(0, 1, 100)
# calculate cubic spline
spline_y_coords = interpolate.UnivariateSpline(t, tail_coords[0, :], k=3, s=3)(nt)
spline_x_coords = interpolate.UnivariateSpline(t, tail_coords[1, :], k=3, s=3)(nt)
spline_coords = np.array([spline_y_coords, spline_x_coords])