-
Notifications
You must be signed in to change notification settings - Fork 12
/
upload.py
2817 lines (2239 loc) · 107 KB
/
upload.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# A collection of tools to remotely access a CATMAID server via its API
#
# Copyright (C) 2017 Philipp Schlegel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
""" This module contains functions to push data to a Catmaid server.
"""
from datetime import datetime as dt
from datetime import timezone
import json
import numbers
import os
import tempfile
import traceback
import numpy as np
import pandas as pd
import navis as ns
import requests
import seaborn as sns
from scipy.spatial.distance import cdist
from . import (core, utils, config, cache, fetch, client)
# Public API of this module; kept sorted for a deterministic, readable listing.
__all__ = sorted(['add_annotations', 'remove_annotations',
'add_tags', 'delete_tags',
'delete_neuron',
'rename_neurons',
'add_meta_annotations', 'remove_meta_annotations',
'upload_neuron', 'upload_volume',
'update_radii', 'replace_skeleton',
'join_skeletons', 'join_nodes',
'link_connector', 'delete_nodes',
'add_connector', 'transfer_neuron',
'differential_upload', 'move_nodes',
'push_new_root', 'add_node',
'update_node_confidence',
'delete_volume', 'set_nodes_reviewed'])
# Set up logging
# Re-use the package-wide logger so all modules log through one config.
logger = config.logger
@cache.never_cache
def upload_volume(x, name, comments=None, remote_instance=None):
    """Upload volume/mesh to CatmaidInstance.

    Parameters
    ----------
    x :                 Volume | dict
                        Volume to export. Can be::
                          - pymaid.Volume
                          - dict: {
                                   'faces': array-like,
                                   'vertices': array-like
                                   }
    name :              str
                        Name of volume. If ``None`` will use the Volume's
                        ``.name`` property.
    comments :          str, optional
                        Comments to upload.
    remote_instance :   CatmaidInstance, optional
                        If not passed directly, will try using global.

    Returns
    -------
    dict
                        Server response.

    """
    # Coordinates are cast to int because CATMAID stores mesh vertices as
    # integer voxel coordinates.
    if isinstance(x, ns.Volume):
        verts = x.vertices.astype(int).tolist()
        faces = x.faces.astype(int).tolist()
    elif isinstance(x, dict):
        verts = x['vertices'].astype(int).tolist()
        faces = x['faces'].astype(int).tolist()
    else:
        raise TypeError('Expected navis or pymaid Volume or dictionary, '
                        'got "{}"'.format(type(x)))

    # Fall back to the Volume's own name if no (string) name was provided
    if not isinstance(name, str) and isinstance(x, ns.Volume):
        name = getattr(x, 'name', 'not named')

    remote_instance = utils._eval_remote_instance(remote_instance)

    postdata = {'title': name,
                'type': 'trimesh',
                'mesh': json.dumps([verts, faces]),
                'comment': comments if comments else ''
                }

    url = remote_instance._upload_volume_url()

    response = remote_instance.fetch(url, post=postdata)

    # Use .get() so a response without a 'success' field is treated as a
    # failure instead of raising a KeyError; the raw response is returned
    # either way so the caller can inspect it.
    if response.get('success') is not True:
        logger.error('Error exporting volume {}'.format(name))

    return response
@cache.never_cache
def delete_volume(x, no_prompt=False, remote_instance=None):
    """Delete volume from Catmaid Instance.

    Parameters
    ----------
    x :                 int | str
                        Name (str) or ID (int) of volume to delete.
    no_prompt :         bool, optional
                        If True, will skip prompt to confirm deletion.
    remote_instance :   CatmaidInstance, optional
                        If not passed directly, will try using global.

    Returns
    -------
    dict
                        Server response.

    """
    remote_instance = utils._eval_remote_instance(remote_instance)

    if not isinstance(x, (str, int, np.integer)):
        raise TypeError('Expected volume name (str) or ID (int), '
                        'got "{}"'.format(type(x)))

    # Pull the full volume listing so we can resolve name <-> ID
    listing = remote_instance.fetch(remote_instance._get_volumes())
    vol_table = pd.DataFrame(listing['data'], columns=listing['columns'])

    # Resolve whichever identifier we were given into the (name, ID) pair
    if isinstance(x, str):
        by_name = dict(zip(vol_table.name.values, vol_table.id.values))
        if x not in by_name:
            raise ValueError('Volume "{}" not found'.format(x))
        vol_name, vol_id = x, by_name[x]
    else:
        by_id = dict(zip(vol_table.id.values, vol_table.name.values))
        if x not in by_id:
            raise ValueError('Volume "{}" not found'.format(x))
        vol_name, vol_id = by_id[x], x

    if not no_prompt:
        # Ask for explicit confirmation before deleting
        q = 'Please confirm deletion of Volume "{}" (ID {}) [Y/N] '.format(vol_name, vol_id)
        answer = input(q).lower()
        while answer not in ["y", "n"]:
            answer = input(q).lower()
        if answer != 'y':
            return

    # The volume-details endpoint doubles as the DELETE target
    req = remote_instance._session.delete(remote_instance._get_volume_details(vol_id))
    req.raise_for_status()

    resp = req.json()
    if 'error' in resp:
        logger.error('Error deleting volume {}: {}'.format(x, resp))

    return resp
def transfer_neuron(x, source_instance, target_instance, move_tags=False,
                    move_annotations=False, move_connectors=False,
                    force_id=False, no_prompt=False):
    """Copy neuron(s) from one CatmaidInstance to another.

    Note that skeleton, node and connector IDs will change (see server
    response for old->new mapping). Also: node confidences are currently not
    transferred.

    Parameters
    ----------
    x :                 Skeleton ID(s)
                        Neuron(s) to move from ``source_instance`` to
                        ``target_instance``.
    source_instance :   CatmaidInstance
                        Instance that the neuron(s) currently live in.
    target_instance :   CatmaidInstance
                        Instance to copy the neuron(s) to.
    move_tags :         bool, optional
                        If True, will upload node tags from ``x.tags``.
    move_annotations :  bool, optional
                        If True will upload annotations from ``x.annotations``.
    move_connectors :   bool, optional
                        If True will upload connectors from ``x.connectors``.
    force_id :          bool, optional
                        If True and neuron/skeleton IDs already exist in
                        target instance, they will be replaced. **Use this with
                        extreme caution as this will destroy the existing
                        skeleton!**
    no_prompt :         bool, optional
                        If True, will not prompt before transferring neurons!

    Returns
    -------
    dict
                        Server response with new skeleton/node IDs::

                            {
                             'neuron_id': new neuron ID,
                             'skeleton_id': new skeleton ID,
                             'node_id_map': {'old_node_id': new_node_id, ...},
                             'annotations': if import_annotations=True,
                             'tags': if tags=True
                            }

    """
    # TODOs:
    # - move node confidences

    if not isinstance(source_instance, client.CatmaidInstance):
        raise TypeError('"source_instance" must be CatmaidInstance not "{}"'.format(type(source_instance)))

    if not isinstance(target_instance, client.CatmaidInstance):
        raise TypeError('"target_instance" must be CatmaidInstance not "{}"'.format(type(target_instance)))

    if source_instance == target_instance:
        raise ValueError('source_instance must not be the same as target_instance')

    # We can't use the caching decorator in this case because the remote
    # instances are not a "remote_instance" keyword argument. Disable caching
    # manually and restore the previous setting afterwards.
    old_caching = source_instance.caching
    source_instance.caching = False
    try:
        skids = utils.eval_skids(x, remote_instance=source_instance)
        neurons = fetch.get_neurons(skids, remote_instance=source_instance)

        if not isinstance(neurons, core.CatmaidNeuronList):
            neurons = core.CatmaidNeuronList(neurons)

        if move_annotations:
            neurons.get_annotations()
    finally:
        # Restore caching even if fetching raised
        source_instance.caching = old_caching

    if not no_prompt:
        # Show a summary of what is about to be transferred and ask for
        # explicit sign-off.
        summary = neurons.summary()[['name', 'id', 'n_nodes']]
        if move_connectors:
            summary['n_connectors'] = neurons.n_connectors
        if move_tags:
            summary['n_tags'] = [len(n.tags) for n in neurons]
        if move_annotations:
            summary['n_annotations'] = [len(n.annotations) for n in neurons]
        print(summary.to_string())
        q = 'Transferring above neurons from {} (project ID {}) to {} (project ID {}). Proceed? [Y/N] '
        q = q.format(source_instance.server,
                     source_instance.project_id,
                     target_instance.server,
                     target_instance.project_id)
        answer = ""
        while answer not in ["y", "n"]:
            answer = input(q).lower()
            if answer != 'y':
                return

    # Delegate the actual upload; pass the source coordinates so the new
    # neurons keep a record of their origin.
    return upload_neuron(neurons,
                         import_tags=move_tags,
                         import_annotations=move_annotations,
                         import_connectors=move_connectors,
                         skeleton_id=neurons.skeleton_id.astype(int) if force_id else None,
                         force_id=force_id,
                         source_id=neurons.skeleton_id.astype(int),
                         source_project_id=source_instance.project_id,
                         source_url=source_instance.server,
                         remote_instance=target_instance)
@cache.never_cache
def upload_neuron(x, import_tags=False, import_annotations=False,
                  import_connectors=False, reuse_existing_connectors=True,
                  skeleton_id=None, neuron_id=None, force_id=False,
                  source_id=None, source_project_id=None,
                  source_url=None, source_type=None, remote_instance=None):
    """Export (upload) neurons to CatmaidInstance.
    Note that skeleton, node and connector IDs will change (see server
    response for old->new mapping). Neuron to import must not have more than one
    skeleton (i.e. disconnected components = more than one root node).
    Parameters
    ----------
    x :                 CatmaidNeuron/List
                        Neurons to upload.
    import_tags :       bool, optional
                        If True, will upload node tags from ``x.tags``.
    import_annotations : bool, optional
                        If True will upload annotations from ``x.annotations``.
    import_connectors : bool, optional
                        If True will upload connectors from ``x.connectors``.
    reuse_existing_connectors : bool, optional
                        Only matters when import_connectors is True.
                        If True will look in the remote_instance at the
                        location of each of ``x``'s connectors, and if
                        present, ``x`` will be linked to that existing
                        connector instead of a new (duplicate) connector being
                        created at that location. If False all of
                        ``x.connectors`` are uploaded as new.
    skeleton_id :       int, optional
                        Use this to set the Id of the new skeleton(s). If not
                        provided will generate a new ID upon export.
    neuron_id :         int, optional
                        Use this to associate the new skeleton(s) with an
                        existing neuron.
    force_id :          bool, optional
                        If True and neuron/skeleton IDs already exist in
                        project, their instances will be replaced. If False
                        and you pass ``neuron_id`` or ``skeleton_id`` that
                        already exist, an error will be thrown.
    source_id :         int, optional
    source_project_id : int, optional
    source_url :        str, optional
    source_type :       "skeleton" | "segmentation", optional
                        ``source_{}`` are optional fields that will be
                        associated with the newly uploaded neuron to help keep
                        track of neurons' origins. You can use
                        :func:`~pymaid.get_origin`, :func:`~pymaid.get_skids_by_origin`
                        and :func:`~pymaid.find_neurons` to look up neurons
                        by their origin.
    remote_instance :   CatmaidInstance, optional
                        CatmaidInstance to upload to. If not passed directly,
                        will try using global.
    Returns
    -------
    dict
                        Server response with new skeleton/node IDs::
                            {
                             'neuron_id': new neuron ID,
                             'skeleton_id': new skeleton ID,
                             'node_id_map': {'old_node_id': new_node_id, ...},
                             'annotations': if import_annotations=True,
                             'tags': if tags=True
                            }
    """
    remote_instance = utils._eval_remote_instance(remote_instance)
    # NeuronList branch: normalise per-neuron parameters, then recurse into
    # this function once per neuron and collect the responses.
    if isinstance(x, ns.NeuronList):
        # Check if any neurons are only single nodes
        if any(x.n_nodes <= 1) and (not isinstance(skeleton_id, type(None))
                                    or not isinstance(neuron_id, type(None))):
            raise ValueError('Single-node neurons can currently not be uploaded '
                             'with a given skeleton or neuron ID.')
        # NOTE: `vars` shadows the builtin vars() within this function
        vars = dict(neuron_id=neuron_id,
                    skeleton_id=skeleton_id,
                    source_id=source_id,
                    source_project_id=source_project_id,
                    source_url=source_url,
                    source_type=source_type)
        # Parse variables that require unique values for each neuron
        for k in ['neuron_id', 'skeleton_id', 'source_id']:
            if not isinstance(vars[k], (type(None), bool)):
                # Make sure it is iterable
                vars[k] = list(utils._make_iterable(vars[k]))
                if len(vars[k]) != len(x):
                    raise ValueError('Must provide "{}" for each uploaded neuron.'.format(k))
            else:
                vars[k] = [vars[k]] * len(x)
        # Parse variables that can (but don't have to) be the same for all neurons
        for k in ['source_project_id', 'source_url', 'source_type']:
            if not utils._is_iterable(vars[k]):
                vars[k] = [vars[k]] * len(x)
            elif len(vars[k]) != len(x):
                raise ValueError('Must provide "{}" for each uploaded neuron.'.format(k))
        # Check if any neurons has multiple skeletons
        # NOTE(review): the ', '.join below assumes n.id is a string —
        # would raise TypeError for integer IDs; verify upstream ID dtype.
        many = [n.id for n in x if n.n_skeletons > 1]
        if many:
            logger.warning('Neurons with multiple disconnected skeletons'
                           'found: {}'.format(', '.join(many)))
            answer = ""
            while answer not in ["y", "n"]:
                answer = input("Fragments will be joined before import. "
                               "Continue? [Y/N] ").lower()
            if answer != 'y':
                logger.warning('Import cancelled.')
                return
            x = ns.heal_fragmented_neuron(x, min_size=0, inplace=False)
        # Recurse: upload one neuron at a time, keyed by its (old) ID
        resp = {n.id: upload_neuron(n,
                                    neuron_id=vars['neuron_id'][i],
                                    skeleton_id=vars['skeleton_id'][i],
                                    import_tags=import_tags,
                                    import_annotations=import_annotations,
                                    import_connectors=import_connectors,
                                    force_id=force_id,
                                    source_id=vars['source_id'][i],
                                    source_project_id=vars['source_project_id'][i],
                                    source_url=vars['source_url'][i],
                                    source_type=vars['source_type'][i],
                                    remote_instance=remote_instance)
                for i, n in config.tqdm(enumerate(x),
                                        desc='Uploading',
                                        total=len(x),
                                        disable=config.pbar_hide,
                                        leave=config.pbar_leave)}
        errors = {n: r for n, r in resp.items() if 'error' in r}
        if errors:
            logger.error('{} error(s) during upload. Check neuron(s): '
                         '{}'.format(len(errors), ','.join(errors.keys())))
        return resp
    # Single-neuron branch from here on
    if not isinstance(x, ns.TreeNeuron):
        raise TypeError('Expected CatmaidNeuron/List, got "{}"'.format(type(x)))
    if x.nodes.empty:
        raise ValueError('{} #{}: Unable to upload neuron without nodes'.format(x.name, x.id))
    if x.n_skeletons > 1:
        logger.warning('Neuron has multiple disconnected skeletons. Will heal'
                       ' fragments before import!')
        x = ns.heal_fragmented_neuron(x, min_size=0, inplace=False)
    if source_type and source_type not in ['skeleton', 'segmentation']:
        raise ValueError('Expected source_type to be "skeleton" or '
                         '"segmentation", got "{}"'.format(source_type))
    # Validate types of the optional source_* metadata fields
    for v, n, t in zip([source_id, source_url, source_project_id],
                       ['source_id', 'source_url', 'source_project_id'],
                       [(int, np.integer), str, (int, np.integer)]):
        if not isinstance(v, (type(None), t)):
            raise TypeError('{} must be None or {}, got {}'.format(n, t, type(v)))
    # Check if any neurons are only single nodes
    # -> these need to be uploaded differently
    if x.n_nodes <= 1:
        if not isinstance(skeleton_id, type(None)) or not isinstance(neuron_id, type(None)):
            raise ValueError('Single-node neurons can currently not be uploaded '
                             'with a given skeleton or neuron ID.')
        node = x.nodes.iloc[0]
        vars = dict(coords=node[['x', 'y', 'z']].values,
                    parent_id=None,
                    remote_instance=remote_instance)
        # Only forward confidence/radius if the node table has them
        if hasattr(node, 'confidence'):
            vars['confidence'] = node.confidence if node.confidence else None
        if hasattr(node, 'radius'):
            vars['radius'] = node.radius
        resp = add_node(**vars)
        # If error is returned
        if 'error' in resp:
            logger.error('Error uploading neuron "{}"'.format(x.name))
            return resp
        # Add node ID map to match with normal upload
        resp['node_id_map'] = {node.node_id: resp['treenode_id']}
    else:
        # Multi-node neurons are uploaded as an SWC file via the import API
        import_url = remote_instance._import_skeleton_url()
        import_post = {'neuron_id': neuron_id,
                       'skeleton_id': skeleton_id,
                       'name': x.name,
                       'force': force_id,
                       'auto_id': False}
        # Add ID fields
        for k, v in zip(['source_id', 'source_url', 'source_project_id', 'source_type'],
                        [source_id, source_url, source_project_id, source_type]):
            if v:
                import_post[k] = v
        f = os.path.join(tempfile.gettempdir(), 'temp.swc')
        # Keep SWC node map
        swc_map = ns.write_swc(x,
                               filename=f,
                               export_connectors=False,
                               labels=False,
                               return_node_map=True)
        with open(f, 'rb') as file:
            # Large files can cause a 504 Gateway timeout. In that case, we want
            # to have a log of it without interrupting potential subsequent uploads.
            try:
                resp = remote_instance.fetch(import_url,
                                             post=import_post,
                                             files={'file': file})
            except requests.exceptions.HTTPError as err:
                if 'gateway time-out' in str(err).lower():
                    logger.debug('Gateway time-out while uploading {}. Retrying..'.format(x.name))
                    # Retry exactly once; a second timeout is reported as an
                    # error dict instead of raising.
                    try:
                        resp = remote_instance.fetch(import_url,
                                                     post=import_post,
                                                     files={'file': file})
                    except requests.exceptions.HTTPError as err:
                        logger.error('Timeout uploading neuron "{}"'.format(x.name))
                        return {'error': err}
                    except BaseException:
                        raise
                else:
                    # Any other error should just raise
                    raise
            except BaseException:
                raise
        # If error is returned
        if 'error' in resp:
            logger.error('Error uploading neuron "{}"'.format(x.name))
            return resp
        # Exporting to SWC changes the node IDs -> we will revert this in the
        # response of the server
        n_map = {n: resp['node_id_map'].get(str(swc_map[n]), None) for n in swc_map}
        resp['node_id_map'] = n_map
    if import_tags and getattr(x, 'tags', {}):
        # Map old to new nodes
        tags = {t: [resp['node_id_map'][n] for n in v] for t, v in x.tags.items()}
        # Invert tag dictionary: map node ID -> list of tags
        ntags = {}
        for t in tags:
            ntags.update({n: ntags.get(n, []) + [t] for n in tags[t]})
        resp['tags'] = add_tags(list(ntags.keys()),
                                ntags,
                                'NODE',
                                remote_instance=remote_instance)
    # Make sure to not access `.annotations` directly to not trigger
    # fetching annotations
    if import_annotations and '_annotations' in x.__dict__:
        an = x.__dict__.get('_annotations', [])
        resp['annotations'] = add_annotations(resp['skeleton_id'], an,
                                              remote_instance=remote_instance)
    if import_connectors and x.has_connectors and not x.connectors.empty:
        # Connectors that make multiple links onto the neuron will be listed
        # more than once but only want to upload them once
        connectors_no_duplicates = x.connectors.drop_duplicates(subset=['connector_id'])
        # First create new connectors
        cn_resp = add_connector(connectors_no_duplicates[['x', 'y', 'z']].values,
                                check_existing=reuse_existing_connectors,
                                remote_instance=remote_instance)
        resp['connector_response'] = cn_resp
        # Create old to new IDs map
        cn_map = {old: new['connector_id'] for old, new in zip(connectors_no_duplicates.connector_id.values,
                                                               cn_resp)}
        # Add map to server response
        resp['connector_id_map'] = cn_map
        # Hard-coded relation map
        rl_map = config.compact_skeleton_relations
        # Link connectors
        links = [[resp['node_id_map'][n.node_id],
                  cn_map[n.connector_id],
                  rl_map[n.type]] for n in x.connectors.itertuples()]
        ln_resp = link_connector(links, remote_instance=remote_instance)
        resp['link_response'] = ln_resp
        if import_tags and getattr(x, 'connector_tags', {}):
            # Map old to new connectors
            cn_tags = {t: [cn_map[n] for n in v] for t, v in x.connector_tags.items()}
            # Invert connector tag dictionary: map connector ID -> list of tags
            ctags = {}
            for t in cn_tags:
                ctags.update({n: ctags.get(n, []) + [t] for n in cn_tags[t]})
            resp['connector_tags'] = add_tags(list(ctags.keys()),
                                              ctags,
                                              'CONNECTOR',
                                              override_existing=True,
                                              remote_instance=remote_instance)
    return resp
@cache.never_cache
def differential_upload(x, skeleton_id=None, no_prompt=False, remote_instance=None):
    """Upload only changes made to a neuron.

    In brief, this function takes the input neuron ``x``, compares with its
    live version on the server and makes incremental changes:
      1. Remove nodes not present in ``x`` from live neuron
      2. Add nodes present in ``x`` but not in live neuron
      3. Move nodes present in ``x`` and live neuron that have changed positions

    .. danger::
        **Use this with EXTREME caution as this is irreversible!**

    Parameters
    ----------
    x :                 CatmaidNeuron/List
                        Neurons to upload.
    skeleton_id :       int, optional
                        Use this to set the target live neuron. If not
                        provided will use ``x.skeleton_id``.
    no_prompt :         bool, optional
                        If True, will not prompt before uploading changes!
    remote_instance :   CatmaidInstance, optional
                        CatmaidInstance to upload to. If not passed directly,
                        will try using global.

    Returns
    -------
    None
                        If everything went well.
    dict
                        On error, returns dict with server response.

    """
    remote_instance = utils._eval_remote_instance(remote_instance)

    if isinstance(x, core.CatmaidNeuronList):
        if len(x) > 1:
            raise ValueError('Expected a single CatmaidNeuron, got {}'.format(len(x)))
        x = x[0]

    if not isinstance(x, core.CatmaidNeuron):
        raise TypeError('Expected CatmaidNeuron, got "{}"'.format(x))

    skeleton_id = x.skeleton_id if not skeleton_id else skeleton_id

    # Check if neuron actually exist
    if not fetch.neuron_exists(skeleton_id, remote_instance=remote_instance):
        # BUGFIX: the original format string was missing the placeholder for
        # the project ID -> '(PID )' was always empty.
        raise ValueError('No neuron with skeleton ID {} found on {} (PID {})'.format(skeleton_id,
                                                                                     remote_instance.server,
                                                                                     remote_instance.project_id))

    # Get live neuron
    live = fetch.get_neuron(skeleton_id, remote_instance=remote_instance)

    # Generate report on differences
    report = _diff_report(a=x, b=live)

    if not report['nodes_mutual']:
        raise ValueError('Input and live neuron have no nodes in common!')

    if not no_prompt:
        # Summarise the pending changes and ask for sign-off
        q = 'Neuron "{}" (#{}) on {} (PID {}) will have:\n{} nodes deleted\n' \
            '{} nodes moved\n{} nodes added\nPlease confirm [Y/N] '
        q = q.format(live.name,
                     live.id,
                     remote_instance.server,
                     remote_instance.project_id,
                     len(report['nodes_b_only']),
                     len(report['nodes_moved']),
                     len(report['nodes_a_only']))
        answer = ""
        while answer not in ["y", "n"]:
            answer = input(q).lower()
        if answer != 'y':
            return

    # We need to reroot our input neuron to one of the mutual nodes so that
    # the fragments we're attaching later have their roots at a node adjacent
    # to the live neuron.
    if not set(x.root) & set(report['nodes_mutual']):
        x.reroot(report['nodes_mutual'][0], inplace=True)

    # First off: delete extra nodes
    # If any of this is a sequence of connected nodes, we have to delete
    # them sequentially anyway - so we'll just go through the pain in any
    # event
    if report['nodes_b_only']:
        for n in config.tqdm(report['nodes_b_only'],
                             desc='Removing nodes',
                             leave=config.pbar_leave,
                             disable=config.pbar_hide):
            resp = delete_nodes(n, 'NODE',
                                no_prompt=True,
                                remote_instance=remote_instance)
            if 'error' in resp:
                # Error is already logged by delete_nodes
                return resp

    # Next add additional nodes
    if report['nodes_a_only']:
        # Generate a neuron consisting only of nodes to be added
        x_ss = ns.subset_neuron(x, report['nodes_a_only'], inplace=False)
        # Turn disconnected trees into separate neurons
        frags = ns.break_fragments(x_ss)
        # Upload each fragment and connect to live neuron
        for f in config.tqdm(frags,
                             desc='Uploading & Joining',
                             leave=config.pbar_leave,
                             disable=config.pbar_hide):
            # Single nodes can't be uploaded as SWC neurons
            if f.nodes.shape[0] == 1:
                # Attach the lone node directly to its former parent, which
                # by construction is a mutual node of input and live neuron
                parent_id = x.nodes.set_index('node_id').loc[f.root[0],
                                                             'parent_id']
                coords = f.nodes.iloc[0][['x', 'y', 'z']].values
                radius = f.nodes.iloc[0].radius
                resp = add_node(coords,
                                parent_id=parent_id,
                                radius=radius,
                                remote_instance=remote_instance)
            else:
                # Keep track of new skeleton and node IDs
                nmap = upload_neuron(f, remote_instance=remote_instance)
                if 'error' in nmap:
                    # Error is already logged by upload_neuron
                    return nmap
                # Now connect this fragment's root with its former parent in
                # the input neuron (which is a mutual node)
                loser_node = nmap['node_id_map'][f.root[0]]
                winner_node = x.nodes.set_index('node_id').loc[f.root[0],
                                                               'parent_id']
                resp = join_nodes(winner_node, loser_node, no_prompt=True,
                                  remote_instance=remote_instance)
            if 'error' in resp:
                # Error is already logged by join_nodes
                return resp

    # Last but not least: move nodes
    if report['nodes_moved']:
        # Generate new positions
        new_locs = x.nodes.loc[x.nodes.node_id.isin(report['nodes_moved']),
                               ['node_id', 'x', 'y', 'z']].values
        resp = move_nodes(new_locs,
                          node_type='NODE',
                          no_prompt=True,
                          remote_instance=remote_instance)
        if 'error' in resp:
            # Error is already logged by move_nodes
            return resp

    return
@cache.never_cache
def replace_skeleton(x, skeleton_id=None, force_mapping=False,
cold_run=False, remote_instance=None):
"""Replace skeleton in CatmaidInstance.
This will override existing skeleton data and tries to map back tags
and connectors. Requires user to have import and API token write access
privileges!
Connectors are re-connected by: For each connector,
1. Get node this connector is connected to in current skeleton.
2. Get distance to the nodes up- and downstream of it as proxy for
sampling resolution.
3. Find the closest node in new skeleton.
4. Connect automatically if closest node within sampling resolution.
Else flag and return connector as "requires manual review".
Node tags are mapped back by: For each tagged node:
1. Get distance to the nodes up- and downstream of it as proxy for
sampling resolution.
2. Find the closest node in new skeleton.
3. Map tag automatically if closest node is within the sampling
resolution. Else flag and return connector as "requires manual review".
Note that this does not respect types of nodes. E.g. an "ends" tag could end
up on a non-leaf node.
Any connectors/tags that have not been automatically fixed will be returned
as DataFrame for manual review. See examples.
.. danger::
**Use this with EXTREME caution as this is irreversible!**
Parameters
----------
x : CatmaidNeuron
Neuron to update.
skeleton_id : int, optional
ID of skeleton to update. If not provided will use
``.skeleton_id`` property of input neuron.
force_mapping : bool, optional
If True, will always re-connect connectors and map tags
onto the closest node in new skeleton regardless of
distance.
cold_run : bool, optional
If True, will only calculate and return table of nodes
to fix without actually uploading anything.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame listing connectors and node tags that were
either fixed automatically or need manual review.
auto_fix type connector_id old_node_id ...
0 False 'connector' 123456 11111
1 True 'tag' None 22222
sugg_node_id relation tags x y z
0 33333 0 None ...
1 44444 None ['ends'] ...
In this example node `11111` in the old skeleton was
connected presynaptically (``relation=0``) to a
connector. The closest node in the new skeleton is
`33333` but it's too far away to be automatically
reconnected.
Node `22222` had an `ends` tag in the old skeleton and
the closest node in the new skeleton is `44444`. Because
this node was close enough, it was automatically fixed.
``x/y/z`` coordinates always refer to the position of
the old node!
Examples
--------
Pull a neuron from CATMAID, smooth it and upload it again:
>>> n = pymaid.get_neuron(16)
>>> n_smoothed = pymaid.smooth_neuron(n, inplace=False)
>>> to_fix = pymaid.replace_skeleton(n_smoothed)
Updating skeleton 16
# of nodes: 12853 -> 12853 (+0)
Cable length: 2904.2 -> 3027.3 (+123.1)
1983 of 2116 connectors will be automatically re-connected
654 of 654 tagged nodes will be automatically mapped back
Remaining connectors/tagged nodes will be returned as DataFramefor manual review
Proceed? [Y/N] Y
>>> # There are 133 items (connectors and tags) to check manually:
>>> to_fix[~to_fix.auto_fix].shape[0]
133
>>> to_fix[~to_fix.auto_fix].head()
connector_id old_node_id relation sugg_node_id tags type x y z
20 304403 125722 NaN 125717 NaN connector 452034 139101 204160
47 553830 123430 NaN 2698 NaN connector 437855 165228 216280
77 653778 123637 NaN 123636 NaN connector 450508 134607 188720
86 666783 127623 NaN 127620 NaN connector 438700 147328 219840
To facilitate fixing , we can add urls to the positions and then copy the
DataFrame to e.g. a spreadsheet:
>>> fix_manual = to_fix[~to_fix.auto_fix]
>>> fix_manual['url'] = pymaid.url_to_coordinates(coords=fix_manual,
... stack_id=5, # change this according to your projects
... active_skeleton_id=n.skeleton_id,
... active_node_id=fix_manual.sugg_node_id.values)
>>> # Copy to clipboard
>>> fix_manual.to_clipboard()
"""
# TODO:
# - use tangent vector to map connectors/tags back?
# - constrain certain tags (e.g. "ends" only on leafs)
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(x, core.CatmaidNeuron):
raise TypeError('Expected CatmaidNeuron, got "{}"'.format(type(x)))
if isinstance(skeleton_id, type(None)):
skeleton_id = x.skeleton_id
if not fetch.neuron_exists(skeleton_id, remote_instance=remote_instance):
raise ValueError('Neuron with skeleton ID "{}" does not exist'.format(skeleton_id))
# Get current skeleton that is should be replaced
y = fetch.get_neuron(skeleton_id, remote_instance=remote_instance)
# Because compact-skeleton does not return all types of connectors, we have
# to get them via a separate endpoint
lk = fetch.get_connector_links(skeleton_id, remote_instance=remote_instance)
# Find out which connectors we can automatically reconnect:
# First get distance between each connector node and its neighbours
cn_nodes = lk.node_id.values
g = y.graph.to_undirected()
cn_nodes_dist = []
for n in cn_nodes:
cn_nodes_dist.append(np.mean([g.edges[(n, n2)]['weight'] for n2 in g.neighbors(n)]))
cn_nodes_dist = np.array(cn_nodes_dist)
# Now find closest node in the new neuron
cn_dist_new = cdist(y.nodes.set_index('node_id').loc[cn_nodes,
['x', 'y', 'z']],
x.nodes[['x', 'y', 'z']].values)
cn_closest_ix = np.argmin(cn_dist_new, axis=1)
cn_closest_id = x.nodes.iloc[cn_closest_ix]['node_id'].values
cn_closest_dist = np.amin(cn_dist_new, axis=1)
if not force_mapping:
cn_is_close = cn_closest_dist <= cn_nodes_dist
else:
cn_is_close = cn_closest_dist <= float('inf')
# Create dictionary mapping old to new connector node ID
cn_to_tn = {int(c): int(t) for c, t in zip(cn_nodes, cn_closest_id)}
# Find out which tags we can automatically map back:
# First get distance between each tagged node and its connected nodes
tg_nodes = np.array(list(set([n for t in y.tags for n in y.tags[t]])))
tg_nodes_dist = []
for n in tg_nodes:
tg_nodes_dist.append(np.mean([g.edges[(n, n2)]['weight'] for n2 in g.neighbors(n)]))
tg_nodes_dist = np.array(tg_nodes_dist)
# Find closest node in the new neuron
tg_dist_new = cdist(y.nodes.set_index('node_id').loc[tg_nodes,
['x', 'y', 'z']].values,
x.nodes[['x', 'y', 'z']].values)
tg_closest_ix = np.argmin(tg_dist_new, axis=1)
tg_closest_id = x.nodes.iloc[tg_closest_ix]['node_id'].values
tg_closest_dist = np.amin(tg_dist_new, axis=1)
if not force_mapping:
tg_is_close = tg_closest_dist <= tg_nodes_dist
else:
tg_is_close = tg_closest_dist <= float('inf')
# Create dictionary mapping old to new node ID
tn_to_tn = {int(c): int(t) for c, t in zip(tg_nodes, tg_closest_id)}
# Compile list of items to fix after replacing skeleton in case we
# encounter an error and need to dump this
cn_to_fix = y.nodes.set_index('node_id').loc[cn_nodes, ['x', 'y', 'z']]
cn_to_fix = cn_to_fix.copy().reset_index(drop=True)
cn_to_fix['type'] = 'connector'
# Do not remove .astype(object) as this prevents conversion to float later
cn_to_fix['connector_id'] = lk.connector_id.values.astype(object)
cn_to_fix['old_node_id'] = lk.node_id.values
cn_to_fix['sugg_node_id'] = cn_to_fix.old_node_id.astype(int).map(cn_to_tn)
cn_to_fix['relation'] = lk.relation.values
cn_to_fix['auto_fix'] = cn_is_close
tg_to_fix = y.nodes.set_index('node_id').loc[tg_nodes, ['x', 'y', 'z']]
tg_to_fix = tg_to_fix.copy().reset_index(drop=True)
tg_to_fix['type'] = 'tags'
tg_to_fix['old_node_id'] = tg_nodes
tg_to_fix['sugg_node_id'] = tg_to_fix.old_node_id.astype(int).map(tn_to_tn)
tg_to_fix['tags'] = [[t for t in y.tags if n in y.tags[t]] for n in tg_nodes]
tg_to_fix['auto_fix'] = tg_is_close
# Concatenate both dataframes
to_fix = pd.concat([cn_to_fix, tg_to_fix], axis=0, sort=True).reset_index(drop=True)
if cold_run:
return to_fix
# Prepare some summary to be signed off by user
print('Updating skeleton {}: {}'.format(y.id, y.name))
print('# of nodes:\t{} -> {} ({:+})'.format(y.n_nodes,
x.n_nodes,
x.n_nodes - y.n_nodes))
print('Cable length:\t{:.1f} -> {:.1f} ({:+.1f})'.format(y.cable_length,