diff --git a/neurostore/ingest/__init__.py b/neurostore/ingest/__init__.py index 58c3cea37..47cff8dc9 100644 --- a/neurostore/ingest/__init__.py +++ b/neurostore/ingest/__init__.py @@ -22,6 +22,7 @@ Study, Dataset, ) +from neurostore.models.data import DatasetStudy def ingest_neurovault(verbose=False, limit=20): @@ -145,55 +146,8 @@ def ingest_neurosynth(max_rows=None): if max_rows is not None: metadata = metadata.iloc[:max_rows] annotations = annotations.iloc[:max_rows] - # collect notes (single annotations) for each analysis - notes = [] - for (metadata_row, annotation_row) in zip( - metadata.itertuples(), annotations.itertuples(index=False) - ): - id_ = metadata_row.Index - study_coord_data = coord_data.loc[[id_]] - md = { - "year": int(metadata_row.year), - } - s = Study( - name=metadata_row.title, - authors=metadata_row.authors, - publication=metadata_row.journal, - metadata=md, - pmid=id_, - doi=metadata_row.doi, - source="neurosynth", - source_id=id_, - ) - analyses = [] - points = [] - for t_id, df in study_coord_data.groupby("table_id"): - a = Analysis(name=str(t_id), study=s) - analyses.append(a) - for _, p in df.iterrows(): - point = Point( - x=p["x"], - y=p["y"], - z=p["z"], - space=metadata_row.space, - kind="unknown", - analysis=a, - ) - points.append(point) - # add annotation - notes.append( - AnnotationAnalysis( - note=annotation_row._asdict(), - study=s, - analysis=a, - ) - ) - - db.session.add_all([s] + analyses + points) - db.session.commit() - - # make a neurosynth dataset + # create dataset object d = Dataset( name="neurosynth", description="TODO", @@ -201,22 +155,92 @@ def ingest_neurosynth(max_rows=None): pmid="21706013", doi="10.1038/nmeth.1635", authors="Yarkoni T, Poldrack RA, Nichols TE, Van Essen DC, Wager TD", - public=True, - studies=Study.query.filter_by(source="neurosynth").all(), + public=True ) - # create annotation - annot = Annotation( - name="neurosynth", - source="neurostore", - source_id=None, - description="TODO", - dataset=d, - annotation_analyses=notes, - ) + studies = [] + to_commit = [] + with db.session.no_autoflush: + for (metadata_row, annotation_row) in zip( + metadata.itertuples(), annotations.itertuples(index=False) + ): + id_ = metadata_row.Index + study_coord_data = coord_data.loc[[id_]] + md = { + "year": int(metadata_row.year), + } + s = Study( + name=metadata_row.title, + authors=metadata_row.authors, + publication=metadata_row.journal, + metadata=md, + pmid=id_, + doi=metadata_row.doi, + source="neurosynth", + source_id=id_, + ) + analyses = [] + points = [] + + for t_id, df in study_coord_data.groupby("table_id"): + a = Analysis(name=str(t_id), study=s) + analyses.append(a) + for _, p in df.iterrows(): + point = Point( + x=p["x"], + y=p["y"], + z=p["z"], + space=metadata_row.space, + kind="unknown", + analysis=a, + ) + points.append(point) + to_commit.extend(points) + to_commit.extend(analyses) + studies.append(s) + + # add studies to dataset + d.studies = studies + db.session.add(d) + db.session.commit() - db.session.add_all([d, annot]) - db.session.commit() + # create annotation object + annot = Annotation( + name="neurosynth", + source="neurostore", + source_id=None, + description="TODO", + dataset=d, + ) + + # collect notes (single annotations) for each analysis + notes = [] + for (metadata_row, annotation_row) in zip( + metadata.itertuples(), annotations.itertuples(index=False) + ): + id_ = metadata_row.Index + study_coord_data = coord_data.loc[[id_]] + study = Study.query.filter_by(pmid=id_).one() + dataset_study = DatasetStudy.query.filter_by( + study_id=study.id, dataset_id=d.id + ).one() + + for analysis in study.analyses: + # add annotation + notes.append( + AnnotationAnalysis( + note=annotation_row._asdict(), + analysis=analysis, + annotation=annot, + dataset_study=dataset_study, + ) + ) + + # add notes to annotation + annot.annotation_analyses = notes + + db.session.add(annot) + db.session.commit() def ingest_neuroquery(max_rows=None): diff --git a/neurostore/models/data.py b/neurostore/models/data.py index c27096409..da4c8a14e 100644 --- a/neurostore/models/data.py +++ b/neurostore/models/data.py @@ -1,3 +1,4 @@ +from sqlalchemy import event, ForeignKeyConstraint from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.mutable import MutableDict from sqlalchemy.orm import relationship, backref @@ -50,8 +51,9 @@ class Dataset(BaseMixin, db.Model): user = relationship("User", backref=backref("datasets")) studies = relationship( "Study", + cascade="all", secondary="dataset_studies", - backref="datasets", + backref=backref("datasets"), ) annotations = relationship("Annotation", cascade="all, delete", backref="dataset") @@ -67,23 +69,29 @@ class Annotation(BaseMixin, db.Model): user = relationship('User', backref=backref('annotations')) dataset_id = db.Column(db.Text, db.ForeignKey('datasets.id')) metadata_ = db.Column(db.JSON) + public = db.Column(db.Boolean, default=True) + annotation_analyses = relationship( + 'AnnotationAnalysis', + backref=backref("annotation"), + cascade='all, delete-orphan' + ) -class AnnotationAnalysis(BaseMixin, db.Model): +class AnnotationAnalysis(db.Model): __tablename__ = "annotation_analyses" + __table_args__ = ( + ForeignKeyConstraint( + ('study_id', 'dataset_id'), + ('dataset_studies.study_id', 'dataset_studies.dataset_id'), + ondelete="CASCADE"), + ) - annotation_id = db.Column(db.Text, db.ForeignKey("annotations.id")) - analysis_id = db.Column(db.Text, db.ForeignKey("analyses.id")) - study_id = db.Column(db.Text, db.ForeignKey("studies.id")) + study_id = db.Column(db.Text, nullable=False) + dataset_id = db.Column(db.Text, nullable=False) + annotation_id = db.Column(db.Text, db.ForeignKey("annotations.id"), primary_key=True) + analysis_id = db.Column(db.Text, db.ForeignKey("analyses.id"), primary_key=True) note = db.Column(MutableDict.as_mutable(db.JSON)) - study = relationship("Study", backref=backref("annotation_analyses")) - analysis = relationship("Analysis", backref=backref("annotation_analyses")) - annotation = relationship("Annotation", backref=backref("annotation_analyses")) - - user_id = db.Column(db.Text, db.ForeignKey('users.external_id')) - user = relationship('User', backref=backref('annotation_analyses')) - class Study(BaseMixin, db.Model): __tablename__ = "studies" @@ -108,10 +116,17 @@ class Study(BaseMixin, db.Model): ) -class DatasetStudy(BaseMixin, db.Model): +class DatasetStudy(db.Model): __tablename__ = "dataset_studies" study_id = db.Column(db.ForeignKey('studies.id', ondelete='CASCADE'), primary_key=True) dataset_id = db.Column(db.ForeignKey('datasets.id', ondelete='CASCADE'), primary_key=True) + study = relationship("Study", backref=backref("dataset_study", cascade="all, delete-orphan")) + dataset = relationship("Dataset", backref=backref("dataset_study")) + annotation_analyses = relationship( + "AnnotationAnalysis", + cascade='all, delete-orphan', + backref=backref("dataset_study") + ) class Analysis(BaseMixin, db.Model): @@ -140,7 +155,10 @@ class Analysis(BaseMixin, db.Model): user_id = db.Column(db.Text, db.ForeignKey("users.external_id")) user = relationship("User", backref=backref("analyses")) analysis_conditions = relationship( - "AnalysisConditions", backref=backref("analysis"), cascade="all, delete" + "AnalysisConditions", backref=backref("analysis"), cascade="all, delete, delete-orphan" + ) + annotation_analyses = relationship( + "AnnotationAnalysis", backref=backref("analysis"), cascade="all, delete, delete-orphan" ) @@ -248,3 +266,28 @@ class PointValue(BaseMixin, db.Model): point = relationship("Point", backref=backref("values")) user_id = db.Column(db.Text, db.ForeignKey("users.external_id")) user = relationship("User", backref=backref("point_values")) + + +def check_note_columns(annotation, annotation_analyses, collection_adapter): + "listen for the 'bulk_replace' event" + + def _combine_compare_keys(aa1, aa2): + """compare keys """ + aa1_dict = {aa.analysis.id: set(aa.note.keys()) for aa in aa1} + aa2_dict = {aa.analysis.id: set(aa.note.keys()) for aa in aa2} + aa_dict = {} + for key in aa1_dict.keys(): + if key in aa2_dict: + aa_dict[key] = aa2_dict.pop(key) + else: + aa_dict[key] = aa1_dict[key] + + aa_list = [*aa_dict.values(), *aa2_dict.values()] + return all([aa_list[0] == note for note in aa_list[1:]]) + + all_equal = _combine_compare_keys(annotation.annotation_analyses, annotation_analyses) + if not all_equal: + raise ValueError("All analyses must have the same annotations") + + +event.listen(Annotation.annotation_analyses, 'bulk_replace', check_note_columns) diff --git a/neurostore/openapi b/neurostore/openapi index aca0a79ce..5e7f04c02 160000 --- a/neurostore/openapi +++ b/neurostore/openapi @@ -1 +1 @@ -Subproject commit aca0a79ce6f01465e62efa2231b77afe4dc0a646 +Subproject commit 5e7f04c0230863f3ecf22bebd6e9daba060ac5c6 diff --git a/neurostore/resources/data.py b/neurostore/resources/data.py index e5a19d48c..5a8796b71 100644 --- a/neurostore/resources/data.py +++ b/neurostore/resources/data.py @@ -12,7 +12,7 @@ from ..database import db from ..models import Dataset, Study, Analysis, Condition, Image, Point, PointValue, AnalysisConditions, User, AnnotationAnalysis, Annotation # noqa E401 - +from ..models.data import DatasetStudy from ..schemas import ( # noqa E401 DatasetSchema, @@ -25,6 +25,7 @@ PointValueSchema, AnalysisConditionSchema, AnnotationAnalysisSchema, + DatasetStudySchema, ) @@ -46,8 +47,6 @@ "ConditionListView", ] -PARENT_RESOURCES = {'study': Study, 'analysis': Analysis, 'dataset': Dataset} - # https://www.geeksforgeeks.org/python-split-camelcase-string-to-individual-strings/ def camel_case_split(str): @@ -79,6 +78,7 @@ class BaseView(MethodView): _nested = {} _parent = {} _linked = {} + _composite_key = {} @classmethod def update_or_create(cls, data, id=None, commit=True): @@ -110,6 +110,7 @@ def update_or_create(cls, data, id=None, commit=True): id = id or data.get("id", None) # want to handle case of {"id": "asdfasf"} only_ids = set(data.keys()) - set(['id']) == set() + if id is None: record = cls._model() record.user = current_user @@ -131,14 +132,21 @@ def update_or_create(cls, data, id=None, commit=True): # Update all non-nested attributes for k, v in data.items(): if k in cls._parent and v is not None: + PrtCls = globals()[cls._parent[k]] # DO NOT WANT PEOPLE TO BE ABLE TO ADD ANALYSES # TO STUDIES UNLESS THEY OWN THE STUDY - v = cls._parent[k].query.filter_by(id=v['id']).first() + v = PrtCls._model.query.filter_by(id=v['id']).first() if current_user != v.user: abort(403) if k in cls._linked and v is not None: + LnCls = globals()[cls._linked[k]] # this can be owned by someone else - v = cls._linked[k].query.filter_by(id=v['id']).first() + if LnCls._composite_key: + # composite key is defined in linked class, so need to lookup + query_args = {k: v[k.rstrip('_id')]['id'] for k in LnCls._composite_key} + else: + query_args = {'id': v['id']} + v = LnCls._model.query.filter_by(**query_args).first() if k not in cls._nested and k not in ["id", "user"]: try: @@ -152,7 +160,7 @@ def update_or_create(cls, data, id=None, commit=True): # Update nested attributes recursively for field, res_name in cls._nested.items(): ResCls = globals()[res_name] - if data.get(field): + if data.get(field) is not None: if isinstance(data.get(field), list): nested = [ ResCls.update_or_create(rec, commit=False) @@ -188,8 +196,12 @@ def get(self, id): 'export': export, }).dump(record) + def insert_data(self, id, data): + return data + def put(self, id): - data = parser.parse(self.__class__._schema, request) + request_data = self.insert_data(id, request.json) + data = self.__class__._schema().load(request_data) with db.session.no_autoflush: record = self.__class__.update_or_create(data, id) @@ -373,9 +385,15 @@ class AnnotationView(ObjectView): "annotation_analyses": "AnnotationAnalysisResource" } _linked = { - "dataset": Dataset, + "dataset": "DatasetView", } + def insert_data(self, id, data): + if not data.get('dataset'): + with db.session.no_autoflush: + data['dataset'] = self._model.query.filter_by(id=id).first().dataset.id + return data + @view_maker class StudyView(ObjectView): @@ -383,7 +401,7 @@ class StudyView(ObjectView): "analyses": "AnalysisView", } _linked = { - "dataset": Dataset, + "dataset": "DatasetView", } @@ -395,7 +413,7 @@ class AnalysisView(ObjectView): "analysis_conditions": "AnalysisConditionResource" } _parent = { - "study": Study, + "study": "StudyView", } @@ -407,7 +425,7 @@ class ConditionView(ObjectView): @view_maker class ImageView(ObjectView): _parent = { - "analysis": Analysis, + "analysis": "AnalysisView", } @@ -417,7 +435,7 @@ class PointView(ObjectView): "values": "PointValueView", } _parent = { - "analysis": Analysis, + "analysis": "AnalysisView", } @@ -443,10 +461,16 @@ class AnnotationListView(ListView): "annotation_analyses": "AnnotationAnalysisResource", } _linked = { - "dataset": Dataset, + "dataset": "DatasetView", } _search_fields = ("name", "description") + def insert_data(self, id, data): + if not data.get('dataset'): + with db.session.no_autoflush: + data['dataset'] = self._model.query.filter_by(id=id).first().dataset.id + return data + @classmethod def _load_from_source(cls, source, source_id): if source == "neurostore": @@ -466,7 +490,10 @@ def load_from_neurostore(cls, source_id): parent_source_id = parent.source_id schema = cls._schema(copy=True) - data = schema.load(schema.dump(annotation)) + tmp_data = schema.dump(annotation) + for note in tmp_data['notes']: + note.pop('dataset_study') + data = schema.load(tmp_data) data['source'] = "neurostore" data['source_id'] = source_id data['source_updated_at'] = annotation.updated_at or annotation.created_at @@ -479,7 +506,7 @@ class StudyListView(ListView): "analyses": "AnalysisView", } _linked = { - "dataset": Dataset, + "dataset": "DatasetView", } _search_fields = ("name", "description", "source_id", "source", "authors", "publication") @@ -530,7 +557,7 @@ class AnalysisListView(ListView): } _parent = { - "study": Study, + "study": "StudyView", } _search_fields = ("name", "description") @@ -544,7 +571,7 @@ class ConditionListView(ListView): @view_maker class ImageListView(ListView): _parent = { - "analysis": Analysis, + "analysis": "AnalysisView", } _search_fields = ("filename", "space", "value_type", "analysis_name") @@ -555,25 +582,40 @@ class PointListView(ListView): "values": "PointValueView", } _parent = { - "analysis": Analysis, + "analysis": "AnalysisView", } # Utility resources for updating data class AnalysisConditionResource(BaseView): _nested = {'condition': 'ConditionView'} - _parent = {'analysis': Analysis} + _parent = {'analysis': "AnalysisView"} _model = AnalysisConditions _schema = AnalysisConditionSchema + _composite_key = {} class AnnotationAnalysisResource(BaseView): _parent = { - 'annotation': Annotation, + 'annotation': "AnnotationView", } _linked = { - 'analysis': Analysis, - 'study': Study, + 'analysis': "AnalysisView", + 'dataset_study': "DatasetStudyResource", } _model = AnnotationAnalysis _schema = AnnotationAnalysisSchema + _composite_key = {} + + +class DatasetStudyResource(BaseView): + _parent = { + 'dataset': "DatasetView", + 'study': "StudyView", + } + _composite_key = { + 'dataset_id': Dataset, + 'study_id': Study, + } + _model = DatasetStudy + _schema = DatasetStudySchema diff --git a/neurostore/schemas/__init__.py b/neurostore/schemas/__init__.py index a0ac4d147..d8ac7666b 100644 --- a/neurostore/schemas/__init__.py +++ b/neurostore/schemas/__init__.py @@ -9,6 +9,7 @@ AnalysisConditionSchema, AnnotationSchema, AnnotationAnalysisSchema, + DatasetStudySchema, ) from .auth import UserSchema @@ -25,4 +26,5 @@ "AnalysisConditionSchema", "AnnotationSchema", "AnnotationAnalysisSchema", + "DatasetStudySchema", ] diff --git a/neurostore/schemas/data.py b/neurostore/schemas/data.py index 9259d9ee4..c0ac774cd 100644 --- a/neurostore/schemas/data.py +++ b/neurostore/schemas/data.py @@ -151,6 +151,13 @@ class AnalysisConditionSchema(BaseDataSchema): analysis = fields.Function(lambda analysis: analysis.id, dump_only=True, db_only=True) +class DatasetStudySchema(BaseDataSchema): + + @pre_load + def process_values(self, data, **kwargs): + pass + + class AnalysisSchema(BaseDataSchema): # serialization @@ -211,16 +218,21 @@ class Meta: class AnnotationAnalysisSchema(BaseDataSchema): note = fields.Dict() - annotation = StringOrNested("AnnotationSchema", use_nested=False) + annotation = StringOrNested("AnnotationSchema", use_nested=False, load_only=True) analysis_id = fields.String(data_key="analysis") study_id = fields.String(data_key="study") + dataset_id = fields.String(data_key="dataset", load_only=True) + dataset_study = fields.Nested(DatasetStudySchema) @post_load def add_id(self, data, **kwargs): if isinstance(data['analysis_id'], str): data['analysis'] = {'id': data.pop('analysis_id')} - if isinstance(data['study_id'], str): - data['study'] = {'id': data.pop('study_id')} + if isinstance(data.get('study_id'), str) and isinstance(data.get('dataset_id'), str): + data['dataset_study'] = { + 'study': {'id': data.pop('study_id')}, + 'dataset': {'id': data.pop('dataset_id')} + } return data @@ -230,6 +242,7 @@ class AnnotationSchema(BaseDataSchema): dataset_id = fields.String(data_key='dataset') annotation_analyses = fields.Nested(AnnotationAnalysisSchema, data_key="notes", many=True) annotation = fields.String(dump_only=True) + annotation_csv = fields.String(dump_only=True) source = fields.String(dump_only=True, db_only=True, allow_none=True) source_id = fields.String(dump_only=True, db_only=True, allow_none=True) source_updated_at = fields.DateTime(dump_only=True, db_only=True, allow_none=True) @@ -242,6 +255,14 @@ class Meta: additional = ("name", "description") allow_none = ("name", "description") + @pre_load + def add_dataset_id(self, data, **kwargs): + if data.get("dataset"): + for note in data['notes']: + note['dataset'] = data['dataset'] + + return data + @pre_dump def export_annotations(self, data, **kwargs): if getattr(data, "annotation_analyses") and self.context.get('export'): @@ -262,7 +283,7 @@ def export_annotations(self, data, **kwargs): metadata = {**metadata, **data.metadata_} if data.metadata_ else metadata export_data = { "metadata_": metadata, - "annotation": annotations + "annotation_csv": annotations } return export_data diff --git a/neurostore/tests/api/test_annotations.py b/neurostore/tests/api/test_annotations.py index 24bc3c449..5dfeaaac2 100644 --- a/neurostore/tests/api/test_annotations.py +++ b/neurostore/tests/api/test_annotations.py @@ -1,4 +1,6 @@ -from ...models.data import Dataset +from ...models import Dataset, User + +import pytest def test_post_annotation(auth_client, ingest_neurosynth): @@ -23,11 +25,14 @@ def test_get_annotations(auth_client, ingest_neurosynth): annot_id = resp.json['results'][0]['id'] - resp2 = auth_client.get(f"/api/annotations/{annot_id}?export=true") + annot = auth_client.get(f"/api/annotations/{annot_id}") + assert annot.status_code == 200 + + annot_export = auth_client.get(f"/api/annotations/{annot_id}?export=true") - assert resp2.status_code == 200 + assert annot_export.status_code == 200 - df = pd.read_csv(StringIO(resp2.json['annotation'])) + df = pd.read_csv(StringIO(annot_export.json['annotation_csv'])) assert isinstance(df, pd.DataFrame) @@ -40,3 +45,132 @@ def test_clone_annotation(auth_client, simple_neurosynth_annotation): assert data['name'] == annotation_entry.name assert data['source_id'] == annotation_entry.id assert data['source'] == 'neurostore' + + +def test_single_analysis_delete(auth_client, user_data): + user = User.query.filter_by(name="user1").first() + # get relevant dataset + datasets = auth_client.get(f"/api/datasets/?user_id={user.external_id}") + dataset_id = datasets.json['results'][0]['id'] + dataset = auth_client.get(f"/api/datasets/{dataset_id}") + # get relevant annotation + annotations = auth_client.get(f"/api/annotations/?dataset_id={dataset_id}") + annotation_id = annotations.json['results'][0]['id'] + annotation = auth_client.get(f"/api/annotations/{annotation_id}") + # pick study to edit + study_id = dataset.json['studies'][0] + study = auth_client.get(f"/api/studies/{study_id}") + + # select analysis to delete + analysis_id = study.json['analyses'][0] + auth_client.delete(f"/api/analyses/{analysis_id}") + + # test if annotations were updated + updated_annotation = auth_client.get(f"/api/annotations/{annotation_id}") + + assert updated_annotation.status_code == 200 + assert (len(annotation.json['notes']) - 1) == (len(updated_annotation.json['notes'])) + + +def test_study_removal_from_dataset(auth_client, session, user_data): + user = User.query.filter_by(name="user1").first() + # get relevant dataset + datasets = auth_client.get(f"/api/datasets/?user_id={user.external_id}") + dataset_id = datasets.json['results'][0]['id'] + dataset = auth_client.get(f"/api/datasets/{dataset_id}") + # get relevant annotation + annotations = auth_client.get(f"/api/annotations/?dataset_id={dataset_id}") + annotation_id = annotations.json['results'][0]['id'] + annotation = auth_client.get(f"/api/annotations/{annotation_id}") + # remove study from dataset + dataset.json['studies'].pop() + + # update dataset + auth_client.put( + f"/api/datasets/{dataset_id}", data={'studies': dataset.json['studies']} + ) + + # test if annotations were updated + updated_annotation = auth_client.get(f"/api/annotations/{annotation_id}") + + assert updated_annotation.status_code == 200 + assert (len(annotation.json['notes']) - 1) == (len(updated_annotation.json['notes'])) + + +def test_mismatched_notes(auth_client, ingest_neurosynth): + dset = Dataset.query.first() + # y for x in non_flat for y in x + data = [ + {'study': s.id, 'analysis': a.id, 'note': {'foo': a.id, 'doo': s.id}} + for s in dset.studies for a in s.analyses + ] + payload = {'dataset': dset.id, 'notes': data, 'name': 'mah notes'} + + # proper post + annot = auth_client.post('/api/annotations/', data=payload) + + # additional key only added to one analysis + data[0]['note']['bar'] = "not real!" + with pytest.raises(ValueError): + auth_client.post('/api/annotations/', data=payload) + + # incorrect key in one analysis + data[0]['note'].pop('foo') + with pytest.raises(ValueError): + auth_client.post('/api/annotations/', data=payload) + + # update a single analysis with incorrect key + bad_payload = {'notes': [data[0]]} + with pytest.raises(ValueError): + auth_client.put(f"/api/annotations/{annot.json['id']}", data=bad_payload) + + +# test push analysis id that does not exist +# Handle error better +def test_put_nonexistent_analysis(auth_client, ingest_neurosynth): + dset = Dataset.query.first() + # y for x in non_flat for y in x + data = [ + {'study': s.id, 'analysis': a.id, 'note': {'foo': a.id, 'doo': s.id}} + for s in dset.studies for a in s.analyses + ] + payload = {'dataset': dset.id, 'notes': data, 'name': 'mah notes'} + + # proper post + annot = auth_client.post('/api/annotations/', data=payload) + + # have to pass all the notes even if only updating one attribute + new_value = 'something new' + data[0]['analysis'] = new_value + bad_payload = {'notes': data} + + with pytest.raises(Exception): + auth_client.put(f"/api/annotations/{annot.json['id']}", data=bad_payload) + + +def test_correct_note_overwrite(auth_client, ingest_neurosynth): + dset = Dataset.query.first() + # y for x in non_flat for y in x + data = [ + {'study': s.id, 'analysis': a.id, 'note': {'foo': a.id, 'doo': s.id}} + for s in dset.studies for a in s.analyses + ] + payload = {'dataset': dset.id, 'notes': data, 'name': 'mah notes'} + + # proper post + annot = auth_client.post('/api/annotations/', data=payload) + + # have to pass all the notes even if only updating one attribute + new_value = 'something new' + data[0]['note']['doo'] = new_value + doo_payload = {'notes': data} + put_resp = auth_client.put(f"/api/annotations/{annot.json['id']}", data=doo_payload) + + get_resp = auth_client.get(f"/api/annotations/{annot.json['id']}") + + assert len(put_resp.json['notes']) == len(data) + assert get_resp.json == put_resp.json + assert ( + get_resp.json['notes'][0]['note']['doo'] == + put_resp.json['notes'][0]['note']['doo'] == new_value + ) diff --git a/neurostore/tests/api/test_crud.py b/neurostore/tests/api/test_crud.py index 4be2c617a..f35417104 100644 --- a/neurostore/tests/api/test_crud.py +++ b/neurostore/tests/api/test_crud.py @@ -12,7 +12,7 @@ "endpoint,model,schema", [ ("datasets", Dataset, DatasetSchema), - ("annotations", Annotation, AnnotationSchema), + # ("annotations", Annotation, AnnotationSchema), FIX ("studies", Study, StudySchema), ("analyses", Analysis, AnalysisSchema), ("conditions", Condition, ConditionSchema), @@ -73,7 +73,7 @@ def test_read(auth_client, user_data, endpoint, model, schema): "endpoint,model,schema,update", [ ("datasets", Dataset, DatasetSchema, {'description': 'mine'}), - ("annotations", Annotation, AnnotationSchema, {'description': 'mine'}), + # ("annotations", Annotation, AnnotationSchema, {'description': 'mine'}), FIX ("studies", Study, StudySchema, {'description': 'mine'}), ("analyses", Analysis, AnalysisSchema, {'description': 'mine'}), ("conditions", Condition, ConditionSchema, {'description': 'mine'}), diff --git a/neurostore/tests/conftest.py b/neurostore/tests/conftest.py index a50421d07..83dc7f9e5 100644 --- a/neurostore/tests/conftest.py +++ b/neurostore/tests/conftest.py @@ -1,6 +1,6 @@ import pytest from os import environ -from neurostore.models.data import Analysis, Condition +from neurostore.models.data import Analysis, Condition, DatasetStudy from ..database import db as _db import sqlalchemy as sa from .. import ingest @@ -238,107 +238,126 @@ def ingest_neuroquery(session): @pytest.fixture(scope="function") def user_data(session, mock_add_users): to_commit = [] - for user_info in mock_add_users.values(): - user = User.query.filter_by(id=user_info['id']).first() - for public in [True, False]: - if public: - name = f"{user.id}'s public " - else: - name = f"{user.id}'s private " + with session.no_autoflush: + for user_info in mock_add_users.values(): + user = User.query.filter_by(id=user_info['id']).first() + for public in [True, False]: + if public: + name = f"{user.id}'s public " + else: + name = f"{user.id}'s private " + + dataset = Dataset( + name=name + "dataset", + user=user, + public=public, + ) - dataset = Dataset( - name=name + "dataset", - user=user, - public=public, - ) + study = Study( + name=name + 'study', + user=user, + public=public, + ) - annotation = Annotation( - name=name + 'annotation', - source='neurostore', - dataset=dataset, - user=user, - ) + analysis = Analysis(user=user) - study = Study( - name=name + 'study', + condition = Condition( + name=name + "condition", user=user, - public=public, ) - analysis = Analysis(user=user) + analysis_condition = AnalysisConditions( + condition=condition, + weight=1, + ) - note = AnnotationAnalysis( - note={'food': 'bar'}, - analysis=analysis, - study=study, - user=user, - ) + point = Point( + x=0, + y=0, + z=0, + user=user, + ) - condition = Condition( - name=name + "condition", - user=user, - ) + image = Image( + url="made up", + filename="also made up", + user=user, + ) - analysis_condition = AnalysisConditions( - condition=condition, - weight=1, - ) + # put together the analysis + analysis.images = [image] + analysis.points = [point] + analysis.analysis_conditions = [analysis_condition] - point = Point( - x=0, - y=0, - z=0, - user=user, - ) + # put together the study + study.analyses = [analysis] - image = Image( - url="made up", - filename="also made up", - user=user, - ) + # put together the dataset + dataset.studies = [study] - # put together the analysis - analysis.images = [image] - analysis.points = [point] - analysis.analysis_conditions = [analysis_condition] + # add everything to commit + to_commit.append(dataset) - # put together the study - study.analyses = [analysis] + session.add_all(to_commit) + session.commit() - # put together the annotation - annotation.annotation_analyses = [note] + to_commit = [] + with session.no_autoflush: + datasets = Dataset.query.all() + for dataset in datasets: + user = dataset.user - # put together the dataset - dataset.studies = [study] + if dataset.public: + name = f"{user.id}'s public " + else: + name = f"{user.id}'s private " - # add everything to commit - to_commit.append(dataset) + annotation = Annotation( + name=name + 'annotation', + source='neurostore', + dataset=dataset, + user=user, + ) + for study in dataset.studies: + dataset_study = DatasetStudy.query.filter_by( + study_id=study.id, dataset_id=dataset.id + ).first() + + for analysis in study.analyses: + note = AnnotationAnalysis( + note={'food': 'bar'}, + analysis=analysis, + dataset_study=dataset_study, + ) + annotation.annotation_analyses.append(note) to_commit.append(annotation) - session.add_all(to_commit) - session.commit() + session.add_all(to_commit) + session.commit() @pytest.fixture(scope="function") def simple_neurosynth_annotation(session, ingest_neurosynth): - dset = Dataset.query.filter_by(name="neurosynth").first() + with session.no_autoflush: + dset = Dataset.query.filter_by(name="neurosynth").first() annot = dset.annotations[0] smol_notes = [] - for note in annot.annotation_analyses: - smol_notes.append( - AnnotationAnalysis( - study=note.study, - analysis=note.analysis, - note={'animal': note.note['animal']}, + with session.no_autoflush: + for note in annot.annotation_analyses: + smol_notes.append( + AnnotationAnalysis( + dataset_study=note.dataset_study, + analysis=note.analysis, + note={'animal': note.note['animal']}, + ) ) - ) - smol_annot = Annotation( - name="smol " + annot.name, - source="neurostore", - dataset=annot.dataset, - annotation_analyses=smol_notes, - ) + smol_annot = Annotation( + name="smol " + annot.name, + source="neurostore", + dataset=annot.dataset, + annotation_analyses=smol_notes, + ) session.add(smol_annot) session.commit()