Skip to content

Commit

Permalink
Checklist - add datasetName and creationDate (#4029)
Browse files Browse the repository at this point in the history
  • Loading branch information
dimasciput committed Jul 2, 2024
1 parent efcd1b5 commit 5cca7ee
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 3 deletions.
6 changes: 4 additions & 2 deletions bims/api_views/checklist.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@
# Maps serializer field names to the human-readable column titles used in
# the downloaded checklist CSV. Fields absent from this map fall back to
# their raw field name.
CSV_HEADER_TITLE = {
    'class_name': 'Class',
    'scientific_name': 'Accepted Scientific name and authority',
    'cites_listing': 'CITES listing',
    'park_or_mpa_name': 'Park or MPA name',
}


Expand Down Expand Up @@ -62,7 +63,8 @@ def generate_checklist(download_request_id):

csv_file_path = os.path.join(
settings.MEDIA_ROOT,
'checklists', f'checklist_{download_request_id}.csv')
'checklists',
f'checklist_{download_request_id}.csv')
os.makedirs(os.path.dirname(csv_file_path), exist_ok=True)

fieldnames = [key for key in get_serializer_keys(ChecklistSerializer) if key != 'id']
Expand Down
33 changes: 32 additions & 1 deletion bims/serializers/checklist_serializer.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from datetime import datetime

from rest_framework import serializers

from bims.models import TaxonGroupTaxonomy, CITESListingInfo
Expand Down Expand Up @@ -25,6 +27,9 @@ class ChecklistSerializer(SerializerContextCache):
sources = serializers.SerializerMethodField()
cites_listing = serializers.SerializerMethodField()
confidence = serializers.SerializerMethodField()
park_or_mpa_name = serializers.SerializerMethodField()
creation_date = serializers.SerializerMethodField()
dataset = serializers.SerializerMethodField()

def taxon_name_by_rank(
self,
Expand All @@ -46,6 +51,10 @@ def taxon_name_by_rank(
return taxon_name
return '-'

def get_creation_date(self, obj: Taxonomy):
    """Return the checklist generation date (today) as 'DD/MM/YYYY'.

    The taxon *obj* is ignored; every row carries the same date,
    stamped at the moment the checklist is produced.
    NOTE(review): uses a naive local datetime — confirm server
    timezone is acceptable for this field.
    """
    return datetime.today().strftime('%d/%m/%Y')

def get_bio_data(self, obj: Taxonomy):
if not hasattr(self, '_bio_data_cache'):
self._bio_data_cache = {}
Expand Down Expand Up @@ -122,6 +131,20 @@ def get_most_recent_record(self, obj: Taxonomy):
return ''
return bio.order_by('collection_date').last().collection_date.year

def get_dataset(self, obj: Taxonomy):
    """Return the GBIF dataset name(s) backing this taxon's records.

    Returns '' when the taxon has no biological records at all,
    a comma-separated, de-duplicated list of ``datasetName`` values
    when GBIF-sourced records exist, and '-' otherwise (no GBIF
    records, or GBIF records without a dataset name).
    """
    bio = self.get_bio_data(obj)
    if not bio.exists():
        return ''
    if bio.filter(source_collection__iexact='gbif').exists():
        dataset_names = bio.values_list(
            'additional_data__datasetName',
            flat=True
        )
        # Records missing the key yield None — drop those.
        unique_names = {name for name in dataset_names if name is not None}
        if unique_names:
            # Sort so the CSV output is deterministic; bare set
            # iteration order is arbitrary between runs.
            return ','.join(sorted(unique_names))
    return '-'

def get_sources(self, obj: Taxonomy):
bio = self.get_bio_data(obj)
if not bio.exists():
Expand All @@ -138,9 +161,14 @@ def get_sources(self, obj: Taxonomy):
except TypeError:
return ''

# TODO: confidence scoring is not implemented yet.
def get_confidence(self, obj: Taxonomy):
    """Placeholder — always returns an empty string until a real
    confidence measure is defined for checklist rows."""
    return ''

# TODO: park / MPA lookup is not implemented yet.
def get_park_or_mpa_name(self, obj: Taxonomy):
    """Placeholder — always returns '-' until the park or marine
    protected area name can be resolved for the taxon."""
    return '-'

def get_origin(self, obj: Taxonomy):
origin_categories = dict(Taxonomy.CATEGORY_CHOICES)
taxon_group_taxon = self.get_taxon_group_taxon_data(obj)
Expand Down Expand Up @@ -197,5 +225,8 @@ class Meta:
'national_conservation_status',
'sources',
'cites_listing',
'confidence'
'confidence',
'park_or_mpa_name',
'creation_date',
'dataset'
]

0 comments on commit 5cca7ee

Please sign in to comment.