Skip to content

Commit

Permalink
Merge branch 'develop' of https://github.com/parklab/refinery-platform
Browse files Browse the repository at this point in the history
…into develop
  • Loading branch information
scottx611x committed Mar 4, 2016
2 parents 34bbb20 + 4d4d197 commit 0b896f6
Show file tree
Hide file tree
Showing 28 changed files with 1,620 additions and 80 deletions.
3 changes: 3 additions & 0 deletions refinery/core/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@
url(r'^data_sets/(?P<data_set_uuid>'
r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/$',
'data_set', name="data_set"),
url(r'^data_sets2/(?P<data_set_uuid>'
r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/$',
'data_set2', name="data_set2"),
url(r'^data_sets/(?P<data_set_uuid>'
r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/'
r'analysis/(?P<analysis_uuid>'
Expand Down
65 changes: 65 additions & 0 deletions refinery/core/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -324,6 +324,71 @@ def data_set(request, data_set_uuid, analysis_uuid=None):
context_instance=RequestContext(request))


def data_set2(request, data_set_uuid, analysis_uuid=None):
    """Render the (new) data set detail page using core/data_set2.html.

    Mirrors the existing ``data_set`` view but targets the new template.
    Returns a 403 page for authenticated users without read permission and
    a 401 page for anonymous users when the data set is not public.

    :param request: Django HttpRequest
    :param data_set_uuid: UUID of the DataSet to display (404 if missing)
    :param analysis_uuid: optional analysis UUID passed through to template
    """
    data_set = get_object_or_404(DataSet, uuid=data_set_uuid)
    public_group = ExtendedGroup.objects.public_group()

    if not request.user.has_perm('core.read_dataset', data_set):
        if 'read_dataset' not in get_perms(public_group, data_set):
            if request.user.is_authenticated():
                return HttpResponseForbidden(
                    custom_error_page(request, '403.html',
                                      # fixed: 'user' must be a string key;
                                      # bare `user` was an undefined name
                                      {'user': request.user,
                                       'msg': "view this data set"}))
            else:
                return HttpResponse(
                    custom_error_page(request, '401.html',
                                      {'msg': "view this data set"}),
                    # fixed: status code is an int, not the string '401'
                    status=401)
    # get studies
    investigation = data_set.get_investigation()
    studies = investigation.study_set.all()
    # If repository mode, only return workflows tagged for the repository
    if settings.REFINERY_REPOSITORY_MODE:
        workflows = Workflow.objects.filter(show_in_repository_mode=True)
    else:
        workflows = Workflow.objects.all()

    # hoist the first study/assay once instead of re-querying per attribute
    study = studies[0]
    assay = study.assay_set.all()[0]
    study_uuid = study.uuid
    # used for solr field postfixes: FIELDNAME_STUDYID_ASSAY_ID_FIELDTYPE
    study_id = study.id
    assay_uuid = assay.uuid
    assay_id = assay.id
    isatab_archive = None
    pre_isatab_archive = None
    try:
        if investigation.isarchive_file is not None:
            isatab_archive = FileStoreItem.objects.get(
                uuid=investigation.isarchive_file)
    except (FileStoreItem.DoesNotExist,
            FileStoreItem.MultipleObjectsReturned):
        # best effort: the archive download link simply won't be shown
        pass
    try:
        if investigation.pre_isarchive_file is not None:
            pre_isatab_archive = FileStoreItem.objects.get(
                uuid=investigation.pre_isarchive_file)
    except (FileStoreItem.DoesNotExist,
            FileStoreItem.MultipleObjectsReturned):
        # best effort, as above
        pass
    return render_to_response(
        'core/data_set2.html',
        {
            "data_set": data_set,
            "analysis_uuid": analysis_uuid,
            "studies": studies,
            "study_uuid": study_uuid,
            "study_id": study_id,
            "assay_uuid": assay_uuid,
            "assay_id": assay_id,
            "has_change_dataset_permission": 'change_dataset' in get_perms(
                request.user, data_set),
            "workflows": workflows,
            "isatab_archive": isatab_archive,
            "pre_isatab_archive": pre_isatab_archive,
        },
        context_instance=RequestContext(request))


def data_set_edit(request, uuid):
data_set = get_object_or_404(DataSet, uuid=uuid)

Expand Down
58 changes: 31 additions & 27 deletions refinery/data_set_manager/tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@
from .views import Assays, AssaysAttributes
from .utils import update_attribute_order_ranks, \
customize_attribute_response, format_solr_response, get_owner_from_assay,\
generate_facet_fields_query, hide_fields_from_weighted_list,\
generate_filtered_facet_fields, generate_solr_params, \
objectify_facet_field_counts
generate_facet_fields_query, hide_fields_from_list,\
generate_filtered_facet_fields, \
generate_solr_params, objectify_facet_field_counts
from .serializers import AttributeOrderSerializer
from core.models import DataSet, InvestigationLink

Expand Down Expand Up @@ -381,7 +381,7 @@ def setUp(self):
'solr_field': 'Character_Title',
'rank': 1,
'is_exposed': True,
'is_facet': True,
'is_facet': False,
'is_active': True,
'is_internal': False
}, {
Expand All @@ -390,7 +390,7 @@ def setUp(self):
'solr_field': 'Specimen',
'rank': 2,
'is_exposed': True,
'is_facet': True,
'is_facet': False,
'is_active': True,
'is_internal': False
}, {
Expand Down Expand Up @@ -548,31 +548,30 @@ def test_objectify_facet_field_counts(self):
'TYPE': {'Derived Data File': 105,
'Raw Data File': 9}})

def test_hide_fields_from_list(self):
    """All internal fields are stripped; only 'SubAnalysis' survives."""
    field_names = ['uuid', 'is_annotation', 'genome_build', 'django_ct',
                   'django_id', 'species', 'file_uuid', 'study_uuid',
                   'assay_uuid', 'type', 'id', 'name', 'SubAnalysis']
    field_dicts = [{'solr_field': name} for name in field_names]
    self.assertListEqual(hide_fields_from_list(field_dicts),
                         [{'solr_field': 'SubAnalysis'}])

def test_generate_solr_params(self):
# empty params
query = generate_solr_params(QueryDict({}), self.valid_uuid)
self.assertEqual(str(query),
'fq=assay_uuid%3A{}'
'&facet.field=Character_Title&'
'facet.field=Specimen&facet.field=Cell Type&'
'&facet.field=Cell Type&'
'facet.field=Analysis&facet.field=Organism&'
'facet.field=Cell Line&facet.field=Type&'
'facet.field=Group Name&fl=Character_Title%2C'
Expand All @@ -588,7 +587,7 @@ def test_generate_solr_params(self):
'facet.limit=-1&facet.mincount=1'.format(
self.valid_uuid))
# added parameter
parameter_dict = {'limit': 7, 'start': 2,
parameter_dict = {'limit': 7, 'offset': 2,
'include_facet_count': 'true',
'attributes': 'cats,mouse,dog,horse',
'facets': 'cats,mouse,dog,horse',
Expand Down Expand Up @@ -618,10 +617,15 @@ def test_generate_filtered_facet_fields(self):
assay__uuid=self.valid_uuid)
attributes = AttributeOrderSerializer(attribute_orders, many=True)
filtered = generate_filtered_facet_fields(attributes.data)
self.assertItemsEqual(filtered, ['Character_Title', 'Specimen',
self.assertDictEqual(filtered, {'facet_field':
['Cell Type', 'Analysis',
'Organism', 'Cell Line',
'Type', 'Group Name'],
'field_limit':
['Character_Title', 'Specimen',
'Cell Type', 'Analysis',
'Organism', 'Cell Line',
'Type', 'Group Name'])
'Type', 'Group Name']})

def test_generate_facet_fields_query(self):
facet_field_string = ['REFINERY_SUBANALYSIS_6_3_s',
Expand Down Expand Up @@ -664,7 +668,7 @@ def test_format_solr_response_valid(self):
'"Year_Characteristics_6_3_s"],'\
'"wt": "json", "rows": "20"}},'\
'"response": {'\
'"numFound": 1, "start": 0,'\
'"numFound": 1, "offset": 0,'\
'"docs": ['\
'{"Author_Characteristics_6_3_s": "Crocker",'\
'"REFINERY_ANALYSIS_UUID_6_3_s": "N/A",'\
Expand Down
55 changes: 30 additions & 25 deletions refinery/data_set_manager/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -509,7 +509,7 @@ def generate_solr_params(params, assay_uuid):
Params/Solr Params
is_annotation - metadata
facet_count/facet - enables facet counts in query response, true/false
start - paginate, offset response
offset - paginate, offset response
limit/row - maximum number of documents
field_limit - set of fields to return
facet_field - specify a field which should be treated as a facet
Expand All @@ -527,7 +527,7 @@ def generate_solr_params(params, assay_uuid):

is_annotation = params.get('is_annotation', default='false')
facet_count = params.get('include_facet_count', default='true')
start = params.get('start', default='0')
start = params.get('offset', default='0')
row = params.get('limit', default='20')
field_limit = params.get('attributes', default=None)
facet_field = params.get('facets', default=None)
Expand Down Expand Up @@ -555,16 +555,14 @@ def generate_solr_params(params, assay_uuid):
# Missing facet_fields, it is generated from Attribute Order Model.
attributes_str = AttributeOrder.objects.filter(assay__uuid=assay_uuid)
attributes = AttributeOrderSerializer(attributes_str, many=True)
facet_field = generate_filtered_facet_fields(attributes.data)
facet_field_obj = generate_filtered_facet_fields(attributes.data)
facet_field = facet_field_obj.get('facet_field')
field_limit = ','.join(facet_field_obj.get('field_limit'))
facet_field_query = generate_facet_fields_query(facet_field)
solr_params = ''.join([solr_params, facet_field_query])

if field_limit:
solr_params = ''.join([solr_params, '&fl=', field_limit])
else:
# create field_limit from facet_fields
field_limit = ','.join(facet_field)
solr_params = ''.join([solr_params, '&fl=', field_limit])

if facet_pivot:
solr_params = ''.join([solr_params, '&facet.pivot=', facet_pivot])
Expand All @@ -578,34 +576,41 @@ def generate_solr_params(params, assay_uuid):
return encoded_solr_params


def hide_fields_from_list(facet_obj):
    """Filter internal/bookkeeping fields out of a list of attribute dicts.

    :param facet_obj: iterable of dicts, each carrying a ``'solr_field'`` key
    :returns: list of the dicts whose ``solr_field`` should remain visible
    """
    # frozenset gives O(1) membership tests instead of scanning a list
    hidden_fields = frozenset([
        'uuid', 'id', 'django_id', 'file_uuid', 'study_uuid',
        'assay_uuid', 'type', 'is_annotation', 'species',
        'genome_build', 'name', 'django_ct'])

    return [field for field in facet_obj
            if field.get('solr_field') not in hidden_fields]


def generate_filtered_facet_fields(attributes):
    """Split exposed attributes into facet fields and field-limit fields.

    Internal fields are removed first via ``hide_fields_from_list``. Every
    exposed field contributes to ``field_limit``; exposed fields flagged as
    facets additionally contribute to ``facet_field``, ordered by rank.

    :param attributes: iterable of attribute-order dicts (serializer output)
    :returns: dict with ``'facet_field'`` and ``'field_limit'`` lists
    """
    field_limit_list = []
    weighted_facets = []

    for field in hide_fields_from_list(attributes):
        if field.get('is_exposed'):
            field_limit_list.append(field.get('solr_field'))
            if field.get('is_facet'):
                weighted_facets.append((int(field['rank']), field))

    # sort on rank only: a plain tuple sort would fall through to comparing
    # the dicts whenever two ranks tie, which raises TypeError on Python 3
    weighted_facets.sort(key=lambda pair: pair[0])
    facet_fields = [field.get('solr_field') for _, field in weighted_facets]

    return {'facet_field': facet_fields,
            'field_limit': field_limit_list}


def generate_facet_fields_query(facet_fields):
Expand Down
2 changes: 1 addition & 1 deletion refinery/data_set_manager/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -415,7 +415,7 @@ class AssaysFiles(APIView):
description: Enables facet counts in query response
type: boolean
paramType: query
- name: start
- name: offset
description: Paginate offset response
type: integer
paramType: query
Expand Down
1 change: 1 addition & 0 deletions refinery/templates/base.html
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,7 @@
<script src="{% static "js/commons.js" %}"></script>
<script src="{% static "js/analysis-launch.js" %}"></script>
<script src="{% static "js/analysis-monitor.js" %}"></script>
<script src="{% static "js/file-browser.js" %}"></script>
<script src="{% static "js/node-mapping.js" %}"></script>
<script src="{% static "js/workflows.js" %}"></script>
<script src="{% static "js/solr.js" %}"></script>
Expand Down

0 comments on commit 0b896f6

Please sign in to comment.