diff --git a/omero/annotation_scripts/MIF/Key_Val_from_Description.py b/omero/annotation_scripts/01-KeyVal_from_Description.py similarity index 94% rename from omero/annotation_scripts/MIF/Key_Val_from_Description.py rename to omero/annotation_scripts/01-KeyVal_from_Description.py index f177a03f1..05dddabee 100644 --- a/omero/annotation_scripts/MIF/Key_Val_from_Description.py +++ b/omero/annotation_scripts/01-KeyVal_from_Description.py @@ -28,7 +28,6 @@ @since 5.3 """ -from __future__ import print_function import sys, os import re @@ -72,7 +71,7 @@ def RemoveMapAnnotations(conn, dtype, Id ): conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, failontimeout=False, closehandle=False) - except Exception, ex: + except Exception as ex: print("Failed to delete links: {}".format(ex.message)) return @@ -102,7 +101,7 @@ def AddKeysToMatchingFiles( conn, Id, global_kv, template, file_keys, spec_kv=No existing_kv = GetExistingMapAnnotions(image) updated_kv = copy.deepcopy(existing_kv) - for key,vals in global_kv.iteritems(): + for key,vals in global_kv.items(): if key not in updated_kv: updated_kv[key] = set() for val in vals: updated_kv[key].add(val) @@ -129,19 +128,12 @@ def AddKeysToMatchingFiles( conn, Id, global_kv, template, file_keys, spec_kv=No updated_kv[key].add(val) if( spec_kv is not None ): - for key,vals in spec_kv.iteritems(): + for key,vals in spec_kv.items(): if key not in updated_kv: updated_kv[key] = set() for val in vals: updated_kv[key].add(val) - #print("existing_kv") - #for k,v in existing_kv.iteritems(): - # print(" {} : {}".format(k,v)) - #print("updated_kv") - #for k,v in updated_kv.iteritems(): - # print(" {} : {}".format(k,v)) - #print("Are they the same?",existing_kv == updated_kv ) nold_i = sum(map( len, existing_kv.values())) nnew_i = sum(map( len, updated_kv.values())) nkv_added = nkv_added+(nnew_i+nold_i) @@ -151,11 +143,11 @@ def AddKeysToMatchingFiles( conn, Id, global_kv, template, file_keys, spec_kv=No RemoveMapAnnotations( conn, 'image', image.getId() ) map_ann = omero.gateway.MapAnnotationWrapper(conn) namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION - namespace = "openmicroscopy.org/mapr/gene" + # namespace = "openmicroscopy.org/mapr/gene" map_ann.setNs(namespace) # convert the ordered dict to a list of lists kv_list=[] - for k,vset in updated_kv.iteritems(): + for k,vset in updated_kv.items(): for v in vset: kv_list.append( [k,v] ) map_ann.setValue( kv_list ) @@ -268,9 +260,6 @@ def AddMapAnnotations(conn, dtype, Id ): val = match.group(2) if( key not in spec_kv ): spec_kv[key]=set() spec_kv[key].add(val) - #print("Global k-v's") - #for k,v in global_kv.iteritems(): - # print( k,v) # now add the key value pairs to the dataset existing_kv = GetExistingMapAnnotions(dataset) @@ -278,11 +267,11 @@ def AddMapAnnotations(conn, dtype, Id ): RemoveMapAnnotations( conn, 'dataset', dataset.getId() ) map_ann = omero.gateway.MapAnnotationWrapper(conn) namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION - namespace = "openmicroscopy.org/mapr/gene" + # namespace = "openmicroscopy.org/mapr/gene" map_ann.setNs(namespace) # convert the ordered dict to a list of lists kv_list=[] - for k,vset in global_kv.iteritems(): + for k,vset in global_kv.items(): for v in vset: kv_list.append( [k,v] ) map_ann.setValue( kv_list ) @@ -325,10 +314,10 @@ def AddMapAnnotations(conn, dtype, Id ): updated_kv[key].add(val) print("existing_kv") - for k,v in existing_kv.iteritems(): + for k,v in existing_kv.items(): print(" {} : {}".format(k,v)) print("updated_kv") - for 
k,v in updated_kv.iteritems(): + for k,v in updated_kv.items(): print(" {} : {}".format(k,v)) print("Are they the same?",existing_kv == updated_kv ) @@ -337,13 +326,13 @@ def AddMapAnnotations(conn, dtype, Id ): print("The key-values pairs are different") RemoveMapAnnotations( conn, 'image', image.getId() ) map_ann = omero.gateway.MapAnnotationWrapper(conn) - namespace ="openmicroscopy.org/mapr/gene" + # namespace ="openmicroscopy.org/mapr/gene" map_ann.setNs(namespace) print("Namespace") print(map_ann) # convert the ordered dict to a list of lists kv_list=[] - for k,vset in updated_kv.iteritems(): + for k,vset in updated_kv.items(): for v in vset: kv_list.append( [k,v] ) map_ann.setValue( kv_list ) @@ -454,5 +443,8 @@ def getObjects(conn, scriptParams): # " details") # # Insight will display the 'Message' parameter #client.setOutput("Message", rstring(message)) + except: + pass + finally: client.closeSession() diff --git a/omero/annotation_scripts/MIF/Key_Val_to_csv.py b/omero/annotation_scripts/02-KeyVal_to_csv.py similarity index 96% rename from omero/annotation_scripts/MIF/Key_Val_to_csv.py rename to omero/annotation_scripts/02-KeyVal_to_csv.py index 64cf47fda..5b111e279 100644 --- a/omero/annotation_scripts/MIF/Key_Val_to_csv.py +++ b/omero/annotation_scripts/02-KeyVal_to_csv.py @@ -73,8 +73,8 @@ def attach_csv_file( conn, obj, data ): # get the list of keys and maximum number of occurences # A key can appear multiple times, for example multiple dyes can be used key_union=OrderedDict() - for img_n,img_kv in data.iteritems(): - for key, vset in img_kv.iteritems(): + for img_n,img_kv in data.items(): + for key, vset in img_kv.items(): key_union[key] = max(key_union.get(key,0),len(vset)) all_keys = key_union.keys() @@ -86,15 +86,15 @@ def to_csv( ll ): # construct the header of the CSV file header = ['filename'] - for key,count in key_union.iteritems(): + for key,count in key_union.items(): header.extend( [key]*count ) # keys can repeat multiple times tfile.write( to_csv( header ) ) # write the keys values for each file - for filename,kv_dict in data.iteritems(): + for filename,kv_dict in data.items(): row = [""]*len(header) # empty row row[0] = filename - for key,vset, in kv_dict.iteritems(): + for key,vset, in kv_dict.items(): n0 = header.index(key) # first occurence of key in header for i,val in enumerate(vset): row[n0+i] = val @@ -173,7 +173,7 @@ def run_script(): conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, failontimeout=False, closehandle=False) print("Deleted existing csv") - except Exception, ex: + except Exception as ex: print("Failed to delete existing csv: {}".format(ex.message)) else: print("No exisiting file") @@ -191,6 +191,9 @@ def run_script(): print(mess) mess="done" client.setOutput("Message", rstring(mess)) + + except: + pass finally: client.closeSession() diff --git a/omero/annotation_scripts/MIF/Key_Val_from_csv.py b/omero/annotation_scripts/03-KeyVal_from_csv.py similarity index 97% rename from omero/annotation_scripts/MIF/Key_Val_from_csv.py rename to omero/annotation_scripts/03-KeyVal_from_csv.py index 046155888..2ed2d7201 100644 --- a/omero/annotation_scripts/MIF/Key_Val_from_csv.py +++ b/omero/annotation_scripts/03-KeyVal_from_csv.py @@ -82,7 +82,7 @@ def remove_MapAnnotations(conn, dtype, Id ): conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, failontimeout=False, closehandle=False) - except Exception, ex: + except Exception as ex: print("Failed to delete links: {}".format(ex.message)) return @@ -162,7 +162,7 @@ def 
populate_metadata(client, conn, script_params): existing_kv = get_existing_MapAnnotions( img ) updated_kv = copy.deepcopy(existing_kv) print("Existing kv ") - for k,vset in existing_kv.iteritems(): + for k,vset in existing_kv.items(): print(type(vset),len(vset)) for v in vset: print(k,v) @@ -187,7 +187,7 @@ def populate_metadata(client, conn, script_params): map_ann.setNs(namespace) # convert the ordered dict to a list of lists kv_list=[] - for k,vset in updated_kv.iteritems(): + for k,vset in updated_kv.items(): for v in vset: kv_list.append( [k,v] ) map_ann.setValue(kv_list) @@ -234,12 +234,14 @@ def run_script(): # wrap client to use the Blitz Gateway conn = BlitzGateway(client_obj=client) - message="here I am" - print "scaript params" - for k,v in script_params.iteritems(): - print k,v + print("script params") + for k,v in script_params.items(): + print(k,v) message = populate_metadata(client, conn, script_params) client.setOutput("Message", rstring(message)) + + except: + pass finally: client.closeSession() diff --git a/omero/annotation_scripts/MIF/Key_Val_remove.py b/omero/annotation_scripts/04-Remove_KeyVal.py similarity index 98% rename from omero/annotation_scripts/MIF/Key_Val_remove.py rename to omero/annotation_scripts/04-Remove_KeyVal.py index b656c4af2..1a4e3bcce 100644 --- a/omero/annotation_scripts/MIF/Key_Val_remove.py +++ b/omero/annotation_scripts/04-Remove_KeyVal.py @@ -27,8 +27,6 @@ @since 5.3.3 """ -from __future__ import print_function - from omero.gateway import BlitzGateway import omero @@ -55,7 +53,7 @@ def RemoveMapAnnotations(conn, dtype, Id ): conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, failontimeout=False, closehandle=False) return 0 - except Exception, ex: + except Exception as ex: print("Failed to delete links: {} ".format(ex.message) ) return 1 return @@ -163,6 +161,9 @@ def getObjects(conn, scriptParams): nobjs = len(objs) message = "Key value data deleted from {} of {} files".format( nobjs-nfailed, nobjs) client.setOutput("Message", rstring(message)) + + except: + pass finally: client.closeSession() diff --git a/omero/annotation_scripts/MIF/Key_Val_from_FileName.py b/omero/annotation_scripts/05-KeyVal_from_Filename.py similarity index 95% rename from omero/annotation_scripts/MIF/Key_Val_from_FileName.py rename to omero/annotation_scripts/05-KeyVal_from_Filename.py index 9a73c8523..ad93b432a 100644 --- a/omero/annotation_scripts/MIF/Key_Val_from_FileName.py +++ b/omero/annotation_scripts/05-KeyVal_from_Filename.py @@ -28,7 +28,6 @@ @since 5.3 """ -from __future__ import print_function import sys, os import re @@ -70,7 +69,7 @@ def RemoveMapAnnotations(conn, dtype, Id ): conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, failontimeout=False, closehandle=False) - except Exception, ex: + except Exception as ex: print("Failed to delete links: {}".format(ex.message)) return @@ -103,7 +102,7 @@ def AddMapAnnotations(conn, dtype, Id ): for line in description: # 1. 
See if this is a mode string - for key,value in modes.iteritems(): + for key,value in modes.items(): match = re.search( "^#\s+{}".format(key),line.lower()) if( match is not None ): mode = value @@ -140,7 +139,7 @@ def AddMapAnnotations(conn, dtype, Id ): file_keys[i] = match.group(2) print("Global k-v's") - for k,v in global_kv.iteritems(): + for k,v in global_kv.items(): print( k,v) # convert the template to a regexp @@ -163,7 +162,7 @@ def AddMapAnnotations(conn, dtype, Id ): map_ann = omero.gateway.MapAnnotationWrapper(conn) namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION map_ann.setNs(namespace) - map_ann.setValue( [ [k,v] for k,v in global_kv.iteritems() ] ) + map_ann.setValue( [ [k,v] for k,v in global_kv.items() ] ) map_ann.save() dataset.linkAnnotation(map_ann) @@ -187,10 +186,6 @@ def AddMapAnnotations(conn, dtype, Id ): filename = path+"/"+name match = regexp.search(filename) - # extract the keys - #for i,key in file_keys.iteritems(): - # val = match.group(int(i)) - # updated_kv[key] = val if( match is not None ): for i,val in enumerate(match.groups()): i1 = i+1 @@ -199,10 +194,10 @@ def AddMapAnnotations(conn, dtype, Id ): updated_kv[key] = val print("existing_kv") - for k,v in existing_kv.iteritems(): + for k,v in existing_kv.items(): print(" {} : {}".format(k,v)) print("updated_kv") - for k,v in updated_kv.iteritems(): + for k,v in updated_kv.items(): print(" {} : {}".format(k,v)) print("Are they the same?",existing_kv == updated_kv ) @@ -214,7 +209,7 @@ def AddMapAnnotations(conn, dtype, Id ): namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION map_ann.setNs(namespace) # convert the ordered dict to a list of lists - map_ann.setValue([ [k,v] for k,v in updated_kv.iteritems() ] ) + map_ann.setValue([ [k,v] for k,v in updated_kv.items() ] ) map_ann.save() image.linkAnnotation(map_ann) diff --git a/omero/annotation_scripts/MIF/Add_Key_Val_from_csv.py b/omero/annotation_scripts/MIF/Add_Key_Val_from_csv.py deleted file mode 100644 index 6a58fca98..000000000 --- a/omero/annotation_scripts/MIF/Add_Key_Val_from_csv.py +++ /dev/null @@ -1,249 +0,0 @@ -# coding=utf-8 -""" - MIF/Add_Key_Val_from_csv.py - - Adds key-value (kv) metadata to images in a dataset from a csv file - The first column contains the filenames - The first row of the file contains the keys - The rest is the values for each file/key - ------------------------------------------------------------------------------ - Copyright (C) 2018 - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
------------------------------------------------------------------------------- -@author Christian Evenhuis -christian.evenhuis@gmail.com -@version 5.3 -@since 5.3 - -""" - -import omero -from omero.gateway import BlitzGateway -from omero.rtypes import rstring, rlong -import omero.scripts as scripts -from omero.model import PlateI, ScreenI, DatasetI -from omero.rtypes import * -from omero.cmd import Delete2 - -import sys -import csv -import copy - -# this is for downloading a temp file -from omero.util.temp_files import create_path - -from omero.util.populate_roi import DownloadingOriginalFileProvider -from omero.util.populate_metadata import ParsingContext - -from collections import OrderedDict - - -import numpy as np - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def get_existing_MapAnnotions( obj ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ord_dict = OrderedDict() - for ann in obj.listAnnotations(): - if( isinstance(ann, omero.gateway.MapAnnotationWrapper) ): - kvs = ann.getValue() - for k,v in kvs: - ord_dict[k] = v - return ord_dict - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def remove_MapAnnotations(conn, dtype, Id ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image = conn.getObject(dtype,int(Id)) - namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION - - filename = image.getName() - - anns = list( image.listAnnotations()) - mapann_ids = [ann.id for ann in anns - if isinstance(ann, omero.gateway.MapAnnotationWrapper) ] - - try: - delete = Delete2(targetObjects={'MapAnnotation': mapann_ids}) - handle = conn.c.sf.submit(delete) - conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, - failontimeout=False, closehandle=False) - - except Exception, ex: - print("Failed to delete links: {}".format(ex.message)) - return - -def get_original_file(conn, object_type, object_id, file_ann_id=None): - #if object_type == "Plate": - # omero_object = conn.getObject("Plate", int(object_id)) - # if omero_object is None: - # sys.stderr.write("Error: Plate does not exist.\n") - # sys.exit(1) - #else: - # omero_object = conn.getObject("Screen", int(object_id)) - # if omero_object is None: - # sys.stderr.write("Error: Screen does not exist.\n") - # sys.exit(1) - omero_object = conn.getObject("Dataset", int(object_id)) - print omero_object.getName() - if omero_object is None: - sys.stderr.write("Error: Dataset does not exist.\n") - sys.exit(1) - file_ann = None - - for ann in omero_object.listAnnotations(): - if isinstance(ann, omero.gateway.FileAnnotationWrapper): - file_name = ann.getFile().getName() - print file_name - # Pick file by Ann ID (or name if ID is None) - if (file_ann_id is None and file_name.endswith(".csv")) or ( - ann.getId() == file_ann_id): - file_ann = ann - if file_ann is None: - sys.stderr.write("Error: File does not exist.\n") - sys.exit(1) - - return file_ann.getFile()._obj - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def populate_metadata(client, conn, script_params): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dataType = script_params["Data_Type"] - ids = script_params["IDs"] - - datasets = list(conn.getObjects(dataType, ids)) - for ds in datasets: - ID = ds.getId() - - # not sure what this is doing - file_ann_id = None - if "File_Annotation" in script_params: - file_ann_id = long(script_params["File_Annotation"]) - 
print("set ann id") - - original_file = get_original_file( - conn, dataType, ID, file_ann_id) - provider = DownloadingOriginalFileProvider(conn) - file_handle = provider.get_original_file_data(original_file) - - - # create a dictionary for image_name:id - dict_name_id={} - for img in ds.listChildren(): - img_name = img.getName() - if( img_name in dict_name_id ): - sys.stderr.write("File names not unique: {}".format(imageaname)) - sys.exit(1) - dict_name_id[img_name] = int(img.getId()) - - - # step through the csv file - data =list(csv.reader(file_handle,delimiter=',')) - - # keys are in the header row - header =data[0] - kv_data = header[1:] # first header is the fimename columns - rows = data[1:] - - nimg_updated = 0 - for row in rows: # first row is the header - print(row) - img_name = row[0] - if( img_name not in dict_name_id ): - print("Can't find filename : {}".format(img_name) ) - else: - img_ID = dict_name_id[img_name] # look up the ID - img = conn.getObject('Image',img_ID) # get the img - - existing_kv = get_existing_MapAnnotions( img ) - updated_kv = copy.deepcopy(existing_kv) - print("Existing kv ") - for k,v in existing_kv.iteritems(): - print(k,v) - - for i in range(1,len(row)): # first entry is the filename - key = header[i].strip() - vals = row[i].strip().split(';') - if( len(vals) > 0 ): - for val in vals: - if( len(val)>0 ): updated_kv[key] = val - - - if( existing_kv != updated_kv ): - nimg_updated = nimg_updated + 1 - print("The key-values pairs are different") - remove_MapAnnotations( conn, 'Image', img.getId() ) - map_ann = omero.gateway.MapAnnotationWrapper(conn) - namespace = omero.constants.metadata.NSCLIENTMAPANNOTATION - namesoace = omero.constants.metadata.NSBULKANNOTATIONS - map_ann.setNs(namespace) - # convert the ordered dict to a list of lists - map_ann.setValue([ [k,v] for k,v in updated_kv.iteritems() ] ) - map_ann.save() - img.linkAnnotation(map_ann) - else: - print("No change change in kv's") - - return "Added {} kv pairs to {}/{} files ".format(len(header)-1,nimg_updated,len(dict_name_id)) - - -def run_script(): - - data_types = [rstring('Dataset')] - client = scripts.client( - 'Add_Key_Val_from_csv', - """ - This script processes a csv file, attached to a Dataset - """, - scripts.String( - "Data_Type", optional=False, grouping="1", - description="Choose source of images", - values=data_types, default="Dataset"), - - scripts.List( - "IDs", optional=False, grouping="2", - description="Plate or Screen ID.").ofType(rlong(0)), - - scripts.String( - "File_Annotation", grouping="3", - description="File ID containing metadata to populate."), - - authors=["Christian Evenhuis"], - institutions=["MIF UTS"], - contact="christian.evenhuis@gmail.com" - ) - - try: - # process the list of args above. 
- script_params = {} - for key in client.getInputKeys(): - if client.getInput(key): - script_params[key] = client.getInput(key, unwrap=True) - - # wrap client to use the Blitz Gateway - conn = BlitzGateway(client_obj=client) - message="here I am" - print "scaript params" - for k,v in script_params.iteritems(): - print k,v - message = populate_metadata(client, conn, script_params) - client.setOutput("Message", rstring(message)) - - finally: - client.closeSession() - - -if __name__ == "__main__": - run_script() diff --git a/omero/annotation_scripts/MIF/Create_Metadata_from_csv.py b/omero/annotation_scripts/MIF/Create_Metadata_from_csv.py deleted file mode 100644 index efbc28561..000000000 --- a/omero/annotation_scripts/MIF/Create_Metadata_from_csv.py +++ /dev/null @@ -1,187 +0,0 @@ -# coding=utf-8 -""" - MIF/Create_Metadata_csv.py - - Reads the metadata associated with the images in a dataset - a creates a csv file attached to dataset - ------------------------------------------------------------------------------ - Copyright (C) 2018 - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ------------------------------------------------------------------------------- -@author Christian Evenhuis -christian.evenhuis@gmail.com -@version 5.3 -@since 5.3 - -""" - -import omero -from omero.gateway import BlitzGateway -from omero.rtypes import rstring, rlong -import omero.scripts as scripts -from omero.model import PlateI, ScreenI, DatasetI -from omero.rtypes import * -from omero.cmd import Delete2 - -import tempfile - -import os,sys -import csv -import copy -import numpy as np -from collections import OrderedDict - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def GetExistingMapAnnotions( obj ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ord_dict = OrderedDict() - for ann in obj.listAnnotations(): - if( isinstance(ann, omero.gateway.MapAnnotationWrapper) ): - kvs = ann.getValue() - for k,v in kvs: - ord_dict[k] = v - return ord_dict - - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def attach_csv_file( conn, obj, data ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ''' writes the data (list of dicts) to a file - and attaches it to the object - conn : connection to OMERO (need to annotation creation - obj : the object to attach the file file to - data : the data - ''' - # create the tmp directory - tmp_dir = tempfile.mkdtemp(prefix='MIF_meta') - (fd, tmp_file) = tempfile.mkstemp(dir=tmp_dir, text=True) - - print("tmp_dir", tmp_dir) - #print("tmp_file",tmp_file) - # get the union of the keys - key_union=OrderedDict() - for img_k,img_v in data.iteritems(): - key_union.update(img_v) - - all_keys = key_union.keys() - def to_csv( ll ): - nl = len(ll) - fmstr = "{}, "*(nl-1)+"{}\n" - return fmstr.format(*ll) - tfile 
= os.fdopen(fd, 'w') - - header = ['filename']+all_keys - tfile.write( to_csv( header ) ) - - for fname,kv_dict in data.iteritems(): - row = [fname]+[ kv_dict.get(key,"") for key in all_keys ] - tfile.write( to_csv( row ) ) - tfile.close() - - - name = "{}_metadata_out.csv".format(obj.getName()) - # link it to the object - ann = conn.createFileAnnfromLocalFile( - tmp_file, origFilePathAndName=name, - ns='MIF_test' ) - - ann = obj.linkAnnotation(ann) - - # remove the tmp file - os.remove(tmp_file) - os.rmdir (tmp_dir ) - return "done" - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def run_script(): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - data_types = [rstring('Dataset')] - client = scripts.client( - 'Create_Metadata_csv', - """ - This script reads the metadata attached data set and creates - a csv file attached to the Dataset - """, - scripts.String( - "Data_Type", optional=False, grouping="1", - description="Choose source of images", - values=data_types, default="Dataset"), - - scripts.List( - "IDs", optional=False, grouping="2", - description="Plate or Screen ID.").ofType(rlong(0)), - - - authors=["Christian Evenhuis"], - institutions=["MIF UTS"], - contact="christian.evenhuis@gmail.com" - ) - - try: - # process the list of args above. - script_params = {} - for key in client.getInputKeys(): - if client.getInput(key): - script_params[key] = client.getInput(key, unwrap=True) - - # wrap client to use the Blitz Gateway - conn = BlitzGateway(client_obj=client) - - dataType = script_params["Data_Type"] - ids = script_params["IDs"] - datasets = list(conn.getObjects(dataType, ids)) # generator of images or datasets - for ds in datasets: - # name of the file - csv_name = "{}_metadata_out.csv".format(ds.getName()) - print(csv_name) - - # check to see if the file exists - for ann in ds.listAnnotations(): - if( isinstance(ann, omero.gateway.FileAnnotationWrapper) ): - if( ann.getFileName() == csv_name ): - # if the name matches delete it - try: - delete = Delete2(targetObjects={'FileAnnotation': [int(ann.getId())]}) - handle = conn.c.sf.submit(delete) - conn.c.waitOnCmd(handle, loops=10, ms=500, failonerror=True, - failontimeout=False, closehandle=False) - print("Deleted") - except Exception, ex: - print("Failed to delete links: {}".format(ex.message)) - - # assemble the metadata - file_names = [ img.getName() for img in list(ds.listChildren()) ] - kv_dict = OrderedDict() - for img in ds.listChildren(): - fn = img.getName() - im_kv = GetExistingMapAnnotions(img) - kv_dict[fn] = GetExistingMapAnnotions(img) - - for k,v in kv_dict.iteritems(): - print(k) - print(v) - - # attach the data - mess = attach_csv_file( conn, ds, kv_dict ) - mess="done" - client.setOutput("Message", rstring(mess)) - - finally: - client.closeSession() - - -if __name__ == "__main__": - run_script() diff --git a/omero/annotation_scripts/cli_scripts/Parse_OMERO_Properties.py b/omero/annotation_scripts/cli_scripts/Parse_OMERO_Properties.py deleted file mode 100644 index 68811811a..000000000 --- a/omero/annotation_scripts/cli_scripts/Parse_OMERO_Properties.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- -""" -Created on Mon Nov 20 14:16:53 2017 - -@author: evenhuis - -FOR TRAINING PURPOSES ONLY! 
- -Change this file to Parse_OMERO_Properties.py and enter your ID/username -""" - -import omero -import os -import sys - -try: - omero_app_url = os.environ["OMERO_APP_URL"] - omero_username = os.environ["OMERO_USERNAME"] - omero_user_password = os.environ["OMERO_USER_PASSWORD"] -except KeyError: - print "Please set the environment variable OMERO_USERNAME, OMERO_USER_PASSWORD and OMERO_APP_URL" - sys.exit(1) - -client = omero.client(omero_app_url) - -omeroProperties = client.getProperties().getPropertiesForPrefix('omero') - -# Configuration -# ================================================================= -# These values will be imported by all the other training scripts. -HOST = omeroProperties.get('omero.host', omero_app_url) -PORT = omeroProperties.get('omero.port', 4064) -USERNAME = omeroProperties.get('omero.user', omero_username) -PASSWORD = omeroProperties.get('omero.pass', omero_user_password) -OMERO_WEB_HOST = omeroProperties.get('omero.webhost') -SERVER_NAME = omeroProperties.get(omero_app_url) -# projectId = omeroProperties.get('omero.projectid') -# datasetId = omeroProperties.get('omero.datasetid') -# imageId = omeroProperties.get('omero.imageid') diff --git a/omero/annotation_scripts/cli_scripts/Parse_OMERO_Properties_example.py b/omero/annotation_scripts/cli_scripts/Parse_OMERO_Properties_example.py deleted file mode 100644 index d5b781414..000000000 --- a/omero/annotation_scripts/cli_scripts/Parse_OMERO_Properties_example.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- -""" -Created on Mon Nov 20 14:16:53 2017 - -@author: evenhuis -""" - -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# -# Copyright (C) 2015 University of Dundee & Open Microscopy Environment. -# All Rights Reserved. -# Use is subject to license terms supplied in LICENSE.txt -# - -""" -FOR TRAINING PURPOSES ONLY! - -Change this file to Parse_OMERO_Properties.py and enter your ID/username -""" - -import omero - -client = omero.client('omero-app.research.uts.edu.au') - -omeroProperties = client.getProperties().getPropertiesForPrefix('omero') - -# Configuration -# ================================================================= -# These values will be imported by all the other training scripts. 
-HOST = omeroProperties.get('omero.host', 'omero-app.research.uts.edu.au') -PORT = omeroProperties.get('omero.port', 4064) -USERNAME = omeroProperties.get('omero.user','111111') -PASSWORD = omeroProperties.get('omero.pass','your-password') -OMERO_WEB_HOST = omeroProperties.get('omero.webhost') -SERVER_NAME = omeroProperties.get('omero-app.research.uts.edu.au') -#projectId = omeroProperties.get('omero.projectid') -#datasetId = omeroProperties.get('omero.datasetid') -#imageId = omeroProperties.get('omero.imageid') diff --git a/omero/annotation_scripts/cli_scripts/download_files.py b/omero/annotation_scripts/cli_scripts/download_files.py deleted file mode 100644 index 0849bc3f7..000000000 --- a/omero/annotation_scripts/cli_scripts/download_files.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 5 10:09:13 2018 - -@author: evenhuis -""" -# from Parse_OMERO_Properties import datasetId, imageId, plateId - -import sys -import argparse -import os - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def download_dataset(conn, Id, path, orig=False, tif=False): - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - """ - download a dataset from OMERO - INPUT : conn, the connection needs to be open - Id : ID of the dataset - path : location of local filesystem - fmt : "o" is original , "t" is tiff - """ - - # get the data set - dataset = conn.getObject('Dataset', Id) - if (dataset == None): - print("Dataset ID {} not found in group".format(Id)) - sys.exit(1) - print("here") - - # get the images - imgs = list(dataset.listChildren()) - - # this is the directory to place the data in - ds_name = dataset.getName() - print("{}/".format(ds_name)) - reldir = os.path.join(path, ds_name) - if (not os.path.isdir(reldir)): - os.makedirs(reldir) - - for img in imgs: - print(" " * len(ds_name) + "/{}".format(img.getName())) - - if (orig): - for orig in img.getImportedImageFiles(): - name = orig.getName() - file_path = os.path.join(reldir, name) - - if (not os.path.exists(file_path)): - with open(str(file_path), 'w') as f: - for chunk in orig.getFileInChunks(): - f.write(chunk) - - if (tif): - name = os.path.basename(img.getName()) + ".ome.tif" - file_path = os.path.join(reldir, name) - file_size, block_gen = img.exportOmeTiff(bufsize=65536) - with open(str(file_path), "wb") as f: - for piece in block_gen: - f.write(piece) - - return - - -""" -start-code -""" - -parser = argparse.ArgumentParser(description='Download datasets and projects from OMERO') -parser.add_argument('-p', '--project', nargs="+", default=[], help="IDs of projects to download") -parser.add_argument('-d', '--dataset', nargs="+", default=[], help="IDs of datasets to download") -parser.add_argument('-g', '--group', nargs="?", help="name of group") -parser.add_argument('-o', '--orig', action="store_true", default=False, help="download originals") -parser.add_argument('-t', '--tif', action="store_true", default=False, help="download OME-TIFs") - -args = parser.parse_args() - -# Create a connection -# =================== - - -from omero.gateway import BlitzGateway -from Parse_OMERO_Properties import USERNAME, PASSWORD, HOST, PORT - -print(HOST) - -conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) - -try: - conn.connect() - - user = conn.getUser() - print "Current user:" - print " ID:", user.getId() - print " Username:", user.getName() - print " Full Name:", user.getFullName() - - if args.group is not None: - 
print("change group") - new_group = args.group - groups = [g.getName() for g in conn.listGroups()] - print(groups) - if new_group not in groups: - print("{} not found in groups:".format(new_group)) - for gn in groups: - print(" {}".format(gn)) - sys.exit(1) - else: - conn.setGroupNameForSession(new_group) - - path = os.path.join(os.getcwd(), 'downloads') - print(args.dataset) - for d_id in args.dataset: - download_dataset(conn, d_id, path, orig=args.orig, tif=args.tif) - - print(args.project) - for p_id in args.project: - project = conn.getObject('Project', p_id) - path_p = os.path.join(path, project.getName()) - if project is None: - print("project ID {} not found in group {}".format(p_id, orig=args.orig, tif=args.tif)) - sys.exit(1) - - for ds in list(project.listChildren()): - download_dataset(conn, ds.getId(), path_p, orig=args.orig, tif=args.tif) - -except Exception: - print "There was a problem trying to connect" - sys.exit(1) - -finally: - # When you are done, close the session to free up server resources. - conn.close() diff --git a/omero/annotation_scripts/cli_scripts/download_files_5.2.py b/omero/annotation_scripts/cli_scripts/download_files_5.2.py deleted file mode 100644 index 0216e96d1..000000000 --- a/omero/annotation_scripts/cli_scripts/download_files_5.2.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 5 10:09:13 2018 - -@author: evenhuis -""" -#from Parse_OMERO_Properties import datasetId, imageId, plateId - -import sys -import argparse -import os - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def download_dataset( conn, Id, path, orig=False, tif=False ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ''' download a dataset from OMERO - INPUT : conn : the connection needs to be open - Id : ID of the dataset - path : location of local filesystem - fmt : "o" is orginal , "t" is tiff - ''' - - # get the data set - dataset = conn.getObject('Dataset',Id) - if( dataset==None ): - print("Dataset ID {} not found in group".format(Id)) - sys.exit(1) - print("here") - - # get the images - imgs = list(dataset.listChildren()) - - # this is the directory to place the data in - ds_name = dataset.getName() - print("{}/".format(ds_name) ) - reldir = os.path.join( path, ds_name) - if( not os.path.isdir(reldir) ): - os.makedirs(reldir) - - for img in imgs: - print(" "*len(ds_name)+"/{}".format(img.getName())) - - if( orig ): - for orig in img.getImportedImageFiles(): - name = orig.getName() - file_path = os.path.join( reldir, name) - - if( not os.path.exists( file_path) ): - with open(str(file_path), 'w') as f: - for chunk in orig.getFileInChunks(): - f.write(chunk) - - if( tif ): - name = os.path.basename(img.getName())+".ome.tif" - file_path = os.path.join(reldir, name) - file_size, block_gen = img.exportOmeTiff(bufsize=65536) - with open(str(file_path), "wb") as f: - for piece in block_gen: - f.write(piece) - - return - -""" -start-code -""" - - -parser = argparse.ArgumentParser(description='Download datasets and projects from OMERO') -parser.add_argument('-p','--project', nargs="+", default=[],help="IDs of projects to download") -parser.add_argument('-d','--dataset', nargs="+", default=[],help="IDs of datasets to download") -parser.add_argument('-g','--group' , nargs="?", help="name of group") -parser.add_argument('-o','--orig' , action="store_true", default=False, help="download originals") -parser.add_argument('-t','--tif' , action="store_true", 
default=False, help="download OME-TIFs" ) - -args = parser.parse_args() - -# Create a connection -# =================== -try: - from omero.gateway import BlitzGateway - from Parse_OMERO_Properties_52 import USERNAME, PASSWORD, HOST, PORT - print(HOST) - conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) - conn.connect() - - user = conn.getUser() - print "Current user:" - print " ID:", user.getId() - print " Username:", user.getName() - print " Full Name:", user.getFullName() - - if( args.group is not None ): - print("change group") - new_group = args.group - groups = [ g.getName() for g in conn.listGroups() ] - print(groups) - if( new_group not in groups ): - print("{} not found in groups:".format(new_group)) - for gn in groups: - print(" {}".format(gn)) - sys.exit(1) - else: - conn.setGroupNameForSession(new_group) - - path = os.getcwd() - print( args.dataset ) - for d_id in args.dataset: - download_dataset( conn, d_id, path, orig=args.orig, tif=args.tif ) - - print(args.project) - for p_id in args.project: - project = conn.getObject('Project',p_id) - path_p = os.path.join(path,project.getName()) - if( project==None ): - print("project ID {} not found in group {}".format(p_id, orig=args.orig, tif=args.tif)) - sys.exit(1) - - for ds in list(project.listChildren()): - download_dataset( conn, ds.getId(), path_p, orig=args.orig, tif=args.tif ) - - -finally: - # When you are done, close the session to free up server resources. - conn.close() - - diff --git a/omero/annotation_scripts/cli_scripts/download_local_files.py b/omero/annotation_scripts/cli_scripts/download_local_files.py deleted file mode 100644 index 91d3aac73..000000000 --- a/omero/annotation_scripts/cli_scripts/download_local_files.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 5 10:09:13 2018 - -@author: evenhuis -""" -#from Parse_OMERO_Properties import datasetId, imageId, plateId - -import sys -import argparse -import os -from collections import OrderedDict -import omero - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def download_dataset( conn, Id, path, orig=False, tif=False ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ''' download a dataset from OMERO - INPUT : conn : the connection needs to be open - Id : ID of the dataset - path : location of local filesystem - fmt : "o" is orginal , "t" is tiff - ''' - - # get the data set - dataset = conn.getObject('Dataset',Id) - if( dataset==None ): - print("Dataset ID {} not found in group".format(Id)) - sys.exit(1) - print("here") - - # get the images - imgs = list(dataset.listChildren()) - - # this is the directory to place the data in - ds_name = dataset.getName() - print("{}/".format(ds_name) ) - reldir = os.path.join( path, ds_name) - if( not os.path.isdir(reldir) ): - os.makedirs(reldir) - - for img in imgs: - print(" "*len(ds_name)+"/{}".format(img.getName())) - - if( orig ): - for orig in img.getImportedImageFiles(): - name = orig.getName() - file_path = os.path.join( reldir, name) - - print name, orig.getId(),orig.canDownload() - - if( not os.path.exists( file_path) ): - with open(str(file_path), 'w') as f: - for chunk in orig.getFileInChunks(): - f.write(chunk) - - if( tif ): - name = os.path.basename(img.getName())+".ome.tif" - file_path = os.path.join(reldir, name) - file_size, block_gen = img.exportOmeTiff(bufsize=65536) - with open(str(file_path), "wb") as f: - for piece in block_gen: - f.write(piece) - - return - -# - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def download_file( conn, Id, reldir, new_name ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - img = conn.getObject('Image',Id) - if( img is None ): return False - - # create the directory if it does not exist - if( not os.path.isdir(reldir) ): - os.makedirs(reldir) - - # dowload the files - for orig in img.getImportedImageFiles(): - name,ext = os.path.splitext(orig.getName()) - file_path = os.path.join( reldir, new_name+ext) - print name - print orig.getId() - print orig.canDownload() - - if( not os.path.exists( file_path) ): - with open(str(file_path), 'w') as f: - for chunk in orig.getFileInChunks(): - f.write(chunk) - - # get the thumbnail - thumb_dir = os.path.join(reldir,'thumbs') - if( not os.path.isdir(thumb_dir) ): - os.makedirs(thumb_dir) - - thumb_name = os.path.join(thumb_dir,new_name+".jpg") - if( not os.path.exists( thumb_name ) ): - thumb=conn.getThumbnailSet([Id],128) - fobj = open(thumb_name, "wb") - fobj.write(thumb[Id]) - fobj.close() - - - return - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def setup_dict(): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - meta_dict={} - meta_dict["@context"]=OrderedDict([ - ("@vocab", "http://schema.org/"), - ("schema", "http://schema.org/"), - ("path", "schema:path"), - ("identifier", "schema:identifier"), - ("startTime", "schema:startTime"), - ("endTime", "schema:endTime"), - ("description", "schema:description"), - ("dateCreated", "schema:dateCreated"), - ("contentSize", "schema:contentSize"), - ("creator", "schema:creator"), - ("category", "schema:category"), - ("fileFormat", "schema:fileFormat"), - ("dateModified", "schema:dateModified") - ]) - - institute=OrderedDict([ - ("@id", "http://uts.edu.au"), - ("@type", "Organization"), - ("address", "Broadway, 2007, NSW Australia"), - ("identifier", "http://uts.edu.au"), - ("name", "University of Technology Sydney") - ]) - person=OrderedDict([ - ("@id", "http://github.com/moisbo"), - ("@type", "Person"), - ("affiliation", { - "@id": "http://uts.edu.au" - }), - ("email", "moises.sacal@uts.edu.au"), - ("familyName", "Sacal"), - ("givenName", "Moises"), - ("identifier", "http://github.com/moisbo"), - ("name", "Moises Sacal") - ]) - crate=OrderedDict([ - ("@id", "https://dx.doi.org/10.5281/zenodo.1009240"), - ("@type", "Dataset"), - ("isOutputOf", "DataCrate"), - ("contact", { - "@id": "http://github.com/moisbo" - }), - ("contentLocation", { - "@id": "http://uts.edu.au" - }), - ("path", "./"), - ("creator", { - "@id": "http://github.com/moisbo" - }), - ("datePublished", "2017-06-29"), - ("description", "This is sample data of OMERO"), - ("hasPart", [ - { - "@id": "images" - } - ]), - ("identifier", "https://dx.doi.org/10.5281/zenodo.1009240"), - ("keywords", "OMERO"), - ("name", "Sample dataset OMERO"), - ("publisher", { - "@id": "http://uts.edu.au" - }), - ("relatedLink", { - "@id": "http://github.com/moisbo", - "OmeroURl": "https://omero.research.uts.edu.au" - }), - ("temporalCoverage", "2017") - ]) - images=OrderedDict([ - ("@id", "images"), - ("@type", "Dataset"), - ("path", "data"), - ("identifier", "images"), - ("startTime", "2016-01-21T11:00:00+11:00"), - ("endTime", "2016-11-21T11:00:00+11:00"), - ("description", "This is a test datacrate of files pulled from OMERO"), - ("hasPart", list(dict())), - ("funder", [{ - "@id": "http://uts.edu.au" - }]) - ]) - 
meta_dict["@graph"]=list([institute,person,crate,images]) - return meta_dict -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -def json_metadata( conn, Id, reldir, new_name ): -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - img = conn.getObject("Image",Id) - name,ext = os.path.splitext(img.getName()) - filepath = os.path.join(reldir,new_name+ext) - thumbpath = os.path.join(reldir,'thumbs',new_name+'.jpg') - file_dict=OrderedDict([ - ("@id", filepath), - ("@type", "File"), - ("creator", { - "@id": "http://github.com/moisbo" - }), - ("category", "PROCESSED"), - ("fileFormat", "text/plain"), - ("path", filepath), - ("filename", filepath), - ("thumbnail", [ - { - "@id": thumbpath - } - ]) - ]) - - details=img.getDetails() - file_dict["omeroName"] =img.getName() - file_dict["description"] =img.getDescription() - file_dict["omeroId" ] =Id - file_dict["omeroAuthor"] =img.getAuthor() - file_dict["omeroGroup"] =details.getGroup().getName() - - # print the dataset/proj (if they exist) - otype="" - parent = img.getParent() - while( parent is not None ): - otype=parent.OMERO_CLASS - file_dict["omero"+otype] = parent.getName() - parent = parent.getParent() - - file_dict["dateUploaded"]=img.getDate().isoformat() - file_dict["dateCreated" ]=img.creationEventDate().isoformat() - - file_dict["channels"]=img.getChannelLabels() - - # list the user added kvs' from the mapAnnotation - for ann in img.listAnnotations(): - if( isinstance(ann, omero.gateway.MapAnnotationWrapper) ): - for k,v in ann.getValue(): - file_dict[k]=v - - thumb_dict= { - "@type": "File", - "path": [ - thumbpath - ], - "@id": thumbpath - } - return file_dict,thumb_dict - -""" -start-code -""" -import sys -import json - -parser = argparse.ArgumentParser(description='Download datasets and projects from OMERO') -parser.add_argument('-p','--project', nargs="+", default=[],help="IDs of projects to download") -parser.add_argument('-d','--dataset', nargs="+", default=[],help="IDs of datasets to download") -parser.add_argument('-g','--group' , nargs="?", help="name of group") -parser.add_argument('-o','--orig' , action="store_true", default=False, help="download originals") -parser.add_argument('-t','--tif' , action="store_true", default=False, help="download OME-TIFs" ) - -args = parser.parse_args() - -# Create a connection -# =================== -try: - from omero.gateway import BlitzGateway - from Parse_OMERO_Properties import USERNAME, PASSWORD, HOST, PORT - print(HOST) - conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) - conn.connect() - - user = conn.getUser() - print "Current user:" - print " ID:", user.getId() - print " Username:", user.getName() - print " Full Name:", user.getFullName() - - if( args.group is not None ): - print("change group") - new_group = args.group - groups = [ g.getName() for g in conn.listGroups() ] - print(groups) - if( new_group not in groups ): - print("{} not found in groups:".format(new_group)) - for gn in groups: - print(" {}".format(gn)) - sys.exit(1) - else: - conn.setGroupNameForSession(group) - print conn.getGroupFromContext().getName() - path = os.getcwd() - - mdict= setup_dict() - - def package_file( mdict,conn, group, Id, path, name ): - print "processing {} {} as : {}/{}".format(group,Id,path,name) - groups = [ g.getName() for g in conn.listGroups() ] - if( group not in groups ): - print " no group found" - return - if( group != conn.getGroupFromContext().getName() ): - conn.setGroupNameForSession(group) - - 
download_file( conn, Id, path, name ) - file_dict,thumb_dict = json_metadata(conn, Id, path, name) - mdict["@graph"][3]['hasPart'].append({"@id":file_dict['@id']}) - mdict["@graph"].append(file_dict) - mdict["@graph"].append(thumb_dict) - - return mdict - - - mdict=package_file( mdict, conn, 'training', 30666, 'data', 'treat4') - mdict=package_file( mdict, conn, 'default', 1130, 'data', 'treat3') - mdict=package_file( mdict, conn, 'default', 22422, 'data', 'treat1') - mdict=package_file( mdict, conn, 'default', 1159, 'data', 'treat2') - mdict=package_file( mdict, conn, 'whitchurch', 17242, 'control','control1') - mdict=package_file( mdict, conn, 'djordjevic', 4128, 'control','control2') - - - - # write the JSON - j = json.dumps(mdict, indent=4) - f = open('CATALOG.json', 'w') - print >> f, j - f.close() - - #print( args.dataset ) - #for d_id in args.dataset: - # download_dataset( conn, d_id, path, orig=args.orig, tif=args.tif ) - - #print(args.project) - #for p_id in args.project: - # project = conn.getObject('Project',p_id) - # path_p = os.path.join(path,project.getName()) - # if( project==None ): - # print("project ID {} not found in group {}".format(p_id, orig=args.orig, tif=args.tif)) - # sys.exit(1) - - # for ds in list(project.listChildren()): - # download_dataset( conn, ds.getId(), path_p, orig=args.orig, tif=args.tif ) - - # -finally: - # When you are done, close the session to free up server resources. - conn.close() -conn.close() - - diff --git a/omero/annotation_scripts/cli_scripts/downloads/.gitkeep b/omero/annotation_scripts/cli_scripts/downloads/.gitkeep deleted file mode 100644 index e69de29bb..000000000
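
For reference, a minimal runnable sketch (not taken from the scripts above, names and sample data are illustrative only) of the Python 3 idioms this diff introduces across the renamed annotation scripts: dict.items() in place of iteritems(), the "except Exception as ex" syntax, and flattening an ordered key/value mapping of sets into the [[key, value], ...] list shape that the map-annotation code expects. Error text is taken from str(ex) here, since Python 3 exceptions have no .message attribute.

from collections import OrderedDict
import copy


def merge_key_values(existing_kv, new_kv):
    """Merge new key/value sets into a copy of the existing ones."""
    updated_kv = copy.deepcopy(existing_kv)
    for key, vals in new_kv.items():  # Python 3: items(), not iteritems()
        if key not in updated_kv:
            updated_kv[key] = set()
        for val in vals:
            updated_kv[key].add(val)
    return updated_kv


def to_annotation_list(kv):
    """Flatten {key: {values}} into the [[key, value], ...] list used for map annotations."""
    return [[k, v] for k, vset in kv.items() for v in vset]


if __name__ == "__main__":
    existing = OrderedDict([("dye", {"DAPI"})])
    incoming = OrderedDict([("dye", {"GFP"}), ("temperature", {"37C"})])
    try:
        updated = merge_key_values(existing, incoming)
        if updated != existing:
            print("key-value pairs changed:", to_annotation_list(updated))
    except Exception as ex:  # Python 3 exception syntax; use str(ex), not ex.message
        print("Failed to merge key-values: {}".format(str(ex)))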