'''
Created on Jul 13, 2018

@author: kjnether

Reconstructed from the new-file patch hunk for
FMEUtil/FMEUtil/FMEScheduleLib.py (the original source here was a
line-mangled git diff).  Provides a cached view of FME Server schedules
plus helpers for comparing schedule definitions.
'''
import copy
import datetime
import json
import logging
import os.path
import pprint

# NOTE(review): FMEUtil.PyFMEServerV2 and deepdiff are imported lazily
# (inside the methods that need them) so this module can be imported and
# unit-tested without an FME Server client or deepdiff installed.


class Schedules(object):
    '''
    Provides an API to work with FME Server schedules.  Mostly provides
    caching functionality; caching exists due to extremely poor performance
    of schedule queries against FME Server 2015.

    :ivar fme: an FME Server object
    :type fme: FMEUtil.PyFMEServerV2.FMEServer

    :ivar scheds: FME schedule object
    :type scheds: FMEUtil.PyFMEServerV2.Schedule

    :ivar schedStruct: a data structure describing the schedules for the
        FME Server instance that was specified
    :type schedStruct: list of dict

    :ivar schedStructComparison: a duplicate of schedStruct with the fields
        described in flds2Ignore removed from each schedule dict; used for
        all comparison operations (in, -, etc.)

    :ivar flds2Ignore: fields that should be ignored when doing any
        comparison operations (in, -, +, etc.)
    :type flds2Ignore: list
    '''

    def __init__(self, fmeServUrl, fmeServToken, cacheLabel, cacheDir,
                 refreshCache=False, ignorePP=False):
        '''
        :param fmeServUrl: url to FME Server, don't include paths
        :param fmeServToken: token to FME Server
        :param cacheLabel: a label that is used to calculate the schedule
            cache file name
        :param cacheDir: directory where the cache file should be located
        :param refreshCache: whether the cache should be refreshed or not.
            Cached schedules have a date stamp in their file name and are
            only valid for the day they were generated.
        :param ignorePP: when True, published parameters are excluded from
            schedule comparisons (see setIgnoredFields)
        '''
        self.logger = logging.getLogger(__name__)
        self.pp = pprint.PrettyPrinter(indent=4)
        self.logger.debug("test log config")
        # lazy import: only needed when actually talking to FME Server
        import FMEUtil.PyFMEServerV2
        self.fme = FMEUtil.PyFMEServerV2.FMEServer(fmeServUrl, fmeServToken)
        self.scheds = self.fme.getSchedules()
        # cache file is date-stamped so a stale cache is never reused on a
        # later day
        dateTimeStamp = datetime.datetime.now().strftime('%Y-%m-%d')
        self.cacheFile = 'scheds_{0}_{1}.json'.format(cacheLabel,
                                                      dateTimeStamp)
        self.cacheFile = os.path.join(cacheDir, self.cacheFile)
        if refreshCache and os.path.exists(self.cacheFile):
            os.remove(self.cacheFile)
        self.schedStruct = None

        # These are fields in the schedules that should be ignored
        # when doing comparisons between two structures
        self.flds2Ignore = ['begin', 'enabled']
        # when True, published parameters are not considered in schedule
        # comparisons; can be changed via setIgnoredFields()
        self.ignorePP = ignorePP
        # populated by setIgnoredFields() with everything in
        # self.schedStruct except the fields described in flds2Ignore
        self.schedStructComparison = []
        self.getSchedules()

    def getPyFMESchedule(self):
        '''
        :return: the PyFMEServer schedule object used in this class
        :rtype: FMEUtil.PyFMEServerV2.Schedule
        '''
        return self.scheds

    def getPyFME(self):
        '''
        :return: an FMEServer object
        :rtype: FMEUtil.PyFMEServerV2.FMEServer
        '''
        return self.fme

    def getScheduleData(self):
        '''
        :return: the data structure that describes the schedules in this
            object
        :rtype: list of dict
        '''
        return self.schedStruct

    def setIgnoredFields(self, flds, ignorePublishedParameters=True):
        '''
        When doing a comparison between schedule objects you can set a
        list of fields to ignore.  Certain fields are set by default; this
        method allows you to define your own.  Rebuilds the comparison
        structure (schedStructComparison) from schedStruct.

        :param flds: a list of fields that should be ignored when comparing
            schedule objects
        :param ignorePublishedParameters: when True, the schedules'
            request/publishedParameters are also excluded from comparisons
        '''
        self.ignorePP = ignorePublishedParameters
        self.flds2Ignore = flds
        schedStructCleaned = []
        # deep copy so stripping fields never mutates self.schedStruct
        for sched in copy.deepcopy(self.schedStruct):
            for fld2Del in self.flds2Ignore:
                sched.pop(fld2Del, None)
            if self.ignorePP:
                # getting rid of published parameters too!
                # guarded pop: original did an unguarded
                # del sched['request']['publishedParameters'] which raised
                # KeyError for schedules lacking either key
                if 'request' in sched:
                    sched['request'].pop('publishedParameters', None)
            schedStructCleaned.append(sched)
        self.schedStructComparison = schedStructCleaned

    def isEnabled(self, scheduleName):
        '''
        :param scheduleName: name of the schedule to query
        :return: indicates if the schedule is enabled or not
        :rtype: bool
        :raises ValueError: if no schedule with that name exists (the
            original raised an opaque TypeError via None subscription)
        '''
        schedStruct = self.getScheduleByName(scheduleName)
        if schedStruct is None:
            raise ValueError(
                'no schedule named {0} exists'.format(scheduleName))
        return schedStruct['enabled']

    def getSchedules(self):
        '''
        If no cache file exists then gets the schedules from FME Server,
        otherwise loads them from the cache file.

        Also populates schedStructComparison, the structure used for
        comparisons; it has fields removed that should not take part in
        comparison operations.
        '''
        cacheFileName = os.path.basename(self.cacheFile)
        if os.path.exists(self.cacheFile):
            with open(self.cacheFile) as f:
                msg = 'loading the schedules from the cache file {0}'
                self.logger.info(msg.format(cacheFileName))
                schedStruct = json.load(f)
        else:
            # fix: original concatenation produced "thismay take a while"
            self.logger.info("retrieving the schedules from fme server, "
                             "this may take a while")
            schedStruct = self.scheds.getSchedules()
            self.logger.debug("schedStruct: {0}".format(schedStruct))
            with open(self.cacheFile, 'w') as outfile:
                msg = "dumping the schedules to the cache file {0}"
                self.logger.info(msg.format(cacheFileName))
                json.dump(schedStruct, outfile)

        self.schedStruct = schedStruct
        self.setIgnoredFields(self.flds2Ignore, self.ignorePP)

    def getScheduleByName(self, scheduleName):
        '''
        Searches the schedules for one named 'scheduleName' and returns it.

        :param scheduleName: name of the schedule to find
        :return: the schedule dict, or None if no schedule is found
        '''
        self.logger.debug(
            "getting the parameters for the schedule: {0}".format(
                scheduleName))
        retVal = None
        for sched in self.schedStruct:
            if sched['name'] == scheduleName:
                retVal = sched
                break
        return retVal

    def __contains__(self, sched):  # pylint: disable=invalid-name
        '''
        :param sched: a schedule dict; returns True or False based on
            whether it exists in this collection of schedules (ignoring
            the configured fields / published parameters)
        :type sched: dict
        :rtype: bool
        '''
        # clean the submitted schedule the same way schedStructComparison
        # was cleaned
        schedCleaned = {}
        for fld in sched.keys():
            if fld not in self.flds2Ignore:
                schedCleaned[fld] = copy.deepcopy(sched[fld])
        if self.ignorePP and 'request' in schedCleaned:
            # BUG FIX: the original tested
            #   'publishedParameters' in schedCleaned
            # (the top level) so the nested key was never removed and the
            # membership test below spuriously failed whenever the
            # submitted schedule carried published parameters.
            schedCleaned['request'].pop('publishedParameters', None)

        retVal = schedCleaned in self.schedStructComparison
        if not retVal:
            # log what differs for any schedule with the same name, to help
            # diagnose why the membership test failed
            for curSched in self.schedStructComparison:
                if curSched['name'] == schedCleaned['name']:
                    import deepdiff  # lazy: only needed for diagnostics
                    diffs = deepdiff.DeepDiff(schedCleaned, curSched)
                    self.logger.info("differences for {1}: {0}".format(
                        diffs, curSched['name']))
        return retVal

    def __sub__(self, schedules):  # pylint: disable=invalid-name
        '''
        Identifies schedules that are in self but not in the supplied
        schedules.

        :param schedules: another Schedules object (or anything supporting
            'in' with schedule dicts)
        :return: list of schedule dicts present here but absent there
        '''
        retVals = []
        for sched in self.schedStructComparison:
            if sched not in schedules:
                retVals.append(sched)
        return retVals


class Parameters(object):
    '''
    Published parameters returned by existing schedules often include
    scripted parameters.  When creating a schedule you cannot specify
    scripted parameters as they are... well... SCRIPTED!

    This class provides methods to retrieve the published parameters
    associated with a repository/workspace in a format that can be used to
    construct a schedule, i.e. one that excludes scripted parameters.
    '''

    def __init__(self, schedule, scheduleName):
        '''
        :param schedule: a Schedules object
        :param scheduleName: name of the schedule whose parameters are to
            be fixed up
        '''
        self.logger = logging.getLogger(__name__)
        self.schedule = schedule
        self.scheduleStruct = self.schedule.getScheduleByName(scheduleName)

    def getPublishedParameters(self):
        '''
        :return: the published parameters associated with the specified
            schedule.  These are retrieved not from the schedule but from
            the FMW that the schedule calls.
        '''
        workspcName = self.scheduleStruct['workspace']
        repoName = self.scheduleStruct['repository']

        fme = self.schedule.getPyFME()
        repo = fme.getRepository()
        wrkspcs = repo.getWorkspaces(repoName)
        pubParams = wrkspcs.getPublishedParams4Schedule(workspcName)
        return pubParams

    def fixSchedulePublishedParameters(self):
        '''
        :return: a schedule json struct that can be sent to FME Server to
            define a new schedule
        '''
        # published parameters retrieved from the workspace on fme server
        pubParams = self.getPublishedParameters()
        # published parameters associated with the current schedule;
        # includes scripted parameters, which cannot be used when
        # redefining a schedule.
        # (consistency fix: route the debug dump through the logger instead
        # of printing to stdout, matching the rest of this change set)
        pp = pprint.PrettyPrinter(indent=4)
        self.logger.debug(pp.pformat(self.scheduleStruct))
        schedulePubParams = \
            self.scheduleStruct['request']['publishedParameters']

        # Iterate the published parameters associated with the workspace
        # and override values with values retrieved from the schedule.
        params4Schedule = []
        for pubParam in pubParams:
            paramName = pubParam['name']
            for schedParams in schedulePubParams:
                if schedParams['name'] == paramName:
                    msg = 'updating the schedule parameter {0} from {1} to {2}'
                    msg = msg.format(schedParams['name'], pubParam['value'],
                                     schedParams['value'])
                    self.logger.info(msg)
                    pubParam['value'] = schedParams['value']
            params4Schedule.append(pubParam)

        self.scheduleStruct['request']['publishedParameters'] = \
            params4Schedule
        return self.scheduleStruct

# NOTE(review): in the original (line-mangled) patch source, the span this
# block replaces continued into the first, partial hunks of a patch to
# FMWParser.py (addPublishedParameters / extractAndReplaceParam); those
# fragments cannot be reproduced as runnable code here.
'\\\\data.bcgov\\wwwroot\\datasets\\4cf233c2-f020-4f7a-9b87-1923252fbc24\\ParcelMapBCExtract.zip\\ParcelMapBCExtract.gdb', 'TYPE': 'DEST_DATASET_FGDB_1 Destination Geodatabase:'}, 'DEST_DB_ENV_KEY': {'DEFAULT_VALUE': 'OTHR', 'TYPE': 'DLV%TST%PRD%DEV%OTHR Destination Database Keyword (DLV|TST|PRD|OTHR):'}, 'SRC_DATASET_FGDB_1': {'DEFAULT_VALUE': '\\\\data.bcgov\\data_staging\\BCGW\\land_ownership_and_status_pmbc_secure\\ParcelMapBCExtract.gdb', 'TYPE': 'Source Geodatabase:'}, 'LOG_FILE': {'DEFAULT_VALUE': 'importDataBCFMWTemplateparams=DataBCFMWTemplate.CalcParamsFME_MacroValuesreturnparams.getFMWLogFileRelativePath', 'TYPE': 'Python Script:'}, 'SRC_FEATURE_1': {'DEFAULT_VALUE': 'Parcel_Polygon', 'TYPE': 'Source feature class:'}, 'DEST_FEATURE_1': {'DEFAULT_VALUE': 'Parcel_Polygon', 'TYPE': 'Destination feature class:'}} + {'DEST_GEODATABASE': {'DEFAULT_VALUE': '\\\\data.bcgov\\wwwroot\\datasets\\4cf233c2-f020-4f7a-9b87-1923252fbc24\\ParcelMapBCExtract.zip\\ParcelMapBCExtract.gdb', 'TYPE': 'DEST_DATASET_FGDB_1 Destination Geodatabase:'}, + 'DEST_DB_ENV_KEY': {'DEFAULT_VALUE': 'OTHR', 'TYPE': 'DLV%TST%PRD%DEV%OTHR Destination Database Keyword (DLV|TST|PRD|OTHR):'}, + 'SRC_DATASET_FGDB_1': {'DEFAULT_VALUE': '\\\\data.bcgov\\data_staging\\BCGW\\land_ownership_and_status_pmbc_secure\\ParcelMapBCExtract.gdb', 'TYPE': 'Source Geodatabase:'}, 'LOG_FILE': {'DEFAULT_VALUE': 'importDataBCFMWTemplateparams=DataBCFMWTemplate.CalcParamsFME_MacroValuesreturnparams.getFMWLogFileRelativePath', 'TYPE': 'Python Script:'}, 'SRC_FEATURE_1': {'DEFAULT_VALUE': 'Parcel_Polygon', 'TYPE': 'Source feature class:'}, 'DEST_FEATURE_1': {'DEFAULT_VALUE': 'Parcel_Polygon', 'TYPE': 'Destination feature class:'}} ''' paramValue = None pubParmName = regex.group(1) @@ -197,10 +208,27 @@ def extractAndReplaceParam(self, regex, params): pubParmName = util.stripVariableNotations(pubParmName) # Now sub in the pub param from the pubparam list # don't sub in if the pub param is scripted - if 
params[pubParmName]['TYPE'] <> 'Python Script:': + self.logger.debug("pub param name: {0}".format(pubParmName)) + + if pubParmName in params and params[pubParmName]['TYPE'] <> 'Python Script:': paramValue = params[pubParmName]['DEFAULT_VALUE'] + elif pubParmName not in params: + # when fme parameters are linked to datasets they will take on this structure: + # {'DEST_GEODATABASE': {'DEFAULT_VALUE': '\\\\data.bcgov\\wwwroot\\datasets\\4cf233c2-f020-4f7a-9b87-1923252fbc24\\ParcelMapBCExtract.zip\\ParcelMapBCExtract.gdb', 'TYPE': 'DEST_DATASET_FGDB_1 Destination Geodatabase:'}, + # in this case we need to fish this value out of the 'type' + for iterParamName in params.keys(): + if pubParmName in params[iterParamName]['TYPE']: + paramValue = params[iterParamName]['DEFAULT_VALUE'] + break + if paramValue is None: + msg = 'unable to extract the parameter value for the parameter ' + \ + 'name {0}. variable params: {1}' + msg = msg.format(pubParmName, params) + raise ValueError, msg else: # just leave it as is by returning the parameter name + # this is what the normal data would look like: + # 'DEST_DB_ENV_KEY': {'DEFAULT_VALUE': 'OTHR', 'TYPE': 'DLV%TST%PRD%DEV%OTHR Destination Database Keyword (DLV|TST|PRD|OTHR):'} paramValue = pubParmName return paramValue @@ -760,6 +788,18 @@ def getPublishedParameters(self): ''' return self.publishedParams + def hasFieldMap(self): + ''' + fieldmaps can be defined in two different ways, one way is to + use an "AttributeRenamer" transformer. At DataBC this is the + "preferred" way of doing this. The other way is to drag + connecting lines from either the reader or the last transformer + to the writers feature class. + + This method should be able detect both of these appoaches. 
+ + ''' + class FMETransformers(object): ''' @@ -1125,33 +1165,38 @@ def getDataSetFormat(self): return self.datasetStruct[self.datasetFormatField] -if __name__ == '__main__': - logger = logging.getLogger(__name__) - logger.addHandler(logging.StreamHandler()) - logger.setLevel(logging.DEBUG) - - pp = pprint.PrettyPrinter(indent=4) - - inFMW = r'Z:\Workspace\kjnether\proj\DDF\fmw\abms_counties_sp_staging_gdb_bcgw.fmw' - - parsr = FMWParser(inFMW) - parsr.separateFMWComponents() - # parsr.readEverything() - - # trans = parsr.getTransformers() - - # dest = parsr.getDestDataSets() - # pp.pprint(dest) - - dest = parsr.getFeatureTypes() - pp.pprint(dest) - # dest = parsr.addPublishedParameters(dest) - # pp.pprint(dest) - - # pp = pprint.PrettyPrinter(indent=4) - # pp.pprint(dest[0]) - # print 'dest', dest[0].attrib - # parsr.getSourceDataSets() - # parsr.parseXML() - # parsr.getPublishedParams() +# if __name__ == '__main__': +# logger = logging.getLogger(__name__) +# logger.addHandler(logging.StreamHandler()) +# logger.setLevel(logging.DEBUG) +# +# pp = pprint.PrettyPrinter(indent=4) +# +# inFMW = 'testFME.fmw' +# parsr = FMWParser(inFMW) +# parsr.separateFMWComponents() +# # parsr.readEverything() +# +# # trans = parsr.getTransformers() +# +# # dest = parsr.getDestDataSets() +# # pp.pprint(dest) +# +# dest = parsr.getFeatureTypes() +# pp.pprint(dest) +# # dest = parsr.addPublishedParameters(dest) +# # pp.pprint(dest) +# +# # pp = pprint.PrettyPrinter(indent=4) +# # pp.pprint(dest[0]) +# # print 'dest', dest[0].attrib +# # parsr.getSourceDataSets() +# # parsr.parseXML() +# # parsr.getPublishedParams() +''' + currently trying to figure out how to extract fieldmaps. 
+ FACTORY_DEF * RoutingFactory FACTORY_NAME "Destination Feature Type Routing Correlator" + + +''' \ No newline at end of file diff --git a/FMEUtil/FMEUtil/PyFMEServerV2.py b/FMEUtil/FMEUtil/PyFMEServerV2.py index 2c930f0..7f766ec 100644 --- a/FMEUtil/FMEUtil/PyFMEServerV2.py +++ b/FMEUtil/FMEUtil/PyFMEServerV2.py @@ -34,6 +34,8 @@ import requests +# pylint: disable=logging-format-interpolation + class FMERestBase(object): ''' @@ -314,6 +316,7 @@ class Logs(object): # pylint: disable=too-few-public-methods ''' def __init__(self, baseObj): + self.logger = logging.getLogger(__name__) self.baseObj = baseObj # example of v1 url to a log # V2 logs are moved under the jobs. @@ -323,7 +326,7 @@ def __init__(self, baseObj): self.url = self.baseObj.fixUrlPath(self.url) self.url = urlparse.urljoin(self.url, 'jobs', True) self.url = self.baseObj.fixUrlPath(self.url) - + self.logger.debug("url is: {0}".format(self.url)) # self.url = urlparse.urljoin(self.url, 'complete', True) # self.url = self.baseObj.fixUrlPath(self.url) @@ -332,6 +335,7 @@ def getLog(self, logId): Returns a Log object for a given log id. Job id? 
''' log = Log(self, logId) + self.logger.debug("getting the log: {0}".format(logId)) return log @@ -341,8 +345,10 @@ class Schedules(object): ''' def __init__(self, baseObj): + self.logger = logging.getLogger(__name__) self.baseObj = baseObj self.url = urlparse.urljoin(self.baseObj.restUrl, self.baseObj.scheduleDir, True) + self.logger.debug("base url: {0}".format(self.url)) def getSchedule(self): ''' @@ -367,6 +373,7 @@ def exists(self, schedName, category=None): ''' schedsList = self.getSchedules(detail='low') retVal = False + self.logger.debug("schedule list: {0}".format(schedsList)) # print 'schedList:', schedsList for sched in schedsList: # self.logger.debug("current sched in iteration %s", sched) @@ -437,7 +444,8 @@ def delete(self, category, scheduleName): # catEncode = urllib.quote(category) # print 'catEncode:', catEncode # scheduleNameEncode = urllib.quote(scheduleName) - print 'schedules url', self.schedules.url + # print 'schedules url', self.schedules.url + self.logger.debug("root schedule url: {0}".format(self.schedules.url)) url = self.schedules.baseObj.fixUrlPath(self.schedules.url) # url = urlparse.urljoin(url, catEncode) @@ -446,10 +454,11 @@ def delete(self, category, scheduleName): # url = urlparse.urljoin(url, scheduleNameEncode) url = urlparse.urljoin(url, scheduleName) url = self.schedules.baseObj.fixUrlPath(url) - print 'schedule url now:', url + # print 'schedule url now:', url + self.logger.debug("schedule url: {0}".format(url)) header = {'Accept': 'application/json'} response = self.baseObj.deleteResponse(url, header=header, acceptCodes=[204]) - print 'response', response + # print 'response', response return response def disable(self, category, scheduleName): @@ -626,13 +635,13 @@ def getJobs(self, statusType='completed', detail='low', limit=None, offset=None) params['offset'] = 0 if offset: params['offset'] = str(offset) - print 'params:', params + # print 'params:', params response = self.baseObj.getResponse(url, detail=detail, 
additionalParams=params) cnt = 0 for job in response: jobs[job['id']] = job cnt += 1 - print 'cnt:', cnt + # print 'cnt:', cnt if cnt == 0: self.jobNullPagesRead += 1 return jobs @@ -720,9 +729,9 @@ def submitJob(self, repoName, jobName, params=None, sync=False): body['subsection'] = "REST_SERVICE" body['TMDirectives'] = {'priority': 100} - #print 'body:-----' + # print 'body:-----' pp = pprint.PrettyPrinter(indent=4) - #pp.pprint(body) + # pp.pprint(body) self.logger.debug(pp.pformat(body)) body = json.dumps(body) @@ -921,7 +930,7 @@ def updateRepository(self, repoName, fmwPath): itemUrl = urlparse.urljoin(itemUrl, 'items') itemUrl = self.baseObj.fixUrlPath(itemUrl) itemUrl = urlparse.urljoin(itemUrl, justFMW) - print 'itemUrl', itemUrl + # print 'itemUrl', itemUrl headers = {'Content-Disposition': 'attachment; filename="' + str(fmwPath) + '"', 'Content-Type': 'application/octet-stream', 'Accept': 'application/json'} @@ -975,7 +984,7 @@ def create(self, repName, descr): # Accept: application/json dataPayload = {'description': descr, 'name':repName} - print 'url:', self.url + # print 'url:', self.url response = self.baseObj.postResponseFormData(self.url, detail='high', data=dataPayload) return response @@ -987,10 +996,13 @@ class Workspaces(object): ''' def __init__(self, repos, repo): + self.logger = logging.getLogger(__name__) self.repos = repos self.repo = repo self.repoName = repo['name'] self.baseObj = self.repos.baseObj + self.logger.info('workspace obj repo: {0}'.format(self.repoName)) + self.url = urlparse.urljoin(self.baseObj.restUrl, self.baseObj.repositoryDir) self.url = self.baseObj.fixUrlPath(self.url) self.url = urlparse.urljoin(self.url, self.repoName) @@ -1069,7 +1081,7 @@ def getWorkspaceDatasets(self, wrkspcName, srcOrDest='source'): url = urlparse.urljoin(url, 'datasets') url = self.baseObj.fixUrlPath(url) url = urlparse.urljoin(url, srcOrDest) - print 'url:', url + # print 'url:', url header = {'Accept': r'application/json'} response = 
self.baseObj.getResponse(url, detail='high', returnType='json', header=header) return response @@ -1164,6 +1176,36 @@ def getPublishedParams(self, wrkspcName, reformat4JobReRun=False): params = reformatParams return params + def getPublishedParams4Schedule(self, wrkspcName): + ''' + Returns the published parameter for the current job with values formatted + for use in a schedule. + + Will return a list with this structure: + [ {'name': 'the name', + 'value': 'value'}, + {...}...] + + Will exclude scripted parameters. + ''' + params = self.getPublishedParams(wrkspcName) + reformattedParams = [] + for param in params: + newParam = {} + paramName = param['name'] + paramValue = param['defaultValue'] + paramType = param['type'] + + msg = 'param name: {0} value: {1} type: {2}' + msg = msg.format(paramName, paramValue, paramType) + self.logger.debug(msg) + + newParam['name'] = paramName + newParam['value'] = paramValue + + reformattedParams.append(newParam) + return reformattedParams + def downloadWorkspace(self, wrkspcName, destination): ''' :param wrkspcName: name of the workspace that you want to download @@ -1190,8 +1232,10 @@ def deleteWorkspace(self, wrkspcName): ''' url = self.baseObj.fixUrlPath(self.url) url = urlparse.urljoin(url, wrkspcName) - print 'url', url - print 'wrkspcName', wrkspcName + self.logger.debug("url: {0}".format(url)) + self.logger.info("attempting to delete: {0}".format(wrkspcName)) + # print 'url', url + # print 'wrkspcName', wrkspcName header = {'Accept': 'application/json'} resp = self.baseObj.deleteResponse(url, header=header, acceptCodes=[204]) return resp @@ -1231,7 +1275,7 @@ def getDirectory(self, dirType, dirList): # FME_SHAREDRESOURCE_LOG # FME_SHAREDRESOURCE_TEMP itemUrl = self.__calcURL(dirType, dirList) - print 'url is:', itemUrl + # print 'url is:', itemUrl addparams = {'depth': 10} dontErrorStatusCodes = [404] response = self.baseObj.getResponse(itemUrl, additionalParams=addparams, @@ -1339,14 +1383,14 @@ def copyFile(self, 
dirType, dirList, file2Upload, overwrite=False, createDirecto baseName = os.path.basename(file2Upload) # baseName = urllib.quote(baseName) baseName = baseName.decode('utf8') - print 'baseName', baseName + # print 'baseName', baseName headers = {'Content-Disposition': 'attachment; filename="' + str(baseName) + '"', 'Content-Type': 'application/octet-stream', 'Accept': 'application/json'} # Content-Disposition: attachment; filename="uploadtest.txt" # Accept: application/json dataPayload = open(file2Upload, 'rb') - print 'itemUrl', itemUrl + # print 'itemUrl', itemUrl response = self.baseObj.postResponseFormData(itemUrl, header=headers, data=dataPayload, params=params) dataPayload.close() @@ -1359,10 +1403,10 @@ def getResourceInfo(self, dirType, dirList): ''' # /resources/connections/< resource >/filesys/< path > itemUrl = self.__calcURL(dirType, dirList) - print 'url:', itemUrl + # print 'url:', itemUrl response = self.baseObj.getResponse(itemUrl) - pp = pprint.PrettyPrinter(indent=4) - pp.pprint(response) + # pp = pprint.PrettyPrinter(indent=4) + # pp.pprint(response) return response diff --git a/FMEUtil/setup.py b/FMEUtil/setup.py index 5cc9d10..31c4ada 100644 --- a/FMEUtil/setup.py +++ b/FMEUtil/setup.py @@ -35,7 +35,7 @@ from setuptools import setup setup(name='FMEUtil', - version='0.2', + version='0.3', description='Various utilities to help with FME workflows. PyFMEServerV2.py is the wrapper to the rest api', url='https://github.com/bcgov/dbc-pylib/tree/master/FMEUtil', author='Kevin Netherton', @@ -43,6 +43,6 @@ license='MIT', packages=['FMEUtil'], install_requires=[ - 'requests', 'lxml' + 'requests', 'lxml', 'deepdiff' ], zip_safe=False)